hexsha (stringlengths 40–40) | size (int64 3–1.03M) | ext (stringclasses 10) | lang (stringclasses 1) | max_stars_repo_path (stringlengths 3–972) | max_stars_repo_name (stringlengths 6–130) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–972) | max_issues_repo_name (stringlengths 6–130) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–972) | max_forks_repo_name (stringlengths 6–130) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 3–1.03M) | avg_line_length (float64 1.13–941k) | max_line_length (int64 2–941k) | alphanum_fraction (float64 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bc9831a2545b162924110599d27de9217476cd42 | 2,221 | py | Python | adminmgr/media/code/A2/python/task/BD_0012_0792_0948_1324.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | ["Apache-2.0"] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A2/python/task/BD_0012_0792_0948_1324.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | ["Apache-2.0"] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A2/python/task/BD_0012_0792_0948_1324.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | ["Apache-2.0"] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z |
from __future__ import print_function
import re
import sys
from operator import add
from pyspark.sql import SparkSession
def computeContribs(url, rank):
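    # `url` is the iterable of neighbour URLs for one page (the grouped values produced
    # by links.join below); each neighbour receives an equal share of the page's rank.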
num_urls = len(url)
for i in url:
yield (i, rank / num_urls)
def parseNeighbors(url):
prts = re.split(r',', url)
avg = int(prts[2])/int(prts[3])
return prts[1],avg
def parseNeighbors1(url):
prts = re.split(r',', url)
return prts[0], prts[1]
def converge(o_rank, n_rank):
    # Converged when every rank differs from the previous iteration by less than 1e-4.
    o = o_rank.collect()
    n = n_rank.collect()
    for i in range(min(len(o), len(n))):
        if abs(n[i][1] - o[i][1]) >= 0.0001:
            return False
    return True
if __name__ == "__main__":
print("Start")
if len(sys.argv) != 4:
        print("Usage: pagerank <file> <iterations> <weight>", file=sys.stderr)
sys.exit(-1)
# Initialize the spark context.
spark = SparkSession\
.builder\
.appName("PythonPageRank")\
.getOrCreate()
print("Read")
linss = spark.read.text(sys.argv[1]).rdd.map(lambda r: r[0])
ranks = linss.map(lambda url: parseNeighbors(url)).distinct().groupByKey().mapValues(sum).cache()
ranks_new=ranks.mapValues(lambda x:max(x,1.0))
links = linss.map(lambda url: parseNeighbors1(url)).distinct().groupByKey().cache()
weight=float(sys.argv[3])
num_iters = 0
print("Begin")
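    # The update applied in both branches below is the damped PageRank formula:
    #   new_rank = weight * (sum of incoming contributions) + (1 - weight),
    # with `weight` (sys.argv[3]) playing the role of the damping factor.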
    if int(sys.argv[2]) == 0:
        # Iterate until the ranks stop changing between iterations.
        while True:
            num_iters += 1
            contributs = links.join(ranks_new).flatMap(lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
            o_rank = ranks_new
            ranks = contributs.reduceByKey(add).mapValues(lambda rank: rank * weight + 1 - weight)
            ranks_new = ranks
            print(num_iters)
            if converge(o_rank, ranks_new):
                break
        print("num_iters :", num_iters)
elif(int(sys.argv[2]) > 0):
for iteration in range(int(sys.argv[2])):
contributs = links.join(ranks).flatMap(lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
ranks = contributs.reduceByKey(add).mapValues(lambda rank: rank * weight + 1-weight)
rank_sort = ranks.sortBy(lambda a: -a[1])
for (link, rank) in rank_sort.collect():
print("%s,%s." % (link, format(rank,'.12f')))
spark.stop()
| 26.129412 | 126 | 0.681225 |
259a39f4687f9ab98977b05605864d2624c92300 | 5,332 | py | Python | integration-tests/test/test_unbond.py | woky/rchain | 76ba93f4349fa525eb08d0b3f1751c23e0de74e2 | ["Apache-2.0"] | null | null | null | integration-tests/test/test_unbond.py | woky/rchain | 76ba93f4349fa525eb08d0b3f1751c23e0de74e2 | ["Apache-2.0"] | 7 | 2019-12-27T14:15:35.000Z | 2019-12-30T01:06:20.000Z | integration-tests/test/test_unbond.py | woky/rchain | 76ba93f4349fa525eb08d0b3f1751c23e0de74e2 | ["Apache-2.0"] | null | null | null |
from random import Random
import pytest
from docker.client import DockerClient
from rchain.crypto import PrivateKey
from . import conftest
from .common import (
CommandLineOptions,
NotAnActiveValidatorError
)
from .rnode import (
bootstrap_connected_peer,
ready_bootstrap_with_network,
)
from .wait import (
wait_for_node_sees_block,
)
from .test_wallets import get_vault_balance
BOOTSTRAP_KEY = PrivateKey.from_hex("b2527b00340a83e302beae2a8daf6d654e8e57541acfa261cc1b5635eb16aa15")
VALIDATOR_KEY_1 = PrivateKey.from_hex("9a801debae8bb97fe54c99389cafa576c60612503348578125b65ab182ff5850")
VALIDATOR_KEY_2 = PrivateKey.generate()
VALIDATOR_KEY_3 = PrivateKey.generate()
def test_unbond_validator(command_line_options: CommandLineOptions, random_generator: Random, docker_client: DockerClient) -> None:
bonded_validator_map = {
BOOTSTRAP_KEY: 20000000,
VALIDATOR_KEY_1: 40000000,
VALIDATOR_KEY_2: 20000000,
VALIDATOR_KEY_3: 20000000,
}
wallets_file = {
BOOTSTRAP_KEY : 30000000,
VALIDATOR_KEY_1: 20000000,
VALIDATOR_KEY_2: 20000000,
VALIDATOR_KEY_3: 20000000,
}
    # Unbond a validator. Set the epoch length to 3 and the quarantine length to 6.
    # At every epoch change the network re-picks the active validators, and the
    # withdrawal happens after (quarantine_length + epoch_length * (1 + blockNumber // epoch_length)) blocks.
    # Normally quarantine_length is 5x the epoch_length, but in order to shorten the
    # test we choose 2x the epoch_length.
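    # Illustrative sketch (not part of the original test) of the formula above:
    #   def withdrawal_block(unbond_block, epoch_length, quarantine_length):
    #       return quarantine_length + epoch_length * (1 + unbond_block // epoch_length)
    # e.g. withdrawal_block(2, 3, 6) == 9, so the refund becomes visible in block 10.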
epoch_length = 3
quarantine_length = 6
with conftest.testing_context(command_line_options, random_generator, docker_client, validator_bonds_dict=bonded_validator_map, bootstrap_key=BOOTSTRAP_KEY, wallets_dict=wallets_file) as context, \
ready_bootstrap_with_network(context=context, synchrony_constraint_threshold=0, epoch_length=epoch_length, quarantine_length=quarantine_length) as bootstrap_node, \
bootstrap_connected_peer(context=context, bootstrap=bootstrap_node, name='bonded-validator-1', private_key=VALIDATOR_KEY_1, epoch_length=epoch_length, quarantine_length=quarantine_length) as validator_1, \
bootstrap_connected_peer(context=context, bootstrap=bootstrap_node, name='bonded-validator-2',
private_key=VALIDATOR_KEY_2, epoch_length=epoch_length, quarantine_length=quarantine_length) as validator_2:
# genesis block number is 0
# block number 1
validator_1.deploy('/opt/docker/examples/tut-hello.rho', VALIDATOR_KEY_3)
validator_1.propose()
        # block number 2
        # the unbond is deployed in block number 2
        # the withdrawal should happen after block number
        # quarantine_length + epoch_length * (1 + block_number // epoch_length) = 6 + 3 * (1 + 2 // 3) = 9
        # so the withdrawal happens after block number 9
        # and we should see the withdrawal result in block number 10
validator_1.deploy_contract_with_substitution(substitute_dict={}, rho_file_path="resources/wallets/unbond.rho", private_key=VALIDATOR_KEY_1)
        # block number 3
        # the close block happens after all deploys are processed
validator_1.deploy('/opt/docker/examples/tut-hello.rho', VALIDATOR_KEY_3)
b3 = validator_1.propose()
        # block number 4
        # validator_1 is no longer an active validator after block number 3
        # validator_1 should fail to propose
with pytest.raises(NotAnActiveValidatorError):
validator_1.deploy('/opt/docker/examples/tut-hello.rho', VALIDATOR_KEY_3)
validator_1.propose()
wait_for_node_sees_block(context, bootstrap_node, b3)
        # block number 4
        # the withdrawal has not happened yet
validator_1_balance = get_vault_balance(context, bootstrap_node, VALIDATOR_KEY_1.get_public_key().get_rev_address(), VALIDATOR_KEY_2, 100000, 1)
assert validator_1_balance < 20000000
# block number 5
bootstrap_node.deploy('/opt/docker/examples/tut-hello-again.rho', VALIDATOR_KEY_3)
b5 = bootstrap_node.propose()
wait_for_node_sees_block(context, validator_2, b5)
# block number 6
validator_1_balance = get_vault_balance(context, validator_2, VALIDATOR_KEY_1.get_public_key().get_rev_address(), VALIDATOR_KEY_2, 100000, 1)
assert validator_1_balance < 20000000
# block number 7
validator_1_balance = get_vault_balance(context, validator_2, VALIDATOR_KEY_1.get_public_key().get_rev_address(), VALIDATOR_KEY_2, 100000, 1)
assert validator_1_balance < 20000000
# block number 8
validator_2.deploy('/opt/docker/examples/tut-hello.rho', VALIDATOR_KEY_3)
b8 = validator_2.propose()
wait_for_node_sees_block(context, bootstrap_node, b8)
# block number 9
validator_1_balance = get_vault_balance(context, bootstrap_node, VALIDATOR_KEY_1.get_public_key().get_rev_address(), VALIDATOR_KEY_2, 100000, 1)
assert validator_1_balance < 20000000
        # block number 10
        # the withdrawal happens in block number 9; the result is visible in block 10
validator_1_balance = get_vault_balance(context, bootstrap_node, VALIDATOR_KEY_1.get_public_key().get_rev_address(), VALIDATOR_KEY_2, 100000, 1)
assert validator_1_balance > 50000000
| 46.77193 | 213 | 0.737622 |
7783a6e922740fe642a3b5b9dd3f0d4e5f2f6c48 | 121 | py | Python | zevon/__init__.py | muckamuck/zevon | c1315fe7d62d0474e59157a5c351e47d6b9056d7 | ["MIT"] | null | null | null | zevon/__init__.py | muckamuck/zevon | c1315fe7d62d0474e59157a5c351e47d6b9056d7 | ["MIT"] | null | null | null | zevon/__init__.py | muckamuck/zevon | c1315fe7d62d0474e59157a5c351e47d6b9056d7 | ["MIT"] | null | null | null |
from zevon.flask_lambda import FlaskLambda # noqa
from zevon.welcome import welcome_html # noqa
version = '0.2.0'
| 24.2 | 50 | 0.743802 |
a1368c980d7e27c4eb85e10aa9ef0813ca504c48 | 1,292 | py | Python | Code/AISmartHome/SmartHome/urls.py | ykzzyk/SmartHomeRemoteControl_STM32 | 509417c94eac491182cd074039b773848c30421b | ["MIT"] | null | null | null | Code/AISmartHome/SmartHome/urls.py | ykzzyk/SmartHomeRemoteControl_STM32 | 509417c94eac491182cd074039b773848c30421b | ["MIT"] | null | null | null | Code/AISmartHome/SmartHome/urls.py | ykzzyk/SmartHomeRemoteControl_STM32 | 509417c94eac491182cd074039b773848c30421b | ["MIT"] | null | null | null |
# """SmartHome URL Configuration
#
# The `urlpatterns` list routes URLs to views. For more information please see:
# https://docs.djangoproject.com/en/2.1/topics/http/urls/
# Examples:
# Function views
# 1. Add an import: from my_app import views
# 2. Add a URL to urlpatterns: path('', views.home, name='home')
# Class-based views
# 1. Add an import: from other_app.views import Home
# 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
# Including another URLconf
# 1. Import the include() function: from django.urls import include, path
# 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
# """
# from django.contrib import admin
# from django.urls import path
#
# urlpatterns = [
# path('admin/', admin.site.urls),
# ]
from django.conf.urls import url
from django.contrib import admin
from django_web.views import index  # import the index function from django_web/views.py
from django_web.views import lighton
from django_web.views import lightoff
from django_web.views import getdata
from django_web.views import upload
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^on', lighton),
url(r'^off', lightoff),
url(r'^getdata', getdata),
url(r'^upload', upload),
    url(r'^', index),  # catch-all: any request not matched above is handled by the index view
]
| 32.3 | 79 | 0.698142 |
aaf7880f1242b7357bacc047b53f3a8435bac171 | 29,745 | py | Python | test/test_transforms_tensor.py | shrutipulstya/vision | 85982ac695e78af80bf59cd9c855e1729b7376f5 | ["BSD-3-Clause"] | 1 | 2021-09-18T07:47:12.000Z | 2021-09-18T07:47:12.000Z | test/test_transforms_tensor.py | shrutipulstya/vision | 85982ac695e78af80bf59cd9c855e1729b7376f5 | ["BSD-3-Clause"] | 4 | 2021-11-06T09:11:19.000Z | 2021-11-22T09:21:26.000Z | test/test_transforms_tensor.py | shrutipulstya/vision | 85982ac695e78af80bf59cd9c855e1729b7376f5 | ["BSD-3-Clause"] | null | null | null |
import os
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
from torchvision.transforms import InterpolationMode
import numpy as np
import pytest
from typing import Sequence
from common_utils import (
get_tmp_dir,
int_dtypes,
float_dtypes,
_create_data,
_create_data_batch,
_assert_equal_tensor_to_pil,
_assert_approx_equal_tensor_to_pil,
cpu_and_gpu,
assert_equal,
)
NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC
def _test_transform_vs_scripted(transform, s_transform, tensor, msg=None):
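    # Re-seeding with the same value before the eager call and the scripted call makes
    # both draw identical random parameters, so the two outputs can be compared exactly.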
torch.manual_seed(12)
out1 = transform(tensor)
torch.manual_seed(12)
out2 = s_transform(tensor)
assert_equal(out1, out2, msg=msg)
def _test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors, msg=None):
torch.manual_seed(12)
transformed_batch = transform(batch_tensors)
for i in range(len(batch_tensors)):
img_tensor = batch_tensors[i, ...]
torch.manual_seed(12)
transformed_img = transform(img_tensor)
assert_equal(transformed_img, transformed_batch[i, ...], msg=msg)
torch.manual_seed(12)
s_transformed_batch = s_transform(batch_tensors)
assert_equal(transformed_batch, s_transformed_batch, msg=msg)
def _test_functional_op(f, device, fn_kwargs=None, test_exact_match=True, **match_kwargs):
fn_kwargs = fn_kwargs or {}
tensor, pil_img = _create_data(height=10, width=10, device=device)
transformed_tensor = f(tensor, **fn_kwargs)
transformed_pil_img = f(pil_img, **fn_kwargs)
if test_exact_match:
_assert_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs)
else:
_assert_approx_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs)
def _test_class_op(method, device, meth_kwargs=None, test_exact_match=True, **match_kwargs):
# TODO: change the name: it's not a method, it's a class.
meth_kwargs = meth_kwargs or {}
# test for class interface
f = method(**meth_kwargs)
scripted_fn = torch.jit.script(f)
tensor, pil_img = _create_data(26, 34, device=device)
# set seed to reproduce the same transformation for tensor and PIL image
torch.manual_seed(12)
transformed_tensor = f(tensor)
torch.manual_seed(12)
transformed_pil_img = f(pil_img)
if test_exact_match:
_assert_equal_tensor_to_pil(transformed_tensor, transformed_pil_img, **match_kwargs)
else:
_assert_approx_equal_tensor_to_pil(transformed_tensor.float(), transformed_pil_img, **match_kwargs)
torch.manual_seed(12)
transformed_tensor_script = scripted_fn(tensor)
assert_equal(transformed_tensor, transformed_tensor_script)
batch_tensors = _create_data_batch(height=23, width=34, channels=3, num_samples=4, device=device)
_test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors)
with get_tmp_dir() as tmp_dir:
scripted_fn.save(os.path.join(tmp_dir, f"t_{method.__name__}.pt"))
def _test_op(func, method, device, fn_kwargs=None, meth_kwargs=None, test_exact_match=True, **match_kwargs):
_test_functional_op(func, device, fn_kwargs, test_exact_match=test_exact_match, **match_kwargs)
_test_class_op(method, device, meth_kwargs, test_exact_match=test_exact_match, **match_kwargs)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize(
'func,method,fn_kwargs,match_kwargs', [
(F.hflip, T.RandomHorizontalFlip, None, {}),
(F.vflip, T.RandomVerticalFlip, None, {}),
(F.invert, T.RandomInvert, None, {}),
(F.posterize, T.RandomPosterize, {"bits": 4}, {}),
(F.solarize, T.RandomSolarize, {"threshold": 192.0}, {}),
(F.adjust_sharpness, T.RandomAdjustSharpness, {"sharpness_factor": 2.0}, {}),
(F.autocontrast, T.RandomAutocontrast, None, {'test_exact_match': False,
'agg_method': 'max', 'tol': (1 + 1e-5),
'allowed_percentage_diff': .05}),
(F.equalize, T.RandomEqualize, None, {})
]
)
def test_random(func, method, device, fn_kwargs, match_kwargs):
_test_op(func, method, device, fn_kwargs, fn_kwargs, **match_kwargs)
@pytest.mark.parametrize('device', cpu_and_gpu())
class TestColorJitter:
@pytest.mark.parametrize('brightness', [0.1, 0.5, 1.0, 1.34, (0.3, 0.7), [0.4, 0.5]])
def test_color_jitter_brightness(self, brightness, device):
tol = 1.0 + 1e-10
meth_kwargs = {"brightness": brightness}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=tol, agg_method="max"
)
@pytest.mark.parametrize('contrast', [0.2, 0.5, 1.0, 1.5, (0.3, 0.7), [0.4, 0.5]])
def test_color_jitter_contrast(self, contrast, device):
tol = 1.0 + 1e-10
meth_kwargs = {"contrast": contrast}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=tol, agg_method="max"
)
@pytest.mark.parametrize('saturation', [0.5, 0.75, 1.0, 1.25, (0.3, 0.7), [0.3, 0.4]])
def test_color_jitter_saturation(self, saturation, device):
tol = 1.0 + 1e-10
meth_kwargs = {"saturation": saturation}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=tol, agg_method="max"
)
@pytest.mark.parametrize('hue', [0.2, 0.5, (-0.2, 0.3), [-0.4, 0.5]])
def test_color_jitter_hue(self, hue, device):
meth_kwargs = {"hue": hue}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=16.1, agg_method="max"
)
def test_color_jitter_all(self, device):
# All 4 parameters together
meth_kwargs = {"brightness": 0.2, "contrast": 0.2, "saturation": 0.2, "hue": 0.2}
_test_class_op(
T.ColorJitter, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=12.1, agg_method="max"
)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('m', ["constant", "edge", "reflect", "symmetric"])
@pytest.mark.parametrize('mul', [1, -1])
def test_pad(m, mul, device):
fill = 127 if m == "constant" else 0
# Test functional.pad (PIL and Tensor) with padding as single int
_test_functional_op(
F.pad, fn_kwargs={"padding": mul * 2, "fill": fill, "padding_mode": m},
device=device
)
# Test functional.pad and transforms.Pad with padding as [int, ]
fn_kwargs = meth_kwargs = {"padding": [mul * 2, ], "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
# Test functional.pad and transforms.Pad with padding as list
fn_kwargs = meth_kwargs = {"padding": [mul * 4, 4], "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
# Test functional.pad and transforms.Pad with padding as tuple
fn_kwargs = meth_kwargs = {"padding": (mul * 2, 2, 2, mul * 2), "fill": fill, "padding_mode": m}
_test_op(
F.pad, T.Pad, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_crop(device):
fn_kwargs = {"top": 2, "left": 3, "height": 4, "width": 5}
# Test transforms.RandomCrop with size and padding as tuple
meth_kwargs = {"size": (4, 5), "padding": (4, 4), "pad_if_needed": True, }
_test_op(
F.crop, T.RandomCrop, device=device, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs
)
# Test transforms.functional.crop including outside the image area
fn_kwargs = {"top": -2, "left": 3, "height": 4, "width": 5} # top
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)
fn_kwargs = {"top": 1, "left": -3, "height": 4, "width": 5} # left
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)
fn_kwargs = {"top": 7, "left": 3, "height": 4, "width": 5} # bottom
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)
fn_kwargs = {"top": 3, "left": 8, "height": 4, "width": 5} # right
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)
fn_kwargs = {"top": -3, "left": -3, "height": 15, "width": 15} # all
_test_functional_op(F.crop, fn_kwargs=fn_kwargs, device=device)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('padding_config', [
{"padding_mode": "constant", "fill": 0},
{"padding_mode": "constant", "fill": 10},
{"padding_mode": "constant", "fill": 20},
{"padding_mode": "edge"},
{"padding_mode": "reflect"}
])
@pytest.mark.parametrize('size', [5, [5, ], [6, 6]])
def test_crop_pad(size, padding_config, device):
config = dict(padding_config)
config["size"] = size
_test_class_op(T.RandomCrop, device, config)
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_center_crop(device, tmpdir):
fn_kwargs = {"output_size": (4, 5)}
meth_kwargs = {"size": (4, 5), }
_test_op(
F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs,
meth_kwargs=meth_kwargs
)
fn_kwargs = {"output_size": (5,)}
meth_kwargs = {"size": (5,)}
_test_op(
F.center_crop, T.CenterCrop, device=device, fn_kwargs=fn_kwargs,
meth_kwargs=meth_kwargs
)
tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=device)
# Test torchscript of transforms.CenterCrop with size as int
f = T.CenterCrop(size=5)
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)
# Test torchscript of transforms.CenterCrop with size as [int, ]
f = T.CenterCrop(size=[5, ])
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)
# Test torchscript of transforms.CenterCrop with size as tuple
f = T.CenterCrop(size=(6, 6))
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)
scripted_fn.save(os.path.join(tmpdir, "t_center_crop.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('fn, method, out_length', [
# test_five_crop
(F.five_crop, T.FiveCrop, 5),
# test_ten_crop
(F.ten_crop, T.TenCrop, 10)
])
@pytest.mark.parametrize('size', [(5,), [5, ], (4, 5), [4, 5]])
def test_x_crop(fn, method, out_length, size, device):
meth_kwargs = fn_kwargs = {'size': size}
scripted_fn = torch.jit.script(fn)
tensor, pil_img = _create_data(height=20, width=20, device=device)
transformed_t_list = fn(tensor, **fn_kwargs)
transformed_p_list = fn(pil_img, **fn_kwargs)
assert len(transformed_t_list) == len(transformed_p_list)
assert len(transformed_t_list) == out_length
for transformed_tensor, transformed_pil_img in zip(transformed_t_list, transformed_p_list):
_assert_equal_tensor_to_pil(transformed_tensor, transformed_pil_img)
transformed_t_list_script = scripted_fn(tensor.detach().clone(), **fn_kwargs)
assert len(transformed_t_list) == len(transformed_t_list_script)
assert len(transformed_t_list_script) == out_length
for transformed_tensor, transformed_tensor_script in zip(transformed_t_list, transformed_t_list_script):
assert_equal(transformed_tensor, transformed_tensor_script)
# test for class interface
fn = method(**meth_kwargs)
scripted_fn = torch.jit.script(fn)
output = scripted_fn(tensor)
assert len(output) == len(transformed_t_list_script)
# test on batch of tensors
batch_tensors = _create_data_batch(height=23, width=34, channels=3, num_samples=4, device=device)
torch.manual_seed(12)
transformed_batch_list = fn(batch_tensors)
for i in range(len(batch_tensors)):
img_tensor = batch_tensors[i, ...]
torch.manual_seed(12)
transformed_img_list = fn(img_tensor)
for transformed_img, transformed_batch in zip(transformed_img_list, transformed_batch_list):
assert_equal(transformed_img, transformed_batch[i, ...])
@pytest.mark.parametrize('method', ["FiveCrop", "TenCrop"])
def test_x_crop_save(method, tmpdir):
fn = getattr(T, method)(size=[5, ])
scripted_fn = torch.jit.script(fn)
scripted_fn.save(os.path.join(tmpdir, "t_op_list_{}.pt".format(method)))
class TestResize:
@pytest.mark.parametrize('size', [32, 34, 35, 36, 38])
def test_resize_int(self, size):
# TODO: Minimal check for bug-fix, improve this later
x = torch.rand(3, 32, 46)
t = T.Resize(size=size)
y = t(x)
# If size is an int, smaller edge of the image will be matched to this number.
# i.e, if height > width, then image will be rescaled to (size * height / width, size).
assert isinstance(y, torch.Tensor)
assert y.shape[1] == size
assert y.shape[2] == int(size * 46 / 32)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64])
@pytest.mark.parametrize('size', [[32, ], [32, 32], (32, 32), [34, 35]])
@pytest.mark.parametrize('max_size', [None, 35, 1000])
@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC, NEAREST])
def test_resize_scripted(self, dt, size, max_size, interpolation, device):
tensor, _ = _create_data(height=34, width=36, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
if dt is not None:
# This is a trivial cast to float of uint8 data to test all cases
tensor = tensor.to(dt)
if max_size is not None and len(size) != 1:
pytest.xfail("with max_size, size must be a sequence with 2 elements")
transform = T.Resize(size=size, interpolation=interpolation, max_size=max_size)
s_transform = torch.jit.script(transform)
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_resize_save(self, tmpdir):
transform = T.Resize(size=[32, ])
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_resize.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('scale', [(0.7, 1.2), [0.7, 1.2]])
@pytest.mark.parametrize('ratio', [(0.75, 1.333), [0.75, 1.333]])
@pytest.mark.parametrize('size', [(32,), [44, ], [32, ], [32, 32], (32, 32), [44, 55]])
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR, BICUBIC])
def test_resized_crop(self, scale, ratio, size, interpolation, device):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
transform = T.RandomResizedCrop(size=size, scale=scale, ratio=ratio, interpolation=interpolation)
s_transform = torch.jit.script(transform)
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_resized_crop_save(self, tmpdir):
transform = T.RandomResizedCrop(size=[32, ])
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_resized_crop.pt"))
def _test_random_affine_helper(device, **kwargs):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
transform = T.RandomAffine(**kwargs)
s_transform = torch.jit.script(transform)
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_random_affine(device, tmpdir):
transform = T.RandomAffine(degrees=45.0)
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_random_affine.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR])
@pytest.mark.parametrize('shear', [15, 10.0, (5.0, 10.0), [-15, 15], [-10.0, 10.0, -11.0, 11.0]])
def test_random_affine_shear(device, interpolation, shear):
_test_random_affine_helper(device, degrees=0.0, interpolation=interpolation, shear=shear)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR])
@pytest.mark.parametrize('scale', [(0.7, 1.2), [0.7, 1.2]])
def test_random_affine_scale(device, interpolation, scale):
_test_random_affine_helper(device, degrees=0.0, interpolation=interpolation, scale=scale)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR])
@pytest.mark.parametrize('translate', [(0.1, 0.2), [0.2, 0.1]])
def test_random_affine_translate(device, interpolation, translate):
_test_random_affine_helper(device, degrees=0.0, interpolation=interpolation, translate=translate)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR])
@pytest.mark.parametrize('degrees', [45, 35.0, (-45, 45), [-90.0, 90.0]])
def test_random_affine_degrees(device, interpolation, degrees):
_test_random_affine_helper(device, degrees=degrees, interpolation=interpolation)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR])
@pytest.mark.parametrize('fill', [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1])
def test_random_affine_fill(device, interpolation, fill):
_test_random_affine_helper(device, degrees=0.0, interpolation=interpolation, fill=fill)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('center', [(0, 0), [10, 10], None, (56, 44)])
@pytest.mark.parametrize('expand', [True, False])
@pytest.mark.parametrize('degrees', [45, 35.0, (-45, 45), [-90.0, 90.0]])
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR])
@pytest.mark.parametrize('fill', [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1])
def test_random_rotate(device, center, expand, degrees, interpolation, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
transform = T.RandomRotation(
degrees=degrees, interpolation=interpolation, expand=expand, center=center, fill=fill
)
s_transform = torch.jit.script(transform)
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_random_rotate_save(tmpdir):
transform = T.RandomRotation(degrees=45.0)
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_random_rotate.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('distortion_scale', np.linspace(0.1, 1.0, num=20))
@pytest.mark.parametrize('interpolation', [NEAREST, BILINEAR])
@pytest.mark.parametrize('fill', [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1])
def test_random_perspective(device, distortion_scale, interpolation, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
transform = T.RandomPerspective(
distortion_scale=distortion_scale,
interpolation=interpolation,
fill=fill
)
s_transform = torch.jit.script(transform)
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
def test_random_perspective_save(tmpdir):
transform = T.RandomPerspective()
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_perspective.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('Klass, meth_kwargs', [
(T.Grayscale, {"num_output_channels": 1}),
(T.Grayscale, {"num_output_channels": 3}),
(T.RandomGrayscale, {})
])
def test_to_grayscale(device, Klass, meth_kwargs):
tol = 1.0 + 1e-10
_test_class_op(
Klass, meth_kwargs=meth_kwargs, test_exact_match=False, device=device,
tol=tol, agg_method="max"
)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('in_dtype', int_dtypes() + float_dtypes())
@pytest.mark.parametrize('out_dtype', int_dtypes() + float_dtypes())
def test_convert_image_dtype(device, in_dtype, out_dtype):
tensor, _ = _create_data(26, 34, device=device)
batch_tensors = torch.rand(4, 3, 44, 56, device=device)
in_tensor = tensor.to(in_dtype)
in_batch_tensors = batch_tensors.to(in_dtype)
fn = T.ConvertImageDtype(dtype=out_dtype)
scripted_fn = torch.jit.script(fn)
if (in_dtype == torch.float32 and out_dtype in (torch.int32, torch.int64)) or \
(in_dtype == torch.float64 and out_dtype == torch.int64):
with pytest.raises(RuntimeError, match=r"cannot be performed safely"):
_test_transform_vs_scripted(fn, scripted_fn, in_tensor)
with pytest.raises(RuntimeError, match=r"cannot be performed safely"):
_test_transform_vs_scripted_on_batch(fn, scripted_fn, in_batch_tensors)
return
_test_transform_vs_scripted(fn, scripted_fn, in_tensor)
_test_transform_vs_scripted_on_batch(fn, scripted_fn, in_batch_tensors)
def test_convert_image_dtype_save(tmpdir):
fn = T.ConvertImageDtype(dtype=torch.uint8)
scripted_fn = torch.jit.script(fn)
scripted_fn.save(os.path.join(tmpdir, "t_convert_dtype.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('policy', [policy for policy in T.AutoAugmentPolicy])
@pytest.mark.parametrize('fill', [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1])
def test_autoaugment(device, policy, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
transform = T.AutoAugment(policy=policy, fill=fill)
s_transform = torch.jit.script(transform)
for _ in range(25):
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('num_ops', [1, 2, 3])
@pytest.mark.parametrize('magnitude', [7, 9, 11])
@pytest.mark.parametrize('fill', [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1])
def test_randaugment(device, num_ops, magnitude, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
transform = T.RandAugment(num_ops=num_ops, magnitude=magnitude, fill=fill)
s_transform = torch.jit.script(transform)
for _ in range(25):
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('fill', [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1])
def test_trivialaugmentwide(device, fill):
tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=device)
batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=device)
transform = T.TrivialAugmentWide(fill=fill)
s_transform = torch.jit.script(transform)
for _ in range(25):
_test_transform_vs_scripted(transform, s_transform, tensor)
_test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors)
@pytest.mark.parametrize('augmentation', [T.AutoAugment, T.RandAugment, T.TrivialAugmentWide])
def test_autoaugment_save(augmentation, tmpdir):
transform = augmentation()
s_transform = torch.jit.script(transform)
s_transform.save(os.path.join(tmpdir, "t_autoaugment.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize(
'config', [
{"value": 0.2},
{"value": "random"},
{"value": (0.2, 0.2, 0.2)},
{"value": "random", "ratio": (0.1, 0.2)}
]
)
def test_random_erasing(device, config):
tensor, _ = _create_data(24, 32, channels=3, device=device)
batch_tensors = torch.rand(4, 3, 44, 56, device=device)
fn = T.RandomErasing(**config)
scripted_fn = torch.jit.script(fn)
_test_transform_vs_scripted(fn, scripted_fn, tensor)
_test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors)
def test_random_erasing_save(tmpdir):
fn = T.RandomErasing(value=0.2)
scripted_fn = torch.jit.script(fn)
scripted_fn.save(os.path.join(tmpdir, "t_random_erasing.pt"))
def test_random_erasing_with_invalid_data():
img = torch.rand(3, 60, 60)
# Test Set 0: invalid value
random_erasing = T.RandomErasing(value=(0.1, 0.2, 0.3, 0.4), p=1.0)
with pytest.raises(ValueError, match="If value is a sequence, it should have either a single value or 3"):
random_erasing(img)
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_normalize(device, tmpdir):
fn = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
tensor, _ = _create_data(26, 34, device=device)
with pytest.raises(TypeError, match="Input tensor should be a float tensor"):
fn(tensor)
batch_tensors = torch.rand(4, 3, 44, 56, device=device)
tensor = tensor.to(dtype=torch.float32) / 255.0
# test for class interface
scripted_fn = torch.jit.script(fn)
_test_transform_vs_scripted(fn, scripted_fn, tensor)
_test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors)
scripted_fn.save(os.path.join(tmpdir, "t_norm.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_linear_transformation(device, tmpdir):
c, h, w = 3, 24, 32
tensor, _ = _create_data(h, w, channels=c, device=device)
matrix = torch.rand(c * h * w, c * h * w, device=device)
mean_vector = torch.rand(c * h * w, device=device)
fn = T.LinearTransformation(matrix, mean_vector)
scripted_fn = torch.jit.script(fn)
_test_transform_vs_scripted(fn, scripted_fn, tensor)
batch_tensors = torch.rand(4, c, h, w, device=device)
# We skip some tests from _test_transform_vs_scripted_on_batch as
# results for scripted and non-scripted transformations are not exactly the same
torch.manual_seed(12)
transformed_batch = fn(batch_tensors)
torch.manual_seed(12)
s_transformed_batch = scripted_fn(batch_tensors)
assert_equal(transformed_batch, s_transformed_batch)
scripted_fn.save(os.path.join(tmpdir, "t_norm.pt"))
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_compose(device):
tensor, _ = _create_data(26, 34, device=device)
tensor = tensor.to(dtype=torch.float32) / 255.0
transforms = T.Compose([
T.CenterCrop(10),
T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
s_transforms = torch.nn.Sequential(*transforms.transforms)
scripted_fn = torch.jit.script(s_transforms)
torch.manual_seed(12)
transformed_tensor = transforms(tensor)
torch.manual_seed(12)
transformed_tensor_script = scripted_fn(tensor)
assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms))
t = T.Compose([
lambda x: x,
])
with pytest.raises(RuntimeError, match="cannot call a value of type 'Tensor'"):
torch.jit.script(t)
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_random_apply(device):
tensor, _ = _create_data(26, 34, device=device)
tensor = tensor.to(dtype=torch.float32) / 255.0
transforms = T.RandomApply([
T.RandomHorizontalFlip(),
T.ColorJitter(),
], p=0.4)
s_transforms = T.RandomApply(torch.nn.ModuleList([
T.RandomHorizontalFlip(),
T.ColorJitter(),
]), p=0.4)
scripted_fn = torch.jit.script(s_transforms)
torch.manual_seed(12)
transformed_tensor = transforms(tensor)
torch.manual_seed(12)
transformed_tensor_script = scripted_fn(tensor)
assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms))
if device == "cpu":
# Can't check this twice, otherwise
# "Can't redefine method: forward on class: __torch__.torchvision.transforms.transforms.RandomApply"
transforms = T.RandomApply([
T.ColorJitter(),
], p=0.3)
with pytest.raises(RuntimeError, match="Module 'RandomApply' has no attribute 'transforms'"):
torch.jit.script(transforms)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('meth_kwargs', [
{"kernel_size": 3, "sigma": 0.75},
{"kernel_size": 23, "sigma": [0.1, 2.0]},
{"kernel_size": 23, "sigma": (0.1, 2.0)},
{"kernel_size": [3, 3], "sigma": (1.0, 1.0)},
{"kernel_size": (3, 3), "sigma": (0.1, 2.0)},
{"kernel_size": [23], "sigma": 0.75}
])
def test_gaussian_blur(device, meth_kwargs):
tol = 1.0 + 1e-10
_test_class_op(
T.GaussianBlur, meth_kwargs=meth_kwargs,
test_exact_match=False, device=device, agg_method="max", tol=tol
)
| 41.427577 | 110 | 0.690637 |
3361bd10369b5aa3cf8da7b19bd89acb30a1fe4c | 11,493 | py | Python | test/test_text.py | prmths128/pattern | e8870e117f94306d7bcbf3ea9dea808c4d94b037 | ["BSD-3-Clause"] | null | null | null | test/test_text.py | prmths128/pattern | e8870e117f94306d7bcbf3ea9dea808c4d94b037 | ["BSD-3-Clause"] | null | null | null | test/test_text.py | prmths128/pattern | e8870e117f94306d7bcbf3ea9dea808c4d94b037 | ["BSD-3-Clause"] | 1 | 2017-04-15T21:35:34.000Z | 2017-04-15T21:35:34.000Z |
# -*- coding: utf-8 -*-
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import unittest
import StringIO
from pattern import text
#---------------------------------------------------------------------------------------------------
class TestLexicon(unittest.TestCase):
def setUp(self):
pass
def test_lazydict(self):
# Assert lazy dictionary only has data after one of its methods is called.
class V(text.lazydict):
def load(self):
dict.__setitem__(self, "a", 1)
v = V()
self.assertTrue(dict.__len__(v) == 0)
self.assertTrue(dict.__contains__(v, "a") is False)
self.assertTrue(len(v), 1)
self.assertTrue(v["a"] == 1)
print "pattern.text.lazydict"
def test_lazylist(self):
# Assert lazy list only has data after one of its methods is called.
class V(text.lazylist):
def load(self):
list.append(self, "a")
v = V()
self.assertTrue(list.__len__(v) == 0)
self.assertTrue(list.__contains__(v, "a") is False)
self.assertTrue(len(v), 1)
self.assertTrue(v[0] == "a")
print "pattern.text.lazylist"
def test_lexicon(self):
# Assert lexicon from file (or file-like string).
f1 = u";;; Comments. \n schrödinger NNP \n cat NN"
f2 = StringIO.StringIO(u";;; Comments. \n schrödinger NNP \n cat NN")
v1 = text.Lexicon(path=f1)
v2 = text.Lexicon(path=f2)
self.assertEqual(v1[u"schrödinger"], "NNP")
self.assertEqual(v2[u"schrödinger"], "NNP")
print "pattern.text.Lexicon"
#---------------------------------------------------------------------------------------------------
class TestFrequency(unittest.TestCase):
def setUp(self):
pass
def test_frequency(self):
# Assert word frequency from file (or file-like string).
f1 = u";;; Comments. \n the 1.0000 \n of 0.5040"
f2 = StringIO.StringIO(u";;; Comments. \n the 1.0000 \n of 0.5040")
v1 = text.Frequency(path=f1)
v2 = text.Frequency(path=f2)
self.assertEqual(v1[u"of"], 0.504)
self.assertEqual(v2[u"of"], 0.504)
print "pattern.text.Frequency"
#---------------------------------------------------------------------------------------------------
class TestModel(unittest.TestCase):
def setUp(self):
pass
def test_model(self):
# Assert SLP language model.
v = text.Model()
for i in range(2):
v.train("black", "JJ", previous=("the", "DT"), next=("cat", "NN"))
v.train("on", "IN", previous=("sat", "VBD"), next=("the", "DT"))
self.assertEqual("JJ", v.classify("slack"))
self.assertEqual("JJ", v.classify("white", previous=("a", "DT"), next=("cat", "NN")))
self.assertEqual("IN", v.classify("on", previous=("sat", "VBD")))
self.assertEqual("IN", v.classify("on", next=("the", "")))
self.assertEqual(["white", "JJ"], v.apply(("white", ""), next=("cat", "")))
print "pattern.text.Model"
#---------------------------------------------------------------------------------------------------
class TestMorphology(unittest.TestCase):
def setUp(self):
pass
def test_morphology(self):
# Assert morphological tagging rules.
f = StringIO.StringIO(u"NN s fhassuf 1 NNS x")
v = text.Morphology(f)
self.assertEqual(v.apply(
["cats", "NN"]),
["cats", "NNS"])
print "pattern.text.Morphology"
#---------------------------------------------------------------------------------------------------
class TestContext(unittest.TestCase):
def setUp(self):
pass
def test_context(self):
# Assert contextual tagging rules.
f = StringIO.StringIO(u"VBD VB PREVTAG TO")
v = text.Context(path=f)
self.assertEqual(v.apply(
[["to", "TO"], ["be", "VBD"]]),
[["to", "TO"], ["be", "VB"]])
print "pattern.text.Context"
#---------------------------------------------------------------------------------------------------
class TestEntities(unittest.TestCase):
def setUp(self):
pass
def test_entities(self):
# Assert named entity recognizer.
f = StringIO.StringIO(u"Schrödinger's cat PERS")
v = text.Entities(path=f)
self.assertEqual(v.apply(
[[u"Schrödinger's", "NNP"], ["cat", "NN"]]),
[[u"Schrödinger's", "NNP-PERS"], ["cat", "NNP-PERS"]])
print "pattern.text.Entities"
#---------------------------------------------------------------------------------------------------
class TestParser(unittest.TestCase):
def setUp(self):
pass
def test_stringio(self):
# Assert loading data from file-like strings.
p = text.Parser(
lexicon = {"to": "TO", "saw": "VBD"},
morphology = StringIO.StringIO(u"NN s fhassuf 1 NNS x"),
context = StringIO.StringIO(u"VBD VB PREVTAG TO"))
self.assertEqual(p.parse("cats"), "cats/NNS/B-NP/O")
self.assertEqual(p.parse("to saw"), "to/TO/B-VP/O saw/VB/I-VP/O")
def test_find_keywords(self):
# Assert the intrinsic keyword extraction algorithm.
p = text.Parser()
p.lexicon["the"] = "DT"
p.lexicon["cat"] = "NN"
p.lexicon["dog"] = "NN"
v1 = p.find_keywords("the cat")
v2 = p.find_keywords("cat. cat. dog.")
v3 = p.find_keywords("cat. dog. dog.")
v4 = p.find_keywords("the. cat. dog.", frequency={"cat": 1.0, "dog": 0.0})
self.assertEqual(v1, ["cat"])
self.assertEqual(v2, ["cat", "dog"])
self.assertEqual(v3, ["dog", "cat"])
self.assertEqual(v3, ["dog", "cat"])
print "pattern.text.Parser.find_keywords()"
def test_find_tokens(self):
# Assert the default tokenizer and its optional parameters.
p = text.Parser()
v1 = p.find_tokens(u"Schrödinger's cat is alive!", punctuation="", replace={})
v2 = p.find_tokens(u"Schrödinger's cat is dead!", punctuation="!", replace={"'s": " 's"})
v3 = p.find_tokens(u"etc.", abbreviations=set())
v4 = p.find_tokens(u"etc.", abbreviations=set(("etc.",)))
self.assertEqual(v1[0], u"Schrödinger's cat is alive!")
self.assertEqual(v2[0], u"Schrödinger 's cat is dead !")
self.assertEqual(v3[0], "etc .")
self.assertEqual(v4[0], "etc.")
print "pattern.text.Parser.find_tokens()"
def test_find_tags(self):
# Assert the default part-of-speech tagger and its optional parameters.
p = text.Parser()
v1 = p.find_tags([u"Schrödinger", "cat", "1.0"], lexicon={}, default=("NN?", "NNP?", "CD?"))
v2 = p.find_tags([u"Schrödinger", "cat", "1.0"], lexicon={"1.0": "CD?"})
v3 = p.find_tags([u"Schrödinger", "cat", "1.0"], map=lambda token, tag: (token, tag+"!"))
v4 = p.find_tags(["observer", "observable"], language="fr")
v5 = p.find_tags(["observer", "observable"], language="en")
self.assertEqual(v1, [[u"Schr\xf6dinger", "NNP?"], ["cat", "NN?"], ["1.0", "CD?"]])
self.assertEqual(v2, [[u"Schr\xf6dinger", "NNP" ], ["cat", "NN" ], ["1.0", "CD?"]])
self.assertEqual(v3, [[u"Schr\xf6dinger", "NNP!"], ["cat", "NN!"], ["1.0", "CD!"]])
self.assertEqual(v4, [["observer", "NN"], ["observable", "NN"]])
self.assertEqual(v5, [["observer", "NN"], ["observable", "JJ"]])
print "pattern.text.Parser.find_tags()"
def test_find_chunks(self):
# Assert the default phrase chunker and its optional parameters.
p = text.Parser()
v1 = p.find_chunks([["", "DT"], ["", "JJ"], ["", "NN"]], language="en")
v2 = p.find_chunks([["", "DT"], ["", "JJ"], ["", "NN"]], language="es")
v3 = p.find_chunks([["", "DT"], ["", "NN"], ["", "JJ"]], language="en")
v4 = p.find_chunks([["", "DT"], ["", "NN"], ["", "JJ"]], language="es")
self.assertEqual(v1, [["", "DT", "B-NP", "O"], ["", "JJ", "I-NP", "O"], ["", "NN", "I-NP", "O"]])
self.assertEqual(v2, [["", "DT", "B-NP", "O"], ["", "JJ", "I-NP", "O"], ["", "NN", "I-NP", "O"]])
self.assertEqual(v3, [["", "DT", "B-NP", "O"], ["", "NN", "I-NP", "O"], ["", "JJ", "B-ADJP", "O"]])
self.assertEqual(v4, [["", "DT", "B-NP", "O"], ["", "NN", "I-NP", "O"], ["", "JJ", "I-NP", "O"]])
print "pattern.text.Parser.find_chunks()"
#---------------------------------------------------------------------------------------------------
class TestSentiment(unittest.TestCase):
def setUp(self):
pass
def test_dict(self):
# Assert weighted average polarity and subjectivity for dictionary.
s = text.Sentiment()
v = {":-(": 4, ":-)": 1}
self.assertEqual(s(v)[0], -0.5)
self.assertEqual(s(v)[1], +1.0)
self.assertEqual(s(v).assessments[0], ([":-("], -0.75, 1.0, "mood"))
self.assertEqual(s(v).assessments[1], ([":-)"], +0.50, 1.0, "mood"))
def test_bag_of_words(self):
# Assert weighted average polarity and subjectivity for bag-of-words with weighted features.
from pattern.vector import BagOfWords # Alias for pattern.vector.Document.
s = text.Sentiment()
v = BagOfWords({":-(": 4, ":-)": 1})
self.assertEqual(s(v)[0], -0.5)
self.assertEqual(s(v)[1], +1.0)
self.assertEqual(s(v).assessments[0], ([":-("], -0.75, 1.0, "mood"))
self.assertEqual(s(v).assessments[1], ([":-)"], +0.50, 1.0, "mood"))
#---------------------------------------------------------------------------------------------------
class TestMultilingual(unittest.TestCase):
def setUp(self):
pass
def test_language(self):
# Assert language recognition.
self.assertEqual(text.language(u"the cat sat on the mat")[0], "en")
self.assertEqual(text.language(u"de kat zat op de mat")[0], "nl")
self.assertEqual(text.language(u"le chat s'était assis sur le tapis")[0], "fr")
print "pattern.text.language()"
def test_deflood(self):
# Assert flooding removal.
self.assertEqual(text.deflood("NIIICE!!!", n=1), "NICE!")
self.assertEqual(text.deflood("NIIICE!!!", n=2), "NIICE!!")
print "pattern.text.deflood()"
#---------------------------------------------------------------------------------------------------
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestLexicon))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestFrequency))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestModel))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMorphology))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestContext))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestEntities))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestParser))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestSentiment))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMultilingual))
return suite
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=1).run(suite())
| 42.724907 | 107 | 0.516314 |
8a68ebcade19cfd73d66e039b6407c878a9ab3ca | 2,852 | py | Python | ml/kubeflow-argo/components/dataflow/tft/preprocessing.py | dk-wei/GCP-code-snippets | c23ca7f2981b5df946448b20a54f823ce52bba05 | ["Apache-2.0"] | null | null | null | ml/kubeflow-argo/components/dataflow/tft/preprocessing.py | dk-wei/GCP-code-snippets | c23ca7f2981b5df946448b20a54f823ce52bba05 | ["Apache-2.0"] | null | null | null | ml/kubeflow-argo/components/dataflow/tft/preprocessing.py | dk-wei/GCP-code-snippets | c23ca7f2981b5df946448b20a54f823ce52bba05 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing function, for applying tf.transform to the chicago_taxi data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import uuid
import os
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as transform
from tensorflow_transform.beam import impl as beam_impl
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform import coders as tft_coders
import taxi_schema.taxi_schema as ts
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in ts.DENSE_FLOAT_FEATURE_KEYS:
# Preserve this feature as a dense float, setting nan's to the mean.
outputs[key] = transform.scale_to_z_score(inputs[key])
for key in ts.VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[key] = transform.string_to_int(
inputs[key], top_k=VOCAB_SIZE, num_oov_buckets=OOV_SIZE)
for key in ts.BUCKET_FEATURE_KEYS:
outputs[key] = transform.bucketize(inputs[key], FEATURE_BUCKET_COUNT)
for key in ts.CATEGORICAL_FEATURE_KEYS:
outputs[key] = inputs[key]
# Was this passenger a big tipper?
def convert_label(label):
taxi_fare = inputs[ts.FARE_KEY]
return tf.where(
tf.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(label, tf.multiply(taxi_fare, tf.constant(0.2))),
tf.int64))
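  # convert_label maps each fare/tip pair to a binary label: 1 when the tip exceeds
  # 20% of the fare, 0 otherwise (rows with a NaN fare become 0).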
outputs[ts.LABEL_KEY] = transform.apply_function(convert_label,
inputs[ts.LABEL_KEY])
return outputs
| 32.044944 | 81 | 0.745091 |
b174081190d9d4c7cfbbc684ab083f68f3838451 | 528 | py | Python | src/pytorch_metric_learning/regularizers/lp_regularizer.py | wconnell/pytorch-metric-learning | 1affee7c77bb5d6d4ee559bad62b910a21b39d48 | ["MIT"] | 1 | 2021-01-27T03:36:19.000Z | 2021-01-27T03:36:19.000Z | src/pytorch_metric_learning/regularizers/lp_regularizer.py | umitkacar/pytorch-metric-learning | bf2b7675b7b80e5762b75428d51e4ab0a861e710 | ["MIT"] | null | null | null | src/pytorch_metric_learning/regularizers/lp_regularizer.py | umitkacar/pytorch-metric-learning | bf2b7675b7b80e5762b75428d51e4ab0a861e710 | ["MIT"] | null | null | null |
from .base_regularizer import BaseRegularizer
import torch
from ..utils import common_functions as c_f
class LpRegularizer(BaseRegularizer):
def __init__(self, p=2, **kwargs):
super().__init__(**kwargs)
self.p = p
self.add_to_recordable_attributes(list_of_names=["p"], is_stat=False)
def compute_loss(self, embeddings):
reg = torch.norm(embeddings, p=self.p, dim=1)
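        # torch.norm(..., p=self.p, dim=1) yields one Lp-norm penalty per embedding row;
        # reduction_type="element" hands these per-row values to the reducer to combine.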
return {"loss": {"losses": reg, "indices": c_f.torch_arange_from_size(embeddings), "reduction_type": "element"}}
| 40.615385 | 120 | 0.700758 |
4ce2599f67f7338f7e6e25df457344e53cfb1300 | 13,925 | py | Python | stumpy/aamp_ostinato.py | profintegra/stumpy | 66b3402d91820005b466e1da6fe353b61e6246c5 | ["BSD-3-Clause"] | 1 | 2021-07-27T11:14:01.000Z | 2021-07-27T11:14:01.000Z | stumpy/aamp_ostinato.py | profintegra/stumpy | 66b3402d91820005b466e1da6fe353b61e6246c5 | ["BSD-3-Clause"] | null | null | null | stumpy/aamp_ostinato.py | profintegra/stumpy | 66b3402d91820005b466e1da6fe353b61e6246c5 | ["BSD-3-Clause"] | null | null | null |
# STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import numpy as np
from . import core, aamp, aamped
def _aamp_across_series_nearest_neighbors(
Ts, Ts_idx, subseq_idx, m, Ts_squared, Ts_subseq_isfinite
):
"""
    For multiple time series, find the subsequence in each individual time series that is
    closest to a given query subsequence.
Parameters
----------
Ts : list
A list of time series for which to find the nearest neighbor subsequences that
are closest to the query subsequence `Ts[Ts_idx][subseq_idx : subseq_idx + m]`
Ts_idx : int
The index of time series in `Ts` which contains the query subsequence
subseq_idx : int
The subsequence index in the time series `Ts[Ts_idx]` that contains the query
subsequence
m : int
Window size
Ts_squared : list
A list of rolling window `T_squared` for each time series in `Ts`
Ts_subseq_isfinite : list
A list of rolling window `T_subseq_isfinite` for each time series in `Ts`
Returns
-------
nns_radii : ndarray
Radii to subsequences in each time series of `Ts` that are closest to the
query subsequence `Ts[Ts_idx][subseq_idx : subseq_idx + m]`
nns_subseq_idx : ndarray
Indices to subsequences in each time series of `Ts` that are closest to the
query subsequence `Ts[Ts_idx][subseq_idx : subseq_idx + m]`
"""
k = len(Ts)
Q = Ts[Ts_idx][subseq_idx : subseq_idx + m]
Q_squared = np.sum(Q * Q)
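    # Q_squared (the query's squared L2 norm) together with the precomputed rolling
    # Ts_squared values is what core._mass_absolute needs to turn each sliding dot
    # product QT into a non-normalized (absolute) distance profile.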
nns_radii = np.zeros(k, dtype=np.float64)
nns_subseq_idx = np.zeros(k, dtype=np.int64)
for i in range(k):
if np.any(~np.isfinite(Q)): # pragma: no cover
distance_profile = np.empty(Ts[i].shape[0] - m + 1)
distance_profile[:] = np.inf
else:
QT = core.sliding_dot_product(
Ts[Ts_idx][subseq_idx : subseq_idx + m], Ts[i]
)
distance_profile = core._mass_absolute(Q_squared, Ts_squared[i], QT)
distance_profile[~Ts_subseq_isfinite[i]] = np.inf
nns_subseq_idx[i] = np.argmin(distance_profile)
nns_radii[i] = distance_profile[nns_subseq_idx[i]]
return nns_radii, nns_subseq_idx
def _get_aamp_central_motif(
Ts, bsf_radius, bsf_Ts_idx, bsf_subseq_idx, m, Ts_squared, Ts_subseq_isfinite
):
"""
Compare subsequences with the same radius and return the most central motif (i.e.,
having the smallest average nearest neighbor radii)
Parameters
----------
Ts : list
A list of time series for which to find the most central motif
bsf_radius : float
        Best-so-far radius found by a consensus search algorithm
bsf_Ts_idx : int
The index of time series in `Ts` where the `bsf_radius` was first observed
bsf_subseq_idx : int
The subsequence index in `Ts[bsf_Ts_idx]` that has radius `bsf_radius`
m : int
Window size
Ts_squared : list
A list of rolling window `T_squared` for each time series in `Ts`
Ts_subseq_isfinite : list
A list of rolling window `T_subseq_isfinite` for each time series in `Ts`
Returns
-------
bsf_radius : float
The updated best-so-far radius of the most central consensus motif
bsf_Ts_idx : int
The updated index of time series in `Ts` which contains the most central
consensus motif
bsf_subseq_idx : int
The updated subsequence index in the time series `Ts[bsf_Ts_idx]` that contains
the most central consensus motif
"""
bsf_nns_radii, bsf_nns_subseq_idx = _aamp_across_series_nearest_neighbors(
Ts, bsf_Ts_idx, bsf_subseq_idx, m, Ts_squared, Ts_subseq_isfinite
)
bsf_nns_mean_radii = bsf_nns_radii.mean()
candidate_nns_Ts_idx = np.flatnonzero(np.isclose(bsf_nns_radii, bsf_radius))
candidate_nns_subseq_idx = bsf_nns_subseq_idx[candidate_nns_Ts_idx]
for Ts_idx, subseq_idx in zip(candidate_nns_Ts_idx, candidate_nns_subseq_idx):
candidate_nns_radii, _ = _aamp_across_series_nearest_neighbors(
Ts, Ts_idx, subseq_idx, m, Ts_squared, Ts_subseq_isfinite
)
if (
np.isclose(candidate_nns_radii.max(), bsf_radius)
and candidate_nns_radii.mean() < bsf_nns_mean_radii
):
bsf_Ts_idx = Ts_idx
bsf_subseq_idx = subseq_idx
bsf_nns_mean_radii = candidate_nns_radii.mean()
return bsf_radius, bsf_Ts_idx, bsf_subseq_idx
def _aamp_ostinato(
Ts,
m,
Ts_squared,
Ts_subseq_isfinite,
dask_client=None,
device_id=None,
mp_func=aamp,
):
"""
Find the consensus motif amongst a list of time series
Parameters
----------
Ts : list
A list of time series for which to find the consensus motif
m : int
Window size
Ts_squared : list
A list of rolling window `T_squared` for each time series in `Ts`
Ts_subseq_isfinite : list
A list of rolling window `T_subseq_isfinite` for each time series in `Ts`
dask_client : client, default None
A Dask Distributed client that is connected to a Dask scheduler and
Dask workers. Setting up a Dask distributed cluster is beyond the
scope of this library. Please refer to the Dask Distributed
documentation.
device_id : int or list, default None
The (GPU) device number to use. The default value is `0`. A list of
valid device ids (int) may also be provided for parallel GPU-STUMP
computation. A list of all valid device ids can be obtained by
executing `[device.id for device in numba.cuda.list_devices()]`.
mp_func : object, default stump
Specify a custom matrix profile function to use for computing matrix profiles
Returns
-------
bsf_radius : float
The (best-so-far) Radius of the consensus motif
bsf_Ts_idx : int
The time series index in `Ts` which contains the consensus motif
bsf_subseq_idx : int
        The subsequence index within time series `Ts[bsf_Ts_idx]` that contains the
consensus motif
Notes
-----
`DOI: 10.1109/ICDM.2019.00140 \
<https://www.cs.ucr.edu/~eamonn/consensus_Motif_ICDM_Long_version.pdf>`__
See Table 2
The ostinato algorithm proposed in the paper finds the best radius
in `Ts`. Intuitively, the radius is the minimum distance of a
subsequence to encompass at least one nearest neighbor subsequence
from all other time series. The best radius in `Ts` is the minimum
radius amongst all radii. Some data sets might contain multiple
subsequences which have the same optimal radius.
The greedy Ostinato algorithm only finds one of them, which might
not be the most central motif. The most central motif amongst the
subsequences with the best radius is the one with the smallest mean
distance to nearest neighbors in all other time series. To find this
central motif it is necessary to search the subsequences with the
best radius via `stumpy.ostinato._get_central_motif`
"""
bsf_radius = np.inf
bsf_Ts_idx = 0
bsf_subseq_idx = 0
partial_mp_func = core._get_partial_mp_func(
mp_func, dask_client=dask_client, device_id=device_id
)
k = len(Ts)
for j in range(k):
if j < (k - 1):
h = j + 1
else:
h = 0
mp = partial_mp_func(Ts[j], m, Ts[h], ignore_trivial=False)
si = np.argsort(mp[:, 0])
for q in si:
Q = Ts[j][q : q + m]
Q_squared = np.sum(Q * Q)
radius = mp[q, 0]
if radius >= bsf_radius:
break
for i in range(k):
if i != j and i != h:
if np.any(~np.isfinite(Q)): # pragma: no cover
distance_profile = np.empty(Ts[i].shape[0] - m + 1)
distance_profile[:] = np.inf
else:
QT = core.sliding_dot_product(Ts[j][q : q + m], Ts[i])
distance_profile = core._mass_absolute(
Q_squared, Ts_squared[i], QT
)
distance_profile[~Ts_subseq_isfinite[i]] = np.inf
radius = np.max((radius, np.min(distance_profile)))
if radius >= bsf_radius:
break
if radius < bsf_radius:
bsf_radius, bsf_Ts_idx, bsf_subseq_idx = radius, j, q
return bsf_radius, bsf_Ts_idx, bsf_subseq_idx
def aamp_ostinato(Ts, m):
"""
Find the non-normalized (i.e., without z-normalization) consensus motif of multiple
time series
This is a wrapper around the vanilla version of the ostinato algorithm
which finds the best radius and a helper function that finds the most
central conserved motif.
Parameters
----------
Ts : list
A list of time series for which to find the most central consensus motif
m : int
Window size
Returns
-------
central_radius : float
Radius of the most central consensus motif
central_Ts_idx : int
The time series index in `Ts` which contains the most central consensus motif
central_subseq_idx : int
        The subsequence index within time series `Ts[central_Ts_idx]` that contains the
        most central consensus motif
Notes
-----
`DOI: 10.1109/ICDM.2019.00140 \
<https://www.cs.ucr.edu/~eamonn/consensus_Motif_ICDM_Long_version.pdf>`__
See Table 2
The ostinato algorithm proposed in the paper finds the best radius
in `Ts`. Intuitively, the radius is the minimum distance of a
subsequence to encompass at least one nearest neighbor subsequence
from all other time series. The best radius in `Ts` is the minimum
radius amongst all radii. Some data sets might contain multiple
subsequences which have the same optimal radius.
The greedy Ostinato algorithm only finds one of them, which might
not be the most central motif. The most central motif amongst the
subsequences with the best radius is the one with the smallest mean
distance to nearest neighbors in all other time series. To find this
central motif it is necessary to search the subsequences with the
best radius via `stumpy.ostinato._get_central_motif`
"""
Ts_squared = [None] * len(Ts)
Ts_subseq_isfinite = [None] * len(Ts)
for i, T in enumerate(Ts):
Ts[i], Ts_subseq_isfinite[i] = core.preprocess_non_normalized(T, m)
Ts_squared[i] = np.sum(core.rolling_window(Ts[i] * Ts[i], m), axis=1)
bsf_radius, bsf_Ts_idx, bsf_subseq_idx = _aamp_ostinato(
Ts, m, Ts_squared, Ts_subseq_isfinite
)
(central_radius, central_Ts_idx, central_subseq_idx,) = _get_aamp_central_motif(
Ts, bsf_radius, bsf_Ts_idx, bsf_subseq_idx, m, Ts_squared, Ts_subseq_isfinite
)
return central_radius, central_Ts_idx, central_subseq_idx
def aamp_ostinatoed(dask_client, Ts, m):
"""
Find the non-normalized (i.e., without z-normalization) consensus motif of multiple
time series with a distributed dask cluster
This is a wrapper around the vanilla version of the ostinato algorithm
which finds the best radius and a helper function that finds the most
central conserved motif.
Parameters
----------
dask_client : client
A Dask Distributed client that is connected to a Dask scheduler and
Dask workers. Setting up a Dask distributed cluster is beyond the
scope of this library. Please refer to the Dask Distributed
documentation.
Ts : list
A list of time series for which to find the most central consensus motif
m : int
Window size
Returns
-------
central_radius : float
Radius of the most central consensus motif
central_Ts_idx : int
The time series index in `Ts` which contains the most central consensus motif
central_subseq_idx : int
        The subsequence index within time series `Ts[central_Ts_idx]` that contains the
        most central consensus motif
Notes
-----
`DOI: 10.1109/ICDM.2019.00140 \
<https://www.cs.ucr.edu/~eamonn/consensus_Motif_ICDM_Long_version.pdf>`__
See Table 2
The ostinato algorithm proposed in the paper finds the best radius
in `Ts`. Intuitively, the radius is the minimum distance of a
subsequence to encompass at least one nearest neighbor subsequence
from all other time series. The best radius in `Ts` is the minimum
radius amongst all radii. Some data sets might contain multiple
subsequences which have the same optimal radius.
The greedy Ostinato algorithm only finds one of them, which might
not be the most central motif. The most central motif amongst the
subsequences with the best radius is the one with the smallest mean
distance to nearest neighbors in all other time series. To find this
central motif it is necessary to search the subsequences with the
best radius via `stumpy.ostinato._get_central_motif`
"""
Ts_squared = [None] * len(Ts)
Ts_subseq_isfinite = [None] * len(Ts)
for i, T in enumerate(Ts):
Ts[i], Ts_subseq_isfinite[i] = core.preprocess_non_normalized(T, m)
Ts_squared[i] = np.sum(core.rolling_window(Ts[i] * Ts[i], m), axis=1)
bsf_radius, bsf_Ts_idx, bsf_subseq_idx = _aamp_ostinato(
Ts, m, Ts_squared, Ts_subseq_isfinite, dask_client=dask_client, mp_func=aamped
)
(central_radius, central_Ts_idx, central_subseq_idx,) = _get_aamp_central_motif(
Ts, bsf_radius, bsf_Ts_idx, bsf_subseq_idx, m, Ts_squared, Ts_subseq_isfinite
)
return central_radius, central_Ts_idx, central_subseq_idx
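# Usage sketch (as comments, since this module uses relative imports and is meant
# to be used through the installed stumpy package). The function name and signature
# come from this file; the import path assumes the package layout shown above.
#
#   import numpy as np
#   from stumpy.aamp_ostinato import aamp_ostinato
#
#   np.random.seed(0)
#   Ts = [np.random.rand(n) for n in (256, 300, 280)]   # three synthetic time series
#   m = 20                                              # window size
#   radius, ts_idx, subseq_idx = aamp_ostinato(Ts, m)
#   consensus_motif = Ts[ts_idx][subseq_idx : subseq_idx + m]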
| 35.796915
| 88
| 0.668366
|
c14ef9ef02c8aa7dd860a77880c3ef8658cf1651
| 759
|
py
|
Python
|
testenv.py
|
Luoyayu/Machine-Learning
|
24cd81680cf901f3bf35d19f6cbae7df16b5e547
|
[
"MIT"
] | null | null | null |
testenv.py
|
Luoyayu/Machine-Learning
|
24cd81680cf901f3bf35d19f6cbae7df16b5e547
|
[
"MIT"
] | null | null | null |
testenv.py
|
Luoyayu/Machine-Learning
|
24cd81680cf901f3bf35d19f6cbae7df16b5e547
|
[
"MIT"
] | null | null | null |
import tensorflow, keras, torch, sklearn
import numpy, scipy, pandas
import matplotlib
import matplotlib.pylab as plt
import cv2, PIL
import IPython, jupyterlab
from termcolor import cprint
import os, sys
colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan']
def c(modules):
for i, module in enumerate(modules):
cprint("%-20s%-20s" % (module.__name__, module.__version__), color=colors[i%len(colors)], attrs=['bold'])
import_modules = [
tensorflow, keras, torch, sklearn,
numpy, scipy, pandas,
matplotlib,
cv2, PIL,
IPython, jupyterlab
]
c(import_modules)
cprint("sys platform: "+sys.platform, color='red', attrs=['blink'])
cprint(str(sys.version_info), color='green', attrs=['blink'])
| 28.111111
| 113
| 0.671937
|
dc2c96d4489b2d15ffe329c87549f31c307863d0
| 608
|
py
|
Python
|
examples/get_token.py
|
apivideo/api.video-python
|
7c5c70a9a638c2c1d3af18aabb09dda7b4db32a4
|
[
"MIT"
] | 6
|
2021-05-20T08:51:27.000Z
|
2021-10-07T16:04:32.000Z
|
examples/get_token.py
|
apivideo/python-api-client
|
9de1127aee8ed36c42084357bfa3cda54110554a
|
[
"MIT"
] | 1
|
2022-03-21T17:15:29.000Z
|
2022-03-22T09:10:10.000Z
|
examples/get_token.py
|
apivideo/python-api-client
|
9de1127aee8ed36c42084357bfa3cda54110554a
|
[
"MIT"
] | 1
|
2022-03-01T08:58:02.000Z
|
2022-03-01T08:58:02.000Z
|
# Get information about a single token using the token ID
import apivideo
from apivideo.apis import UploadTokensApi
from apivideo.exceptions import ApiAuthException
# Set variables
api_key = "your api key here"
token = "your token ID here"
# Set up the client
client = apivideo.AuthenticatedApiClient(api_key)
# If you'd rather use the sandbox environment:
# client = apivideo.AuthenticatedApiClient(api_key, production=False)
client.connect()
tokens_api = UploadTokensApi(client)
# Send your request to retrieve information about a specific token
response = tokens_api.get_token(token)
print(response)
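# Optional variation (a sketch, not part of the original example): the
# ApiAuthException imported above is otherwise unused; wrapping the request in a
# try/except makes authentication failures explicit. Shown as comments so the
# script above still performs a single request.
#
#   try:
#       response = tokens_api.get_token(token)
#       print(response)
#   except ApiAuthException as err:
#       print("Authentication failed - check your API key:", err)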
| 27.636364
| 69
| 0.804276
|
4e8061359da269053ee90808b14e247f3d572db5
| 876
|
py
|
Python
|
src/session/twitter/gui/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 21
|
2015-08-02T21:26:14.000Z
|
2019-12-27T09:57:44.000Z
|
src/session/twitter/gui/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 34
|
2015-01-12T00:38:14.000Z
|
2020-08-31T11:19:37.000Z
|
src/session/twitter/gui/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 15
|
2015-03-24T15:42:30.000Z
|
2020-09-24T20:26:42.000Z
|
import configuration
import list_manager
from user_list import UserListDialog
from api_count import APICountDialog
from twitter_message import TwitterMessageDialog
from new_tweet import NewTweetDialog
from new_dm import NewDirectDialog
from new_reply import NewReplyDialog
from search import TwitterSearchDialog
from follow import FollowDialog
from unfollow import UnfollowDialog
from profile import TwitterProfileDialog
from profile_update import UpdateProfileDialog
from individual import IndividualDialog
from followers import FollowersDialog
from friends import FriendsDialog
from favorites import FavoritesDialog
from user_info import UserInfoDialog
from local_trends import LocalTrendsDialog
from geo_location import GeoLocationDialog
from geo_location_input import GeoLocationInputDialog
from relationship_status import RelationshipStatusDialog
| 38.086957
| 57
| 0.876712
|
a288ae979ef7016fc1cb802c94ad9de5882033ca
| 3,653
|
py
|
Python
|
build.py
|
karellen/wheel-axle-runtime
|
1e1937c9fb470214f546bb8ca5ad9fbce739bcfe
|
[
"Apache-2.0"
] | 1
|
2022-03-16T09:00:51.000Z
|
2022-03-16T09:00:51.000Z
|
build.py
|
karellen/wheel-axle-runtime
|
1e1937c9fb470214f546bb8ca5ad9fbce739bcfe
|
[
"Apache-2.0"
] | 4
|
2022-01-29T19:39:27.000Z
|
2022-02-01T02:56:47.000Z
|
build.py
|
karellen/wheel-axle-runtime
|
1e1937c9fb470214f546bb8ca5ad9fbce739bcfe
|
[
"Apache-2.0"
] | 1
|
2022-02-21T06:01:47.000Z
|
2022-02-21T06:01:47.000Z
|
# -*- coding: utf-8 -*-
#
# (C) Copyright 2022 Karellen, Inc. (https://www.karellen.co/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pybuilder.core import (use_plugin, init, Author)
use_plugin("python.core")
use_plugin("python.integrationtest")
use_plugin("python.flake8")
use_plugin("python.coverage")
use_plugin("python.distutils")
use_plugin("python.pycharm")
use_plugin("python.coveralls")
use_plugin("copy_resources")
use_plugin("filter_resources")
name = "wheel-axle-runtime"
version = "0.0.3.dev"
summary = "Axle Runtime is the runtime part of the Python Wheel enhancement library"
authors = [Author("Karellen, Inc.", "supervisor@karellen.co")]
maintainers = [Author("Arcadiy Ivanov", "arcadiy@karellen.co")]
url = "https://github.com/karellen/wheel-axle-runtime"
urls = {
"Bug Tracker": "https://github.com/karellen/wheel-axle-runtime/issues",
"Source Code": "https://github.com/karellen/wheel-axle-runtime/",
"Documentation": "https://github.com/karellen/wheel-axle-runtime/"
}
license = "Apache License, Version 2.0"
requires_python = ">=3.7"
default_task = ["analyze", "publish"]
@init
def set_properties(project):
project.depends_on("pip")
project.depends_on("filelock")
project.set_property("coverage_break_build", False)
project.set_property("integrationtest_inherit_environment", True)
project.set_property("flake8_break_build", True)
project.set_property("flake8_extend_ignore", "E303,E402")
project.set_property("flake8_include_test_sources", True)
project.set_property("flake8_include_scripts", True)
project.set_property("flake8_max_line_length", 130)
project.set_property("copy_resources_target", "$dir_dist/wheel_axle/runtime")
project.get_property("copy_resources_glob").append("LICENSE")
project.include_file("wheel_axle/runtime", "LICENSE")
project.set_property("filter_resources_target", "$dir_dist")
project.get_property("filter_resources_glob").append("wheel_axle/runtime/__init__.py")
project.set_property("distutils_readme_description", True)
project.set_property("distutils_description_overwrite", True)
project.set_property("distutils_upload_skip_existing", True)
project.set_property("distutils_setup_keywords", ["wheel", "packaging",
"setuptools", "bdist_wheel",
"symlink", "postinstall"])
project.set_property("pybuilder_header_plugin_break_build", False)
project.set_property("distutils_classifiers", [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Topic :: System :: Archiving :: Packaging",
"Topic :: Software Development :: Build Tools",
"Intended Audience :: Developers",
"Development Status :: 4 - Beta"
])
| 38.861702
| 90
| 0.701068
|
e564cb38713b30beeaa0301fdfbb0ee7465cf3a8
| 748
|
py
|
Python
|
alipay/aop/api/response/AlipayFundTransEdumigrateMigrateserviceModifyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayFundTransEdumigrateMigrateserviceModifyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayFundTransEdumigrateMigrateserviceModifyResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayFundTransEdumigrateMigrateserviceModifyResponse(AlipayResponse):
def __init__(self):
super(AlipayFundTransEdumigrateMigrateserviceModifyResponse, self).__init__()
self._result = None
@property
def result(self):
return self._result
@result.setter
def result(self, value):
self._result = value
def parse_response_content(self, response_content):
response = super(AlipayFundTransEdumigrateMigrateserviceModifyResponse, self).parse_response_content(response_content)
if 'result' in response:
self.result = response['result']
| 28.769231
| 126
| 0.727273
|
792055328fb79226606fce106fdc0481bcae4925
| 1,421
|
py
|
Python
|
implementation/dataloader.py
|
minus31/BlazeFace
|
bddfb3261868b1a888408898c3de9bf6e12a372d
|
[
"MIT"
] | 44
|
2019-08-31T03:08:10.000Z
|
2022-03-25T09:10:05.000Z
|
implementation/dataloader.py
|
minus31/BlazeFace
|
bddfb3261868b1a888408898c3de9bf6e12a372d
|
[
"MIT"
] | 4
|
2019-09-10T07:25:00.000Z
|
2022-01-08T17:40:18.000Z
|
implementation/dataloader.py
|
minus31/BlazeFace
|
bddfb3261868b1a888408898c3de9bf6e12a372d
|
[
"MIT"
] | 10
|
2019-09-04T12:16:48.000Z
|
2021-09-03T07:02:44.000Z
|
import cv2
import pickle
import glob
import os
import numpy as np
IM_EXTENSIONS = ['png', 'jpg', 'bmp']
def read_img(img_path, img_shape=(128,128)):
"""
load image file and divide by 255.
"""
img = cv2.imread(img_path)
img = cv2.resize(img, img_shape)
    img = img.astype(np.float32) / 255.  # cv2.imread returns uint8; cast before scaling
return img
def dataloader(dataset_dir, label_path, batch_size=32, img_shape=(128, 128)):
"""
data loader
return image, [class_label, class_and_location_label]
"""
img_files = glob.glob(dataset_dir)
img_files = [f for f in img_files if f[-3:] in IM_EXTENSIONS]
with open(label_path, "rb") as f:
labels = pickle.load(f)
    numofData = len(img_files)  # number of files ending with png, jpg, ...
data_idx = np.arange(numofData)
while True:
batch_idx = np.random.choice(data_idx, size=batch_size, replace=False)
batch_img = []
batch_label = []
batch_label_cls = []
for i in batch_idx:
img = read_img(img_files[i], img_shape=img_shape)
label = labels[i]
batch_img.append(img)
batch_label.append(label)
batch_label_cls.append(label[0:1])
        yield (
            np.array(batch_img, dtype=np.float32),
            [np.array(batch_label_cls, dtype=np.float32), np.array(batch_label, dtype=np.float32)],
        )
if __name__ == "__main__":
pass
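    # Minimal usage sketch. The paths below are hypothetical placeholders; the
    # generator yields (images, [class_labels, full_labels]) batches as described
    # in the dataloader docstring.
    example_dir = "./data/train/*"          # hypothetical glob pattern for images
    example_labels = "./data/labels.pkl"    # hypothetical pickled label list
    if glob.glob(example_dir) and os.path.exists(example_labels):
        gen = dataloader(example_dir, example_labels, batch_size=8)
        imgs, (cls_labels, full_labels) = next(gen)
        print(imgs.shape, cls_labels.shape, full_labels.shape)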
| 22.919355
| 94
| 0.594652
|
d8bde17fee9517eb2241cecfe8cf3c5e3b9eccc3
| 2,203
|
py
|
Python
|
hsds/util/attrUtil.py
|
JonosGit/hsds
|
4abc4fc22c1e75cc9b15c879c8d00448a115fc92
|
[
"Apache-2.0"
] | 1
|
2020-03-12T12:26:26.000Z
|
2020-03-12T12:26:26.000Z
|
hsds/util/attrUtil.py
|
JonosGit/hsds
|
4abc4fc22c1e75cc9b15c879c8d00448a115fc92
|
[
"Apache-2.0"
] | null | null | null |
hsds/util/attrUtil.py
|
JonosGit/hsds
|
4abc4fc22c1e75cc9b15c879c8d00448a115fc92
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
#
# attribute related utilities
#
from aiohttp.web_exceptions import HTTPBadRequest, HTTPInternalServerError
import hsds_logger as log
def getRequestCollectionName(request):
""" request is in the form /(datasets|groups|datatypes)/<id>/attributes(/<name>),
    return datasets | groups | datatypes
"""
uri = request.path
npos = uri.find('/')
if npos < 0:
msg = "bad request uri"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
uri = uri[(npos+1):]
npos = uri.find('/') # second '/'
col_name = uri[:npos]
log.debug('got collection name: [' + col_name + ']')
if col_name not in ('datasets', 'groups', 'datatypes'):
msg = "Error: collection name unexpected: {}".format(col_name)
log.error(msg)
# shouldn't get routed here in this case
raise HTTPInternalServerError()
return col_name
def validateAttributeName(name):
""" verify that the attribute name is valid
"""
if not isinstance(name, str):
msg = "attribute name must be a string, but got: {}".format(type(name))
log.warn(msg)
raise HTTPBadRequest(reason=msg)
if name.find('/') > -1:
msg = "attribute names cannot contain slashes"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
# TBD - add any other restrictions
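if __name__ == "__main__":
    # Small self-check sketch (an addition for illustration, not part of HSDS):
    # valid names pass silently, names containing '/' raise HTTPBadRequest.
    validateAttributeName("temperature")
    try:
        validateAttributeName("bad/name")
    except HTTPBadRequest:
        print("rejected attribute name containing a slash, as expected")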
| 38.649123
| 85
| 0.533818
|
c5914ba3bb3716efabe834afd8a988be077a0f88
| 7,261
|
py
|
Python
|
util/LoadDatabase.py
|
MaxZZG/ExprLib
|
c35e361ef6af365e7cd6afca6548595693bd149a
|
[
"MIT"
] | null | null | null |
util/LoadDatabase.py
|
MaxZZG/ExprLib
|
c35e361ef6af365e7cd6afca6548595693bd149a
|
[
"MIT"
] | null | null | null |
util/LoadDatabase.py
|
MaxZZG/ExprLib
|
c35e361ef6af365e7cd6afca6548595693bd149a
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------
import numpy
class FieldInfo:
'''Provides information about the field including its name, size, and ghosts'''
    def __init__(self, name='', npts=(0, 0, 0), ng=0):
        self.name = name
        self.npts = numpy.array( npts )
        self.nghost = ng
def __str__(self):
return "%s size:%s nghost:%s" % (self.name, self.npts, self.nghost)
# --------------------------------------------------------------------
class Field:
'''Holds a field including its data and FieldInfo'''
def __init__(self, info, data):
assert isinstance( info, FieldInfo )
assert isinstance( data, numpy.ndarray )
self.__data = data
self.__info = info
def name(self): return self.__info.name
def npoints(self): return self.__info.npts
def nghost(self): return self.__info.nghost
def data(self): return self.__data
# --------------------------------------------------------------------
class DatabaseEntry:
'''Holds metadata for one entry in the database.
Each DatabaseEntry subdirectory should have files 'fields.dat' and 'metadata.txt'.
When constructed, the metadata is loaded. Field values are loaded on demand.'''
def __init__(self, dir):
'''Build a DatabaseEntry using information in the directory 'dir'
'dir' should contain 'metadata.txt' and 'fields.dat' files'''
self.__hasBeenLoaded = False
self.name = dir.rsplit( '/' ).pop( )
self.__file = dir + '/' + 'fields.dat'
self.__mdentries = {}
self.__load_metadata( dir )
def get_field_names(self):
'''Obtain a list of names of fields that are present'''
names = []
for fi in self.__fieldInfo:
names.append( fi.name )
return names
def get_field(self, varname):
'''Obtain the requested field as a Field object'''
if not self.__hasBeenLoaded:
self.__load_data( )
field = self.__fields.get( varname )
if field is None:
raise RuntimeError( 'Variable "' + varname + '" was not found in DatabaseEntry ' + self.name )
return field
def get_metadata_entries(self):
'''Obtain a list of metadata key entries available for query. See also 'get_metadata' '''
entries = []
for entry in iter(self.__mdentries):
entries.append(entry)
return entries
    def get_metadata(self, varname):
'''Return any other metadata stored in this file'''
return self.__mdentries[varname]
def __load_metadata(self, dir):
f = open( dir + '/metadata.txt', 'r' )
self.__fieldInfo = []
self.__ndoubles = 0
self.__nints = 0
self.__nstrings = 0
endOfFields = False
nIntRead = 0
nDblRead = 0
nStrRead = 0
for line in f:
# at the end of the list of fields, we store extra metadata. First determine how many entries
if line.find( '# number of doubles' ) != -1:
self.__ndoubles = int( line[0:line.find( '#' )] )
endOfFields = True
elif line.find( '# number of ints' ) != -1:
self.__nints = int( line[0:line.find( '#' )] )
endOfFields = True
elif line.find( '# number of strings' ) != -1:
self.__nstrings = int( line[0:line.find( '#' )] )
endOfFields = True
elif endOfFields: # we are now ready to load the extra metadata entries as key-value pairs, properly typed.
sp = line.split( '=' )
key = sp[0].strip( );
val = 0
if nDblRead < self.__ndoubles:
nDblRead = nDblRead + 1
val = float( sp[1].strip( ) )
elif nIntRead < self.__nints:
nIntRead = nIntRead + 1
val = int( sp[1].strip( ) )
elif nStrRead < self.__nstrings:
                    nStrRead = nStrRead + 1
                    val = sp[1].strip( )
self.__mdentries[key] = val
else: # field entry
a = line.split( ':' )
fieldName = a[0].strip( )
npts = a[1].strip( ).strip( '[' ).strip( ']' ).split( ',' )
ng = int( a[2].strip( ) )
self.__fieldInfo.append( FieldInfo( fieldName, [int( npts[0] ), int( npts[1] ), int( npts[2] )], ng ) )
def __load_data(self):
if self.__hasBeenLoaded:
return
else:
print('loading data for ' + self.name)
f = open( self.__file )
self.__fields = {}
i = int( 0 )
for line in f:
data = [float( j ) for j in line.strip( ).split( )]
fi = self.__fieldInfo[i]
data = numpy.array( data ).reshape( fi.npts )
self.__fields[fi.name] = Field( fi, data )
i = i + 1
self.__hasBeenLoaded = True
# --------------------------------------------------------------------
class FieldDatabase:
'''Holds a database of fields at various points in time'''
def __init__(self, dir):
'''Build a FieldDatabase from the information in the supplied directory.
The root directory should provide a file called 'databases.txt' that enumerates each of the subdirectories.
Each subdirectory should contain 'metadata.txt' and 'fields.dat' files'''
assert isinstance( dir, str )
f = open( dir + '/databases.txt', 'r' )
self.__entries = []
self.__subdirs = []
for line in f:
subdir = line.strip( )
self.__entries.append( DatabaseEntry( dir + '/' + subdir ) )
self.__subdirs.append( subdir )
def get_database_entry(self, dir):
'''Obtain the requested database entry (sub-directory)'''
return self.__entries[self.__subdirs.index( dir )]
def get_field(self, dir, var):
'''Obtain the field 'var' from the subdirectory 'dir' '''
return self.get_database_entry( dir ).get_field( var )
def get_all_field_data(self, var):
'''Obtain the requested field over all database entries (e.g., for time-varying field values)'''
field = []
for db in self.__entries:
field.append( db.get_field( var ).data() )
return field
def get_entry_names(self):
'''Obtain the list of subdirectory names for this database'''
return self.__subdirs
# --------------------------------------------------------------------
# fi = DatabaseEntry('/Users/james/tmp/odt/test/CCK_single_particle.db/0')
# print fi.get_field_names()
# x = fi.get_data('T')
# fd = FieldDatabase( '/Users/james/tmp/odt/test/CCK_single_particle.db' )
# de = fd.get_database_entry( '0' )
# T = de.get_field( 'T' )
# print T.data( )
# a = de.get_field('failure')
# print de.get_metadata("doubleEntry")
# help( DatabaseEntry )
# help( FieldDatabase )
# print de.get_field_names()
# fd = FieldDatabase('/Users/james/tmp/zdc/database_tstep0')
# de = fd.get_database_entry('iter1')
# print de.get_metadata('Zodiac-Date')
| 36.487437
| 120
| 0.546757
|
c4b7678cf2c63df388c8eff22f694fb2a4ae464b
| 435
|
py
|
Python
|
src/226. Invert Binary Tree.py
|
rajshrivastava/LeetCode
|
dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0
|
[
"MIT"
] | 1
|
2019-12-16T08:18:25.000Z
|
2019-12-16T08:18:25.000Z
|
src/226. Invert Binary Tree.py
|
rajshrivastava/LeetCode
|
dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0
|
[
"MIT"
] | null | null | null |
src/226. Invert Binary Tree.py
|
rajshrivastava/LeetCode
|
dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def invertTree(self, root: 'TreeNode') -> 'TreeNode':
if not root: return
root.right, root.left = root.left, root.right
self.invertTree(root.left)
self.invertTree(root.right)
return root
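if __name__ == "__main__":
    # Illustrative check (not part of the LeetCode submission). TreeNode mirrors
    # the commented definition above so the snippet can run on its own.
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    root = TreeNode(1, TreeNode(2), TreeNode(3))
    inverted = Solution().invertTree(root)
    print(inverted.left.val, inverted.right.val)  # 3 2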
| 31.071429
| 55
| 0.613793
|
f25be94f54fcc33e547b855ac4e0f939eb19bc14
| 64,398
|
py
|
Python
|
yaetos/etl_utils.py
|
arthurprevot/yaetos
|
5eba59538f8e53c3d7033b0af80a25828c24a43e
|
[
"Apache-2.0"
] | 8
|
2022-02-15T20:39:55.000Z
|
2022-03-24T20:50:18.000Z
|
yaetos/etl_utils.py
|
arthurprevot/yaetos
|
5eba59538f8e53c3d7033b0af80a25828c24a43e
|
[
"Apache-2.0"
] | 2
|
2021-11-01T20:42:32.000Z
|
2021-12-09T23:13:47.000Z
|
yaetos/etl_utils.py
|
arthurprevot/yaetos
|
5eba59538f8e53c3d7033b0af80a25828c24a43e
|
[
"Apache-2.0"
] | 2
|
2022-03-18T00:59:29.000Z
|
2022-03-24T20:52:00.000Z
|
"""
Helper functions. Setup to run locally and on cluster.
"""
# TODO:
# - add linter
# - finish _metadata.txt file content.
# - get inputs and output by commandline (with all related params used in yml, like 'type', 'incr'...).
# - better check that db copy is in sync with S3.
# - way to run all jobs from 1 cmd line.
import inspect
import yaml
from datetime import datetime
import os
import boto3
import argparse
from time import time
import networkx as nx
import pandas as pd
import gc
from pprint import pformat
import smtplib
import ssl
from dateutil.relativedelta import relativedelta
import yaetos.spark_utils as su
from yaetos.git_utils import Git_Config_Manager
from yaetos.env_dispatchers import FS_Ops_Dispatcher, Cred_Ops_Dispatcher
from yaetos.logger import setup_logging
logger = setup_logging('Job')
# imports should not include any native spark libs, to work in pandas without spark.
# User settable params below can be changed from command line or yml or job inputs.
JOBS_METADATA_FILE = 'conf/jobs_metadata.yml'
AWS_CONFIG_FILE = 'conf/aws_config.cfg'
CONNECTION_FILE = 'conf/connections.cfg'
CLUSTER_APP_FOLDER = '/home/hadoop/app/' # TODO: check to remove it and replace it by LOCAL_JOB_FOLDER in code, now that LOCAL_JOB_FOLDER uses 'os.getcwd()'
CI_APP_FOLDER = '/home/runner/work/yaetos/yaetos/' # TODO: check to remove it now that LOCAL_JOB_FOLDER uses 'os.getcwd()'
LOCAL_FRAMEWORK_FOLDER = os.environ.get('YAETOS_FRAMEWORK_HOME', '') # YAETOS_FRAMEWORK_HOME should end with '/'. Only useful when using job folder separate from framework folder and framework folder is yaetos repo (no pip installed).
LOCAL_JOB_FOLDER = (os.getcwd() + '/') or os.environ.get('YAETOS_JOBS_HOME', '') # location of folder with jobs, regardless of where framework code is (in main repo or pip installed). It will be the same as LOCAL_FRAMEWORK_FOLDER when the jobs are in the main repo.
AWS_SECRET_ID = '/yaetos/connections'
JOB_FOLDER = 'jobs/'
PACKAGES_EMR = ['com.databricks:spark-redshift_2.11:2.0.1', 'org.apache.spark:spark-avro_2.11:2.4.0', 'mysql:mysql-connector-java:8.0.22', 'org.postgresql:postgresql:42.2.18'] # necessary for reading/writing to redshift, mysql & clickhouse using spark connector.
PACKAGES_EMR_ALT = ['io.github.spark-redshift-community:spark-redshift_2.12:5.0.3', 'org.apache.spark:spark-avro_2.12:3.1.1', 'mysql:mysql-connector-java:8.0.22', 'org.postgresql:postgresql:42.2.18'] # same but compatible with spark 3.
PACKAGES_LOCAL = PACKAGES_EMR + ['com.amazonaws:aws-java-sdk-pom:1.11.760', 'org.apache.hadoop:hadoop-aws:2.7.0']
PACKAGES_LOCAL_ALT = PACKAGES_EMR_ALT + ['com.amazonaws:aws-java-sdk-pom:1.11.760', 'org.apache.hadoop:hadoop-aws:2.7.0'] # will probably need to be moved to hadoop-aws:3.2.1 to work locally.
JARS = 'https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/1.2.41.1065/RedshiftJDBC42-no-awssdk-1.2.41.1065.jar' # not available in public repo so cannot be put in "packages" var.
class ETL_Base(object):
TABULAR_TYPES = ('csv', 'parquet', 'excel', 'df', 'mysql', 'clickhouse')
SPARK_DF_TYPES = ('csv', 'parquet', 'excel', 'df', 'mysql', 'clickhouse')
PANDAS_DF_TYPES = ('csv', 'parquet', 'excel', 'df')
FILE_TYPES = ('csv', 'parquet', 'excel', 'txt')
OTHER_TYPES = ('other', 'None')
SUPPORTED_TYPES = set(TABULAR_TYPES) \
.union(set(SPARK_DF_TYPES)) \
.union(set(PANDAS_DF_TYPES)) \
.union(set(FILE_TYPES)) \
.union(set(OTHER_TYPES))
def __init__(self, pre_jargs={}, jargs=None, loaded_inputs={}):
self.loaded_inputs = loaded_inputs
self.jargs = self.set_jargs(pre_jargs, loaded_inputs) if not jargs else jargs
if self.jargs.manage_git_info:
git_yml = Git_Config_Manager().get_config(mode=self.jargs.mode, local_app_folder=LOCAL_FRAMEWORK_FOLDER, cluster_app_folder=CLUSTER_APP_FOLDER)
[git_yml.pop(key, None) for key in ('diffs_current', 'diffs_yaetos') if git_yml]
logger.info('Git info {}'.format(git_yml))
def etl(self, sc, sc_sql):
""" Main function. If incremental, reruns ETL process multiple time until
fully loaded, otherwise, just run ETL once.
It's a way to deal with case where full incremental rerun from scratch would
require a larger cluster to build in 1 shot than the typical incremental.
"""
try:
if not self.jargs.is_incremental:
output = self.etl_one_pass(sc, sc_sql, self.loaded_inputs)
else:
output = self.etl_multi_pass(sc, sc_sql, self.loaded_inputs)
except Exception as err:
            if self.jargs.mode in ('prod_EMR',) and self.jargs.merged_args.get('owners'):
self.send_job_failure_email(err)
raise Exception("Job failed, error: \n{}".format(err))
self.out_df = output
return output
def etl_multi_pass(self, sc, sc_sql, loaded_inputs={}):
needs_run = True
ii = 0
while needs_run: # TODO: check to rewrite as for loop. Simpler and avoiding potential infinite loops.
# TODO: isolate code below into separate functions.
ii += 1
if self.jargs.merged_args.get('job_increment') == 'daily':
if ii == 1:
first_day = self.jargs.merged_args['first_day']
last_run_period = self.get_last_run_period_daily(sc, sc_sql)
periods = Period_Builder().get_last_output_to_last_day(last_run_period, first_day)
if len(periods) == 0:
logger.info('Output up to date. Nothing to run. last processed period={} and last period from now={}'.format(last_run_period, Period_Builder.get_last_day()))
output = su.create_empty_sdf(sc_sql)
self.final_inc = True # remove "self." when sandbox job doesn't depend on it.
else:
logger.info('Periods remaining to load: {}'.format(periods))
period = periods[0]
logger.info('Period to be loaded in this run: {}'.format(period))
self.period = period # to be captured in etl_one_pass, needed for in database filtering.
self.period_next = periods[1] if len(periods) >= 2 else None # same
self.jargs.merged_args['file_tag'] = period
output = self.etl_one_pass(sc, sc_sql, loaded_inputs)
self.final_inc = period == periods[-1]
periods.pop(0) # for next increment.
else:
raise Exception("'job_increment' param has to be set to 'daily'")
if self.jargs.rerun_criteria == 'last_date': # i.e. stop when reached final increment, i.e. current period is last to process. Pb: can go in infinite loop if missing data.
needs_run = not self.final_inc
elif self.jargs.rerun_criteria == 'output_empty': # i.e. stop when current inc is empty. Good to deal with late arriving data, but will be a pb if some increment doesn't have data and will never have.
needs_run = not self.output_empty
elif self.jargs.rerun_criteria == 'both':
needs_run = not (self.output_empty or self.final_inc)
if needs_run:
del(output)
gc.collect()
logger.info('Incremental build needs other run -> {}'.format(needs_run))
# TODO: check to change output to reload all outputs from inc build
return output
def etl_one_pass(self, sc, sc_sql, loaded_inputs={}):
""" Main etl function, loads inputs, runs transform, and saves output."""
logger.info("-------Starting running job '{}'--------".format(self.jargs.job_name))
start_time = time()
self.start_dt = datetime.utcnow() # attached to self so available within "transform()" func.
output, schemas = self.etl_no_io(sc, sc_sql, loaded_inputs)
if output is None:
if self.jargs.is_incremental:
logger.info("-------End job '{}', increment with empty output--------".format(self.jargs.job_name))
self.output_empty = True
else:
logger.info("-------End job '{}', no output--------".format(self.jargs.job_name))
# TODO: add process time in that case.
return None
if not self.jargs.no_fw_cache or (self.jargs.is_incremental and self.jargs.rerun_criteria == 'output_empty'):
logger.info('Output sample:')
try:
output.show()
except Exception as e:
logger.info("Warning: Failed showing table sample with error '{}'.".format(e))
pass
count = output.count()
logger.info('Output count: {}'.format(count))
if self.jargs.output.get('df_type', 'spark') == 'spark':
logger.info("Output data types: {}".format(pformat([(fd.name, fd.dataType) for fd in output.schema.fields])))
self.output_empty = count == 0
self.save_output(output, self.start_dt)
end_time = time()
elapsed = end_time - start_time
logger.info('Process time to complete (post save to file but pre copy to db if any, also may not include processing if output not saved): {} s'.format(elapsed))
if self.jargs.save_schemas and schemas:
schemas.save_yaml(self.jargs.job_name)
# self.save_metadata(elapsed) # disable for now to avoid spark parquet reading issues. TODO: check to re-enable.
if self.jargs.merged_args.get('copy_to_redshift') and self.jargs.enable_redshift_push:
self.copy_to_redshift_using_spark(output) # to use pandas: self.copy_to_redshift_using_pandas(output, self.OUTPUT_TYPES)
if self.jargs.merged_args.get('copy_to_clickhouse') and self.jargs.enable_redshift_push: # TODO: rename enable_redshift_push to enable_db_push since not redshift here.
self.copy_to_clickhouse(output)
if self.jargs.merged_args.get('copy_to_kafka'):
self.push_to_kafka(output, self.OUTPUT_TYPES)
if self.jargs.output.get('df_type', 'spark') == 'spark':
output.unpersist()
end_time = time()
elapsed = end_time - start_time
logger.info('Process time to complete job (post db copies if any): {} s'.format(elapsed))
logger.info("-------End job '{}'--------".format(self.jargs.job_name))
return output
def etl_no_io(self, sc, sc_sql, loaded_inputs={}, jargs=None):
""" Function to load inputs (including from live vars) and run transform. No output to disk.
Having this code isolated is useful for cases with no I/O possible, like testing."""
self.jargs = jargs or self.jargs
self.sc = sc
self.sc_sql = sc_sql
self.app_name = sc.appName if sc else self.jargs.job_name
self.logger = logger
if self.jargs.job_name != self.app_name:
logger.info("... part of spark app '{}'".format(self.app_name))
loaded_datasets = self.load_inputs(loaded_inputs)
output = self.transform(**loaded_datasets)
if output is not None and self.jargs.output['type'] in self.TABULAR_TYPES and self.jargs.output.get('df_type', 'spark') == 'spark':
if self.jargs.add_created_at == 'true':
output = su.add_created_at(output, self.start_dt)
output.cache()
schemas = Schema_Builder()
schemas.generate_schemas(loaded_datasets, output)
else:
schemas = None
return output, schemas
def transform(self, **app_args):
""" The function that needs to be overriden by each specific job."""
raise NotImplementedError
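    # Sketch of how a concrete job would override transform() (shown as comments;
    # a real job lives in its own file under jobs/ and is launched through the
    # framework, so the class name and input names below are assumptions):
    #
    #   class Job(ETL_Base):
    #       def transform(self, some_events, other_table):
    #           # inputs arrive already loaded and registered as temp views, so
    #           # plain spark-sql against their names works via self.query():
    #           df = self.query("""
    #               SELECT e.session_id, count(*) AS n
    #               FROM some_events e
    #               JOIN other_table o ON o.session_id = e.session_id
    #               GROUP BY e.session_id
    #           """)
    #           return df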
def get_last_run_period_daily(self, sc, sc_sql):
previous_output_max_timestamp = self.get_previous_output_max_timestamp(sc, sc_sql)
last_run_period = previous_output_max_timestamp.strftime("%Y-%m-%d") if previous_output_max_timestamp else None # TODO: if get_output_max_timestamp()=None, means new build, so should delete instance in DBs.
return last_run_period
def set_jargs(self, pre_jargs, loaded_inputs={}):
""" jargs means job args. Function called only if running the job directly, i.e. "python some_job.py"""
py_job = self.set_py_job()
job_name = Job_Yml_Parser.set_job_name_from_file(py_job)
return Job_Args_Parser(defaults_args=pre_jargs['defaults_args'], yml_args=None, job_args=pre_jargs['job_args'], cmd_args=pre_jargs['cmd_args'], job_name=job_name, loaded_inputs=loaded_inputs) # set yml_args=None so loading yml is handled in Job_Args_Parser()
def set_py_job(self):
""" Returns the file being executed. For ex, when running "python some_job.py", this functions returns "some_job.py".
Only gives good output when the job is launched that way."""
py_job = inspect.getsourcefile(self.__class__)
logger.info("py_job: '{}'".format(py_job))
return py_job
def load_inputs(self, loaded_inputs):
app_args = {}
for item in self.jargs.inputs.keys():
# Load from memory if available
if item in loaded_inputs.keys():
app_args[item] = loaded_inputs[item]
logger.info("Input '{}' passed in memory from a previous job.".format(item))
continue
# Skip "other" types
if self.jargs.inputs[item]['type'] == "other":
app_args[item] = None
logger.info("Input '{}' not loaded since type set to 'other'.".format(item))
continue
# Load from disk
app_args[item] = self.load_input(item)
logger.info("Input '{}' loaded.".format(item))
            if self.jargs.is_incremental and self.jargs.inputs[item]['type'] not in ('mysql', 'clickhouse'):
if self.jargs.merged_args.get('motm_incremental'):
app_args = self.filter_incremental_inputs_motm(app_args)
else:
app_args = self.filter_incremental_inputs_period(app_args)
self.sql_register(app_args)
return app_args
def filter_incremental_inputs_motm(self, app_args):
"""Filter based on Min Of The Max (motm) of all inputs. Good to deal with late arriving data or async load but
gets stuck if 1 input never has any new data arriving.
Assumes increment fields are datetime."""
min_dt = self.get_previous_output_max_timestamp(self.sc, self.sc_sql) if len(app_args.keys()) > 0 else None
# Get latest timestamp in common across incremental inputs
maxes = []
for item in app_args.keys():
            input_is_tabular = self.jargs.inputs[item]['type'] in self.TABULAR_TYPES and self.jargs.inputs[item].get('df_type', 'spark') == 'spark'
inc = self.jargs.inputs[item].get('inc_field', None)
if input_is_tabular and inc:
max_dt = app_args[item].agg({inc: "max"}).collect()[0][0]
maxes.append(max_dt)
max_dt = min(maxes) if len(maxes) > 0 else None
# Filter
for item in app_args.keys():
            input_is_tabular = self.jargs.inputs[item]['type'] in self.TABULAR_TYPES and self.jargs.inputs[item].get('df_type', 'spark') == 'spark'
inc = self.jargs.inputs[item].get('inc_field', None)
if inc:
if input_is_tabular:
# TODO: add limit to amount of input data, and set self.final_inc=False
# inc_type = {k: v for k, v in app_args[item].dtypes}[inc] # TODO: add check that inc_type is timestamp
logger.info("Input dataset '{}' will be filtered for min_dt={} max_dt={}".format(item, min_dt, max_dt))
if min_dt:
# min_dt = to_date(lit(s)).cast(TimestampType() # TODO: deal with dt type, as coming from parquet
app_args[item] = app_args[item].filter(app_args[item][inc] > min_dt)
if max_dt:
app_args[item] = app_args[item].filter(app_args[item][inc] <= max_dt)
else:
raise Exception("Incremental loading is not supported for unstructured input. You need to handle the incremental logic in the job code.")
return app_args
def filter_incremental_inputs_period(self, app_args):
"""Filter based on period defined in. Simple but can be a pb if late arriving data or dependencies not run.
Inputs filtered inside source database will be filtered again."""
for item in app_args.keys():
            input_is_tabular = self.jargs.inputs[item]['type'] in self.TABULAR_TYPES and self.jargs.inputs[item].get('df_type', 'spark') == 'spark'
inc = self.jargs.inputs[item].get('inc_field', None)
if inc:
if input_is_tabular:
# TODO: add limit to amount of input data, and set self.final_inc=False
# inc_type = {k: v for k, v in app_args[item].dtypes}[inc] # TODO: add check that inc_type is timestamp
logger.info("Input dataset '{}' will be filtered for {}='{}'".format(item, inc, self.period))
app_args[item] = app_args[item].filter(app_args[item][inc] == self.period)
else:
raise Exception("Incremental loading is not supported for unstructured input. You need to handle the incremental logic in the job code.")
return app_args
def sql_register(self, app_args):
for item in app_args.keys():
input_is_tabular = hasattr(app_args[item], "rdd") # assuming DataFrame will keep 'rdd' attribute
# ^ better than using self.jargs.inputs[item]['type'] in self.TABULAR_TYPES since doesn't require 'type' being defined.
if input_is_tabular:
app_args[item].createOrReplaceTempView(item)
def load_input(self, input_name):
input_type = self.jargs.inputs[input_name]['type']
if input_type in self.FILE_TYPES:
path = self.jargs.inputs[input_name]['path']
path = path.replace('s3://', 's3a://') if self.jargs.mode == 'dev_local' else path
logger.info("Input '{}' to be loaded from files '{}'.".format(input_name, path))
path = Path_Handler(path, self.jargs.base_path).expand_later()
self.jargs.inputs[input_name]['path_expanded'] = path
# Unstructured type
if input_type == 'txt':
rdd = self.sc.textFile(path)
logger.info("Input '{}' loaded from files '{}'.".format(input_name, path))
return rdd
# Tabular, Pandas
if self.jargs.inputs[input_name].get('df_type') == 'pandas':
if input_type == 'csv':
pdf = FS_Ops_Dispatcher().load_pandas(path, file_type='csv', read_func='read_csv', read_kwargs=self.jargs.inputs[input_name].get('read_kwargs', {}))
elif input_type == 'parquet':
pdf = FS_Ops_Dispatcher().load_pandas(path, file_type='parquet', read_func='read_parquet', read_kwargs=self.jargs.inputs[input_name].get('read_kwargs', {}))
elif input_type == 'excel':
pdf = FS_Ops_Dispatcher().load_pandas(path, file_type='excel', read_func='read_excel', read_kwargs=self.jargs.inputs[input_name].get('read_kwargs', {}))
else:
raise Exception("Unsupported input type '{}' for path '{}'. Supported types for pandas are: {}. ".format(input_type, self.jargs.inputs[input_name].get('path'), self.PANDAS_DF_TYPES))
logger.info("Input '{}' loaded from files '{}'.".format(input_name, path))
# logger.info("Input data types: {}".format(pformat([(fd.name, fd.dataType) for fd in sdf.schema.fields]))) # TODO adapt to pandas
return pdf
# Tabular types, Spark
if input_type == 'csv':
delimiter = self.jargs.merged_args.get('csv_delimiter', ',')
sdf = self.sc_sql.read.option("delimiter", delimiter).csv(path, header=True)
logger.info("Input '{}' loaded from files '{}'.".format(input_name, path))
elif input_type == 'parquet':
sdf = self.sc_sql.read.parquet(path)
logger.info("Input '{}' loaded from files '{}'.".format(input_name, path))
elif input_type == 'mysql':
sdf = self.load_mysql(input_name)
logger.info("Input '{}' loaded from mysql".format(input_name))
elif input_type == 'clickhouse':
sdf = self.load_clickhouse(input_name)
logger.info("Input '{}' loaded from clickhouse".format(input_name))
else:
raise Exception("Unsupported input type '{}' for path '{}'. Supported types are: {}. ".format(input_type, self.jargs.inputs[input_name].get('path'), self.SUPPORTED_TYPES))
logger.info("Input data types: {}".format(pformat([(fd.name, fd.dataType) for fd in sdf.schema.fields])))
return sdf
def load_data_from_files(self, name, path, type, sc, sc_sql):
"""Loading any dataset (input or not) and only from file system (not from DBs). Used by incremental jobs to load previous output.
Different from load_input() which only loads input (input jargs hardcoded) and from any source."""
# TODO: integrate with load_input to remove duplicated code.
input_type = type
input_name = name
path = path.replace('s3://', 's3a://') if self.jargs.mode == 'dev_local' else path
logger.info("Dataset '{}' to be loaded from files '{}'.".format(input_name, path))
path = Path_Handler(path, self.jargs.base_path).expand_later()
self.jargs.inputs[input_name]['path_expanded'] = path
if input_type == 'txt':
rdd = self.sc.textFile(path)
logger.info("Dataset '{}' loaded from files '{}'.".format(input_name, path))
return rdd
# Tabular types
if input_type == 'csv':
sdf = sc_sql.read.csv(path, header=True) # TODO: add way to add .option("delimiter", ';'), useful for metric_budgeting.
logger.info("Dataset '{}' loaded from files '{}'.".format(input_name, path))
elif input_type == 'parquet':
# TODO: check to add ...read.option("mergeSchema", "true").parquet...
sdf = sc_sql.read.parquet(path)
logger.info("Dataset '{}' loaded from files '{}'.".format(input_name, path))
else:
raise Exception("Unsupported dataset type '{}' for path '{}'. Supported types are: {}. ".format(input_type, path, self.SUPPORTED_TYPES))
# New param "custom_schema" to work for both db and file inputs (instead of just db). TODO: finish.
# df_custom_schema = self.jargs.merged_args.get('df_custom_schema')
# if df_custom_schema:
# for field, type in df_custom_schema.items():
# table_to_copy = table_to_copy.withColumn(field, table_to_copy[field].cast(type))
logger.info("Dataset data types: {}".format(pformat([(fd.name, fd.dataType) for fd in sdf.schema.fields])))
return sdf
def load_mysql(self, input_name):
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, aws_creds=AWS_SECRET_ID, local_creds=self.jargs.connection_file)
creds_section = self.jargs.inputs[input_name]['creds']
db = creds[creds_section]
extra_params = '' # can use '?zeroDateTimeBehavior=CONVERT_TO_NULL' to help solve "java.sql.SQLException: Zero date value prohibited" but leads to other error msg.
url = 'jdbc:mysql://{host}:{port}/{service}{extra_params}'.format(host=db['host'], port=db['port'], service=db['service'], extra_params=extra_params)
dbtable = self.jargs.inputs[input_name]['db_table']
inc_field = self.jargs.inputs[input_name].get('inc_field')
if not inc_field:
logger.info('Pulling table "{}" from mysql'.format(dbtable))
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "com.mysql.cj.jdbc.Driver") \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("dbtable", dbtable)\
.load()
else:
inc_field = self.jargs.inputs[input_name]['inc_field']
# query_str = "select * from {} where {} = '{}'".format(dbtable, inc_field, period)
higher_limit = "AND {inc_field} < '{period_next}'".format(inc_field=inc_field, period_next=self.period_next) if self.period_next else ''
query_str = "select * from {dbtable} where {inc_field} >= '{period}' {higher_limit}".format(dbtable=dbtable, inc_field=inc_field, period=self.period, higher_limit=higher_limit)
logger.info('Pulling table from mysql with query_str "{}"'.format(query_str))
# if self.jargs.merged_args.get('custom_schema', '')
# db_overridden_types_str = ', '.join([k + ' ' + v for k, v in db_overridden_types.items()])
# TODO: check if it should use com.mysql.cj.jdbc.Driver instead as above
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "com.mysql.jdbc.Driver") \
.option('fetchsize', 10000) \
.option('numPartitions', 3) \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("customSchema", self.jargs.merged_args.get('jdbc_custom_schema', '')) \
.option("query", query_str) \
.load()
return sdf
def load_clickhouse(self, input_name):
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, aws_creds=AWS_SECRET_ID, local_creds=self.jargs.connection_file)
creds_section = self.jargs.inputs[input_name]['creds']
db = creds[creds_section]
url = 'jdbc:postgresql://{host}/{service}'.format(host=db['host'], service=db['service'])
dbtable = self.jargs.inputs[input_name]['db_table']
inc_field = self.jargs.inputs[input_name].get('inc_field')
if not inc_field:
logger.info('Pulling table "{}" from Clickhouse'.format(dbtable))
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "org.postgresql.Driver") \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("dbtable", dbtable)\
.load()
else:
inc_field = self.jargs.inputs[input_name]['inc_field']
period = self.period
query_str = "select * from {} where {} = '{}'".format(dbtable, inc_field, period)
logger.info('Pulling table from Clickhouse with query_str "{}"'.format(query_str))
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "org.postgresql.Driver") \
.option('fetchsize', 10000) \
.option('numPartitions', 3) \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("query", query_str) \
.load()
return sdf
def get_previous_output_max_timestamp(self, sc, sc_sql):
path = self.jargs.output['path'] # implies output path is incremental (no "{now}" in string.)
path += '*' if self.jargs.merged_args.get('incremental_type') == 'no_schema' else '' # '*' to go into output subfolders.
try:
df = self.load_data_from_files(name='output', path=path, type=self.jargs.output['type'], sc=sc, sc_sql=sc_sql)
except Exception as e: # TODO: don't catch all
logger.info("Previous increment could not be loaded or doesn't exist. It will be ignored. Folder '{}' failed loading with error '{}'.".format(path, e))
return None
dt = self.get_max_timestamp(df)
logger.info("Max timestamp of previous increment: '{}'".format(dt))
return dt
def get_max_timestamp(self, df):
return df.agg({self.jargs.output['inc_field']: "max"}).collect()[0][0]
def save_output(self, output, now_dt=None):
self.path = self.save(output=output,
path=self.jargs.output['path'],
base_path=self.jargs.base_path,
type=self.jargs.output['type'],
now_dt=now_dt,
is_incremental=self.jargs.is_incremental,
incremental_type=self.jargs.merged_args.get('incremental_type', 'no_schema'),
partitionby=self.jargs.output.get('inc_field') or self.jargs.merged_args.get('partitionby'),
file_tag=self.jargs.merged_args.get('file_tag')) # TODO: make param standard in cmd_args ?
def save(self, output, path, base_path, type, now_dt=None, is_incremental=None, incremental_type=None, partitionby=None, file_tag=None):
"""Used to save output to disk. Can be used too inside jobs to output 2nd output for testing."""
path = Path_Handler(path, base_path).expand_now(now_dt)
self.jargs.output['path_expanded'] = path
if type == 'None':
logger.info('Did not write output to disk')
return None
if is_incremental and incremental_type == 'no_schema':
current_time = now_dt.strftime('%Y%m%d_%H%M%S_utc') # no use of now_dt to make it updated for each inc.
file_tag = ('_' + file_tag) if file_tag else "" # TODO: make that param standard in cmd_args ?
path += 'inc_{}{}/'.format(current_time, file_tag)
# TODO: rename 'partitioned' to 'spark_partitions' and 'no_schema' to 'yaetos_partitions'
write_mode = 'append' if incremental_type == 'partitioned' or partitionby else 'error'
partitionby = partitionby.split(',') if partitionby else []
# Tabular, Pandas
if self.jargs.output.get('df_type') == 'pandas':
if type == 'csv':
FS_Ops_Dispatcher().save_pandas(output, path, save_method='to_csv', save_kwargs=self.jargs.output.get('save_kwargs', {}))
elif type == 'parquet':
FS_Ops_Dispatcher().save_pandas(output, path, save_method='to_parquet', save_kwargs=self.jargs.output.get('save_kwargs', {}))
else:
raise Exception("Need to specify supported output type for pandas, csv only for now.")
logger.info('Wrote output to ' + path)
return path
# TODO: deal with cases where "output" is df when expecting rdd, or at least raise issue in a cleaner way.
if type == 'txt':
output.saveAsTextFile(path)
elif type == 'parquet':
output.write.partitionBy(*partitionby).mode(write_mode).parquet(path)
elif type == 'csv':
output.write.partitionBy(*partitionby).mode(write_mode).option("header", "true").csv(path)
else:
raise Exception("Need to specify supported output type, either txt, parquet or csv.")
logger.info('Wrote output to ' + path)
return path
def save_metadata(self, elapsed):
fname = self.path + '_metadata.txt'
content = """
-- app_name: %s
-- job_name: %s
-- time (s): %s
-- cluster_setup : TBD
-- input folders : TBD
-- output folder : TBD
-- github hash: TBD
-- code: TBD
""" % (self.app_name, self.jargs.job_name, elapsed)
FS_Ops_Dispatcher().save_metadata(fname, content)
def query(self, query_str):
logger.info('Query string:\n' + query_str)
df = self.sc_sql.sql(query_str)
df.cache()
return df
def copy_to_redshift_using_pandas(self, output, types):
# Import done here rather than at module level to avoid loading heavy libraries when not needed (optional feature).
from yaetos.redshift_pandas import create_table
from yaetos.db_utils import cast_col
df = output.toPandas()
df = cast_col(df, types)
connection_profile = self.jargs.copy_to_redshift['creds']
schema, name_tb = self.jargs.copy_to_redshift['table'].split('.')
schema = schema.format(schema=self.jargs.schema) if '{schema}' in schema else schema
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, aws_creds=AWS_SECRET_ID, local_creds=self.jargs.connection_file)
create_table(df, connection_profile, name_tb, schema, types, creds, self.jargs.is_incremental)
del df
def copy_to_redshift_using_spark(self, sdf):
# Import done here rather than at module level to avoid loading heavy libraries when not needed (optional feature).
from yaetos.redshift_spark import create_table
connection_profile = self.jargs.copy_to_redshift['creds']
schema, name_tb = self.jargs.copy_to_redshift['table'].split('.')
schema = schema.format(schema=self.jargs.schema) if '{schema}' in schema else schema
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, aws_creds=AWS_SECRET_ID, local_creds=self.jargs.connection_file)
create_table(sdf, connection_profile, name_tb, schema, creds, self.jargs.is_incremental, self.jargs.redshift_s3_tmp_dir, self.jargs.merged_args.get('spark_version', '2.4'))
def copy_to_clickhouse(self, sdf):
# Import done here rather than at module level to avoid loading heavy libraries when not needed (optional feature).
from yaetos.clickhouse import create_table
connection_profile = self.jargs.copy_to_clickhouse['creds']
schema, name_tb = self.jargs.copy_to_clickhouse['table'].split('.')
schema = schema.format(schema=self.jargs.schema) if '{schema}' in schema else schema
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, aws_creds=AWS_SECRET_ID, local_creds=self.jargs.connection_file)
create_table(sdf, connection_profile, name_tb, schema, creds, self.jargs.is_incremental)
def push_to_kafka(self, output, types):
""" Needs to be overriden by each specific job."""
raise NotImplementedError
def send_msg(self, msg, recipients=None):
""" Sending message to recipients (list of email addresse) or, if not specified, to yml 'owners'.
Pulling email sender account info from connection_file."""
if not recipients:
recipients = self.jargs.merged_args.get('owners')
if not recipients:
logger.error("Email can't be sent since no recipient set in {}, .\nMessage : \n{}".format(self.jargs.job_param_file, msg))
return None
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, aws_creds=AWS_SECRET_ID, local_creds=self.jargs.connection_file)
creds_section = self.jargs.email_cred_section
sender_email = creds.get(creds_section, 'sender_email')
password = creds.get(creds_section, 'password')
smtp_server = creds.get(creds_section, 'smtp_server')
port = creds.get(creds_section, 'port')
for recipient in recipients:
send_email(msg, recipient, sender_email, password, smtp_server, port)
logger.info('Email sent to {}'.format(recipient))
def send_job_failure_email(self, error_msg):
message = """Subject: [Data Pipeline Failure] {name}\n\nA Data pipeline named '{name}' failed.\nError message:\n{error}\n\nPlease check logs in AWS.""".format(name=self.jargs.job_name, error=error_msg)
self.send_msg(message)
@staticmethod
def check_pk(df, pks):
count = df.count()
count_pk = df.select(pks).dropDuplicates().count()
if count != count_pk:
logger.error("Given fields ({}) are not PKs since not unique. count={}, count_pk={}".format(pks, count, count_pk))
return False
else:
logger.info("Given fields ({}) are PKs (i.e. unique). count=count_pk={}".format(pks, count))
return True
def identify_non_unique_pks(self, df, pks):
return su.identify_non_unique_pks(df, pks)
class Period_Builder():
@staticmethod
def get_last_day(as_of_date=datetime.utcnow()):
last_day_dt = as_of_date + relativedelta(days=-1)
last_day = last_day_dt.strftime("%Y-%m-%d")
return last_day
@staticmethod
def get_first_to_last_day(first_day, as_of_date=datetime.utcnow()):
now = as_of_date
start = datetime.strptime(first_day, "%Y-%m-%d")
delta = now - start
number_days = delta.days
periods = []
iter_days = start
for item in range(number_days):
periods.append(iter_days.strftime("%Y-%m-%d"))
iter_days = iter_days + relativedelta(days=+1)
return periods
def get_last_output_to_last_day(self, last_run_period, first_day_input, as_of_date=datetime.utcnow()):
periods = self.get_first_to_last_day(first_day_input, as_of_date)
if last_run_period:
periods = [item for item in periods if item > last_run_period]
# periods = [item for item in periods if item < '2021-01-02'] # TODO: make end period parametrizable from args.
return periods
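# Illustrative sketch, not part of the original module: exercises the two helpers above
# with hypothetical dates to show the daily periods they produce.
def _example_period_builder():
    pb = Period_Builder()
    periods = pb.get_first_to_last_day('2021-01-01', as_of_date=datetime(2021, 1, 4))
    assert periods == ['2021-01-01', '2021-01-02', '2021-01-03']  # as_of_date itself is excluded
    # Dropping periods already covered by a previous run, as get_last_output_to_last_day() does:
    remaining = pb.get_last_output_to_last_day('2021-01-02', '2021-01-01', as_of_date=datetime(2021, 1, 4))
    assert remaining == ['2021-01-03']
    return remaining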
class Schema_Builder():
TYPES_FOLDER = 'schemas/'
def generate_schemas(self, loaded_datasets, output):
yml = {'inputs': {}}
for key, value in loaded_datasets.items():
if value:
# TODO: make it fail softly in case code below fails, so it doesn't block job, since it is for logging only.
yml['inputs'][key] = {fd.name: fd.dataType.__str__() for fd in value.schema.fields}
yml['output'] = {fd.name: fd.dataType.__str__() for fd in output.schema.fields}
self.yml = yml
def save_yaml(self, job_name):
job_name = job_name.replace('.py', '')
fname = self.TYPES_FOLDER + job_name + '.yaml'
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'w') as file:
yaml.dump(self.yml, file)
class Job_Yml_Parser():
"""Functions to load and parse yml, and functions to get job_name, which is the key to the yml info."""
def __init__(self, job_name, job_param_file, mode, skip_job=False):
self.yml_args = self.set_job_yml(job_name, job_param_file, mode, skip_job)
self.yml_args['job_name'] = job_name
self.yml_args['py_job'] = self.yml_args.get('py_job') or self.set_py_job_from_name(job_name)
self.yml_args['sql_file'] = self.set_sql_file_from_name(job_name, mode)
@staticmethod
def set_job_name_from_file(job_file):
# when run from Flow(), job_file is full path. When run from ETL directly, job_file is "jobs/..." .
if job_file.startswith(CLUSTER_APP_FOLDER + 'jobs/'):
job_name = job_file[len(CLUSTER_APP_FOLDER + 'jobs/'):]
elif job_file.startswith(CLUSTER_APP_FOLDER + 'scripts.zip/jobs/'):
job_name = job_file[len(CLUSTER_APP_FOLDER + 'scripts.zip/jobs/'):]
elif job_file.startswith(CI_APP_FOLDER + 'jobs/'):
job_name = job_file[len(CI_APP_FOLDER + 'jobs/'):]
elif job_file.startswith(LOCAL_JOB_FOLDER + 'jobs/'): # when run from external repo.
job_name = job_file[len(LOCAL_JOB_FOLDER + 'jobs/'):]
elif job_file.startswith('jobs/'):
job_name = job_file[len('jobs/'):]
elif job_file.__contains__('/scripts.zip/jobs/'):
# To deal with cases like job_file = '/mnt/tmp/spark-48e465ad-cca8-4216-a77f-ce069d04766f/userFiles-b1dad8aa-76ea-4adf-97da-dc9273666263/scripts.zip/jobs/infojobs/churn_prediction/users_inscriptions_daily.py' that appeared in new emr version.
job_name = job_file[job_file.find('/scripts.zip/jobs/') + len('/scripts.zip/jobs/'):]
else:
# To deal with the case where the job is defined outside of this repo and isn't located in a 'jobs/' folder. In this case, the job name in the metadata file should include the full path (incl. the job base path).
job_name = job_file
logger.info("job_name: '{}', from job_file: '{}'".format(job_name, job_file))
return job_name
@staticmethod
def set_py_job_from_name(job_name):
py_job = 'jobs/{}'.format(job_name)
logger.info("py_job: '{}', from job_name: '{}'".format(py_job, job_name))
return py_job
@staticmethod
def set_sql_file_from_name(job_name, mode):
if not job_name.endswith('.sql'):
return None
if mode in ('dev_EMR', 'prod_EMR'):
sql_file = CLUSTER_APP_FOLDER + 'jobs/{}'.format(job_name)
elif mode == 'dev_local':
sql_file = 'jobs/{}'.format(job_name)
else:
raise Exception("Mode not supported in set_sql_file_from_name(): {}".format(mode))
logger.info("sql_file: '{}', from job_name: '{}'".format(sql_file, job_name))
return sql_file
def set_job_yml(self, job_name, job_param_file, yml_mode, skip_job):
if job_param_file is None:
return {}
yml = self.load_meta(job_param_file)
if job_name not in yml['jobs'] and not skip_job:
raise KeyError("Your job '{}' can't be found in jobs_metadata file '{}'. Add it there or make sure the name matches".format(job_name, job_param_file))
elif job_name not in yml['jobs'] and skip_job:
job_yml = {}
else:
job_yml = yml['jobs'][job_name]
if yml_mode not in yml['common_params']['mode_specific_params']:
raise KeyError("Your yml mode '{}' can't be found in jobs_metadata file '{}'. Add it there or make sure the name matches".format(yml_mode, job_param_file))
mode_spec_yml = yml['common_params']['mode_specific_params'][yml_mode]
out = yml['common_params']['all_mode_params']
out.update(mode_spec_yml)
out.update(job_yml)
return out
@staticmethod
def load_meta(fname):
with open(fname, 'r') as stream:
yml = yaml.load(stream, Loader=yaml.FullLoader)
return yml
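# Illustrative sketch, not part of the original module: a minimal jobs_metadata yml with the
# structure set_job_yml() expects ('jobs' plus 'common_params' with 'all_mode_params' and
# 'mode_specific_params'). The job name and values below are hypothetical.
def _example_job_yml_parser():
    import tempfile
    yml_str = (
        "jobs:\n"
        "  examples/ex_job.py:\n"
        "    py_job: jobs/examples/ex_job.py\n"
        "    output: {path: '{base_path}/output/{now}/', type: parquet}\n"
        "common_params:\n"
        "  all_mode_params:\n"
        "    base_path: ./data\n"
        "  mode_specific_params:\n"
        "    dev_local:\n"
        "      load_connectors: none\n"
        "    dev_EMR:\n"
        "      load_connectors: all\n"
    )
    with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as fh:
        fh.write(yml_str)
        fname = fh.name
    # Params get merged in this order: all_mode_params < mode_specific_params < job entry.
    yml_args = Job_Yml_Parser('examples/ex_job.py', fname, 'dev_local').yml_args
    assert yml_args['base_path'] == './data' and yml_args['load_connectors'] == 'none'
    os.remove(fname)
    return yml_args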
class Job_Args_Parser():
DEPLOY_ARGS_LIST = ['aws_config_file', 'aws_setup', 'leave_on', 'push_secrets', 'frequency', 'start_date',
'email', 'mode', 'deploy', 'terminate_after', 'spark_version']
def __init__(self, defaults_args, yml_args, job_args, cmd_args, job_name=None, loaded_inputs={}):
"""Mix all params, add more and tweak them when needed (like depending on storage type, execution mode...).
If yml_args not provided, it will go and get it.
Sets of params:
- defaults_args: defaults command line args, as defined in define_commandline_args()
- yml_args: args for specific job from yml. If = None, it will rebuild it using job_name param.
- job_args: args passed to "Commandliner(Job, **args)" in each job file
- cmd_args: args passed in commandline, like "python some_job.py --some_args=xxx", predefined in define_commandline_args() or not
- job_name: to use only when yml_args is set to None, to specify what section of the yml to pick.
"""
if yml_args is None:
# Getting merged args, without yml (order matters)
args = defaults_args.copy()
args.update(job_args)
args.update(cmd_args)
args.update({'job_name': job_name} if job_name else {})
args['mode'] = 'dev_EMR' if args['mode'] == 'dev_local' and args['deploy'] in ('EMR', 'EMR_Scheduled') else args['mode']
assert 'job_name' in args.keys()
yml_args = Job_Yml_Parser(args['job_name'], args['job_param_file'], args['mode'], args.get('skip_job', False)).yml_args
# Get merged args, with yml (order matters)
# TODO: need to add business of flatten/unflatten so they can be merged cleanly.
args = defaults_args.copy()
args.update(yml_args)
args.update(job_args)
args.update(cmd_args)
args['mode'] = 'dev_EMR' if args['mode'] == 'dev_local' and args['deploy'] in ('EMR', 'EMR_Scheduled') else args['mode']
args = self.update_args(args, loaded_inputs)
[setattr(self, key, value) for key, value in args.items()] # attach vars to self.*
# Other access to vars
self.merged_args = args
self.defaults_args = defaults_args
self.yml_args = yml_args
self.job_args = job_args
self.cmd_args = cmd_args
logger.info("Job args: \n{}".format(pformat(args)))
def get_deploy_args(self):
return {key: value for key, value in self.merged_args.items() if key in self.DEPLOY_ARGS_LIST}
def get_app_args(self):
return {key: value for key, value in self.merged_args.items() if key not in self.DEPLOY_ARGS_LIST or key == 'mode'}
def update_args(self, args, loaded_inputs):
""" Updating params or adding new ones, according to execution environment (local, prod...)"""
args['inputs'] = self.set_inputs(args, loaded_inputs)
# args['output'] = self.set_output(cmd_args, yml_args) # TODO: fix later
args['is_incremental'] = self.set_is_incremental(args.get('inputs', {}), args.get('output', {}))
args['output']['type'] = args.pop('output.type', None) or args['output']['type']
return args
# TODO: modify later since not used now
def set_inputs(self, args, loaded_inputs):
# inputs_in_args = any([item.startswith('input_') for item in cmd_args.keys()])
# if inputs_in_args:
# # code below limited, will break in non-friendly way if not all input params are provided, doesn't support other types of inputs like db ones. TODO: make it better.
# input_paths = {key.replace('input_path_', ''): {'path': val} for key, val in cmd_args.items() if key.startswith('input_path_')}
# input_types = {key.replace('input_type_', ''): {'type': val} for key, val in cmd_args.items() if key.startswith('input_type_')}
# inputs = {key: {'path': val['path'], 'type':input_types[key]['type']} for key, val in input_paths.items()}
# return inputs
if loaded_inputs:
return {key: {'path': val, 'type': 'df'} for key, val in loaded_inputs.items()}
else:
return args.get('inputs', {})
# TODO: modify later since not used now
# def set_output(self, cmd_args, yml_args):
# output_in_args = any([item == 'output_path' for item in cmd_args.keys()])
# if output_in_args:
# # code below limited, will break in non-friendly way if not all output params are provided, doesn't support other types of outputs like db ones. TODO: make it better.
# output = {'path':cmd_args['output_path'], 'type':cmd_args['output_type']}
# return output
# elif cmd_args.get('job_param_file'): # should be before loaded_inputs to use yaml if available. Later function load_inputs uses both self.jargs.inputs and loaded_inputs, so not incompatible.
# return yml_args.get('output', {})
# elif cmd_args.get('mode_no_io'):
# output = {}
# logger.info("No output given")
# else:
# raise Exception("No output given")
# return output
def set_is_incremental(self, inputs, output):
return any(['inc_field' in inputs[item] for item in inputs.keys()]) or 'inc_field' in output
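# Illustrative sketch, not part of the original module: the merge precedence applied in
# Job_Args_Parser.__init__ above (later dict updates win). All values below are hypothetical.
def _example_arg_precedence():
    defaults_args = {'mode': 'dev_local', 'storage': 'local'}
    yml_args = {'storage': 's3'}
    job_args = {'output': {'path': 's3://some-bucket/output/{now}/', 'type': 'parquet'}}
    cmd_args = {'mode': 'dev_EMR'}
    args = defaults_args.copy()
    args.update(yml_args)   # yml overrides defaults
    args.update(job_args)   # args from the job file override yml
    args.update(cmd_args)   # command line args win
    assert args == {'mode': 'dev_EMR', 'storage': 's3',
                    'output': {'path': 's3://some-bucket/output/{now}/', 'type': 'parquet'}}
    return args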
class Path_Handler():
def __init__(self, path, base_path=None):
if base_path:
path = path.format(base_path=base_path, latest='{latest}', now='{now}')
self.path = path
def expand_later(self):
path = self.path
if '{latest}' in path:
upstream_path = path.split('{latest}')[0]
paths = FS_Ops_Dispatcher().listdir(upstream_path)
latest_date = max(paths)
path = path.format(latest=latest_date)
return path
def expand_now(self, now_dt):
path = self.path
if '{now}' in path:
current_time = now_dt.strftime('date%Y%m%d_time%H%M%S_utc')
path = path.format(now=current_time)
return path
def get_base(self):
if '{latest}' in self.path:
return self.path.split('{latest}')[0]
elif '{now}' in self.path:
return self.path.split('{now}')[0]
else:
return self.path
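# Illustrative sketch, not part of the original module: how Path_Handler expands the
# '{base_path}' and '{now}' placeholders. The paths below are hypothetical.
def _example_path_handler():
    handler = Path_Handler('{base_path}/processed/{now}/', base_path='s3://some-bucket/pipelines')
    path = handler.expand_now(datetime(2021, 1, 4, 15, 30, 0))
    assert path == 's3://some-bucket/pipelines/processed/date20210104_time153000_utc/'
    # '{latest}' placeholders are resolved lazily against existing folders, see expand_later().
    return path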
def Commandliner(Job, **job_args): # TODO: change name to reflect fact it is not a class anymore
Runner(Job, **job_args).parse_cmdline_and_run()
class Runner():
def __init__(self, Job, **job_args):
self.Job = Job
self.job_args = job_args
def parse_cmdline_and_run(self):
self.job_args['parse_cmdline'] = True
return self.run()
def run(self):
Job = self.Job
job_args = self.job_args
parser, defaults_args = self.define_commandline_args()
cmd_args = self.set_commandline_args(parser) if job_args.get('parse_cmdline') else {}
# Building "job", which will include all job args.
if Job is None: # when job run from "python launcher.py --job_name=some_name_from_job_metadata_file"
jargs = Job_Args_Parser(defaults_args=defaults_args, yml_args=None, job_args=job_args, cmd_args=cmd_args, loaded_inputs={})
Job = get_job_class(jargs.py_job)
job = Job(jargs=jargs)
else: # when job run from "python some_job.py"
job = Job(pre_jargs={'defaults_args': defaults_args, 'job_args': job_args, 'cmd_args': cmd_args}) # can provide jargs directly here since job_file (and so job_name) needs to be extracted from job first. So, letting job build jargs.
# Executing or deploying
if job.jargs.deploy in ('none',):  # when executing job code
job = self.launch_run_mode(job)
elif job.jargs.deploy in ('EMR', 'EMR_Scheduled', 'code'): # when deploying to AWS for execution there
self.launch_deploy_mode(job.jargs.get_deploy_args(), job.jargs.get_app_args())
return job
@staticmethod
def set_commandline_args(parser):
"""Command line arguments take precedence over function ones."""
cmd_args, cmd_unknown_args = parser.parse_known_args()
cmd_args = {key: value for (key, value) in cmd_args.__dict__.items() if value is not None}
cmd_unknown_args = dict([item[2:].split('=') for item in cmd_unknown_args])  # requires unknown args to be defined with '=' and to start with '--'
cmd_args.update(cmd_unknown_args)
return cmd_args
@staticmethod
def define_commandline_args():
# Defined here separately from parsing for overridability.
# Defaults should not be set in parser so they can be set outside of command line functionality.
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--deploy", choices=set(['none', 'EMR', 'EMR_Scheduled', 'EMR_DataPipeTest', 'code']), help="Choose where to run the job.")
parser.add_argument("-m", "--mode", choices=set(['dev_local', 'dev_EMR', 'prod_EMR']), help="Choose which set of params to use from jobs_metadata.yml file.")
parser.add_argument("-j", "--job_param_file", help="Identify file to use. It can be set to 'False' to not load any file and provide all parameters through job or command line arguments.")
parser.add_argument("-n", "--job_name", help="Identify registry job to use.")
parser.add_argument("-q", "--sql_file", help="Path to an sql file to execute.")
parser.add_argument("--connection_file", help="Identify file to use. Default to repo one.")
parser.add_argument("--jobs_folder", help="Identify the folder where job code is. Necessary if job code is outside the repo, i.e. if this is used as an external library. By default, uses the repo 'jobs/' folder.")
parser.add_argument("-s", "--storage", choices=set(['local', 's3']), help="Choose 'local' (default) or 's3'.")
parser.add_argument("-x", "--dependencies", action='store_true', help="Run the job dependencies and then the job itself")
parser.add_argument("-c", "--rerun_criteria", choices=set(['last_date', 'output_empty', 'both']), help="Choose criteria to rerun the next increment or not. 'last_date' usefull if we know data goes to a certain date. 'output_empty' not to be used if increment may be empty but later ones not. Only relevant for incremental job.")
parser.add_argument("--chain_dependencies", action='store_true', help="Run dependant jobs in a chained way, i.e. passing output to next step without dropping to disk. Only useful if ran with dependencies (-x) and requires output to be dataframes.")
parser.add_argument("-l", "--load_connectors", choices=set(['all', 'none']), help="Load java packages to enable spark connectors (s3, redshift, mysql). Set to 'none' to have faster spark start time and smaller log when connectors are not necessary. Only useful when mode=dev_local.")
parser.add_argument("-t", "--output.type", choices=set(['csv', 'parquet']), help="Override output type. Useful for development. Can be ignored otherwise.")
# Deploy specific
parser.add_argument("--aws_config_file", help="Identify file to use. Default to repo one.")
parser.add_argument("-a", "--aws_setup", help="Choose aws setup from conf/aws_config.cfg, typically 'prod' or 'dev'. Only relevant if choosing to deploy to a cluster.")
parser.add_argument("-o", "--leave_on", action='store_true', help="Use arg to not terminate cluster after running the job. Mostly for testing. Only relevant when creating a new cluster when deploy=EMR.")
parser.add_argument("-p", "--push_secrets", action='store_true', help="Pushing secrets to cluster. Only relevant if choosing to deploy to a cluster.")
# --inputs and --output args can be set from job or commandline too, just not set here.
defaults = {
'deploy': 'none',
'mode': 'dev_local',
'job_param_file': JOBS_METADATA_FILE,
'job_name': None,
'sql_file': None,
'connection_file': CONNECTION_FILE,
'jobs_folder': JOB_FOLDER,
'storage': 'local',
'dependencies': False,  # will be overridden by the cmdline arg default unless cmdline args are disabled (ex: unit tests)
'rerun_criteria': 'last_date',
'chain_dependencies': False,  # will be overridden by the cmdline arg default unless cmdline args are disabled (ex: unit tests)
'load_connectors': 'all',
# 'output.type': 'csv', # skipped on purpose to avoid setting it if not set in cmd line.
# -- Deploy specific below --
'aws_config_file': AWS_CONFIG_FILE,
'aws_setup': 'dev',
'code_source': 'lib', # Other options: 'repo' TODO: make it automatic so parameter not needed.
'leave_on': False,  # will be overridden by the cmdline arg default unless cmdline args are disabled (ex: unit tests)
'push_secrets': False,  # will be overridden by the cmdline arg default unless cmdline args are disabled (ex: unit tests)
# -- Not added in command line args:
'enable_redshift_push': True,
'base_path': '',
'save_schemas': False,
'manage_git_info': False,
'add_created_at': 'true', # set as string to be overrideable in cmdline.
'no_fw_cache': False,
'spark_boot': True, # options ('spark', 'pandas') (experimental).
}
return parser, defaults
def launch_run_mode(self, job):
app_name = job.jargs.job_name
if job.jargs.spark_boot is True:
sc, sc_sql = self.create_contexts(app_name, job.jargs) # TODO: set spark_version default upstream, remove it from here and from deploy.py.
else:
sc, sc_sql = None, None
if not job.jargs.dependencies:
job.etl(sc, sc_sql)
else:
job = Flow(job.jargs, app_name).run_pipeline(sc, sc_sql)  # 'job' is the last job object in the pipeline.
return job
def launch_deploy_mode(self, deploy_args, app_args):
# Load deploy lib here instead of at module level to remove dependency on it when running code locally
from yaetos.deploy import DeployPySparkScriptOnAws
DeployPySparkScriptOnAws(deploy_args, app_args).run()
def create_contexts(self, app_name, jargs):
# Load spark here instead of at module level to remove dependency on spark when only deploying code to aws or running pandas job only.
from pyspark.sql import SQLContext
from pyspark.sql import SparkSession
from pyspark import SparkConf
conf = SparkConf()
# TODO: move spark-submit params here since it is more generic than in spark submit, params like "spark.driver.memoryOverhead" cause problems in spark submit.
if jargs.merged_args.get('driver-memoryOverhead'): # For extra overhead for python in driver (typically pandas)
conf = conf.set("spark.driver.memoryOverhead", jargs.merged_args['driver-memoryOverhead'])
if jargs.mode == 'dev_local' and jargs.load_connectors == 'all':
# Env vars for S3 access
credentials = boto3.Session(profile_name='default').get_credentials()
os.environ['AWS_ACCESS_KEY_ID'] = credentials.access_key
os.environ['AWS_SECRET_ACCESS_KEY'] = credentials.secret_key
# JARs
package = PACKAGES_LOCAL if jargs.merged_args.get('spark_version', '2.4') == '2.4' else PACKAGES_LOCAL_ALT
package_str = ','.join(package)
conf = conf \
.set("spark.jars.packages", package_str) \
.set("spark.jars", JARS)
# Setup above not needed when running from EMR where setup done in spark-submit.
if jargs.merged_args.get('emr_core_instances') == 0:
conf = conf \
.set("spark.hadoop.fs.s3a.buffer.dir", '/tmp') \
.set("spark.hadoop.fs.s3a.fast.upload.active.blocks", '1')
spark = SparkSession.builder \
.appName(app_name) \
.config(conf=conf) \
.getOrCreate()
sc = spark.sparkContext
sc_sql = SQLContext(sc)
logger.info('Spark Config: {}'.format(sc.getConf().getAll()))
return sc, sc_sql
class Flow():
def __init__(self, launch_jargs, app_name):
self.app_name = app_name
df = self.create_connections_jobs(launch_jargs.storage, launch_jargs.merged_args)
logger.debug('Flow app_name : {}, connection_table: {}'.format(app_name, df))
graph = self.create_global_graph(df) # top to bottom
tree = self.create_local_tree(graph, nx.DiGraph(), app_name) # bottom to top
self.leafs = self.get_leafs(tree, leafs=[]) # bottom to top
logger.info('Sequence of jobs to be run: {}'.format(self.leafs))
logger.info('-' * 80)
logger.info('-')
launch_jargs.cmd_args.pop('job_name', None)  # removing since it should be pulled from yml and not be overridden by cmd_args.
launch_jargs.job_args.pop('job_name', None) # same
self.launch_jargs = launch_jargs
def run_pipeline(self, sc, sc_sql):
"""Load all job classes and run them"""
df = {}
for job_name in self.leafs:
logger.info('About to run job_name: {}'.format(job_name))
# Get yml
yml_args = Job_Yml_Parser(job_name, self.launch_jargs.job_param_file, self.launch_jargs.mode).yml_args
# Get loaded_inputs
loaded_inputs = {}
if self.launch_jargs.merged_args.get('chain_dependencies'):
if yml_args.get('inputs', 'no input') == 'no input':
raise Exception("Pb with loading job_yml or finding 'inputs' parameter in it, so 'chain_dependencies' argument not useable in this case.")
for in_name, in_properties in yml_args['inputs'].items():
if in_properties.get('from'):
loaded_inputs[in_name] = df[in_properties['from']]
# Get jargs
jargs = Job_Args_Parser(self.launch_jargs.defaults_args, yml_args, self.launch_jargs.job_args, self.launch_jargs.cmd_args, loaded_inputs=loaded_inputs)
Job = get_job_class(yml_args['py_job'])
job = Job(jargs=jargs, loaded_inputs=loaded_inputs)
df[job_name] = job.etl(sc, sc_sql) # at this point df[job_name] is unpersisted. TODO: keep it persisted.
if not self.launch_jargs.merged_args.get('chain_dependencies'): # or self.launch_jargs.merged_args.get('keep_df', True): TODO: check if it works in pipeline.
df[job_name].unpersist()
del df[job_name]
gc.collect()
logger.info('-' * 80)
logger.info('-')
return job
@staticmethod
def create_connections_jobs(storage, args):
yml = Job_Yml_Parser.load_meta(args['job_param_file'])
connections = []
for job_name, job_meta in yml['jobs'].items():
dependencies = job_meta.get('dependencies') or []
for dependency in dependencies:
row = {'source_job': dependency, 'destination_job': job_name}
connections.append(row)
return pd.DataFrame(connections)
@staticmethod
def create_global_graph(df):
""" Directed Graph from source to target. df must contain 'source_dataset' and 'target_dataset'.
All other fields are attributed to target."""
DG = nx.DiGraph()
for ii, item in df.iterrows():
item = item.to_dict()
source_dataset = item.pop('source_job')
target_dataset = item.pop('destination_job')
item.update({'name': target_dataset})
DG.add_edge(source_dataset, target_dataset)
DG.add_node(source_dataset, name=source_dataset) # (source_dataset, **{'name':source_dataset})
DG.add_node(target_dataset, **item)
return DG
def create_local_tree(self, DG, tree, ref_node):
""" Builds tree recursively. Uses graph data structure but enforces tree to simplify downstream."""
nodes = DG.predecessors(ref_node)
tree.add_node(ref_node, name=DG.nodes[ref_node])
for item in nodes:
if not tree.has_node(item):
tree.add_edge(ref_node, item)
tree.add_node(item, name=DG.nodes[item])
self.create_local_tree(DG, tree, item)
return tree
def get_leafs(self, tree, leafs):
"""Recursive function to extract all leafs in order out of tree.
Each pass, jobs are moved from "tree" to "leafs" variables until done.
"""
cur_leafs = [node for node in tree.nodes() if tree.in_degree(node) != 0 and tree.out_degree(node) == 0]
leafs += cur_leafs
for leaf in cur_leafs:
tree.remove_node(leaf)
if len(tree.nodes()) >= 2:
self.get_leafs(tree, leafs)
return leafs + list(tree.nodes())
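# Illustrative sketch, not part of the original module: a tiny hypothetical dependency chain
# (job_a -> job_b -> job_c) run through the graph helpers above to show the resulting execution
# order. __init__ is bypassed on purpose since only the tree methods are exercised here.
def _example_flow_ordering():
    df = pd.DataFrame([
        {'source_job': 'job_a', 'destination_job': 'job_b'},
        {'source_job': 'job_b', 'destination_job': 'job_c'},
    ])
    flow = object.__new__(Flow)  # skip __init__, only the tree helpers are needed
    graph = Flow.create_global_graph(df)                         # top to bottom
    tree = flow.create_local_tree(graph, nx.DiGraph(), 'job_c')  # bottom to top
    leafs = flow.get_leafs(tree, leafs=[])
    assert leafs == ['job_a', 'job_b', 'job_c']  # deepest dependency first, target job last
    return leafs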
def get_job_class(py_job):
name_import = py_job.replace('/', '.').replace('.py', '')
import_cmd = "from {} import Job".format(name_import)
namespace = {}
exec(import_cmd, namespace)
return namespace['Job']
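# Illustrative sketch, not part of the original module: the path-to-module mangling performed
# by get_job_class() above, for a hypothetical job file.
def _example_job_class_import_name():
    py_job = 'jobs/examples/ex1_job.py'
    name_import = py_job.replace('/', '.').replace('.py', '')
    assert name_import == 'jobs.examples.ex1_job'
    # get_job_class(py_job) would then execute: "from jobs.examples.ex1_job import Job"
    return name_import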
def send_email(message, receiver_email, sender_email, password, smtp_server, port):
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, port) as server:
server.starttls(context=context)
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
| 55.515517
| 336
| 0.63401
|
5bb666162181416e06b6984b17c3bf9515b89bb3
| 24,736
|
py
|
Python
|
envs/car_racing/AttentionAgent/car_racing.py
|
amansinha/neural-bridge
|
5955bcb59a9c3519834069186637472738606898
|
[
"MIT"
] | 3
|
2021-02-23T06:14:40.000Z
|
2022-02-06T09:42:01.000Z
|
envs/car_racing/AttentionAgent/car_racing.py
|
amansinha/neural-bridge
|
5955bcb59a9c3519834069186637472738606898
|
[
"MIT"
] | null | null | null |
envs/car_racing/AttentionAgent/car_racing.py
|
amansinha/neural-bridge
|
5955bcb59a9c3519834069186637472738606898
|
[
"MIT"
] | 2
|
2021-04-15T12:09:21.000Z
|
2021-12-11T16:55:14.000Z
|
"""This file is based on the car_racing.py in OpenAI's gym.
Description
We provide 5 modified CarRacing environments here:
1. CarRacingColor-v0
Both the lane and the grass colors are perturbed at the env.reset().
2. CarRacingBar-v0
We add vertical bars on the left and right side of the screen.
3. CarRacingBlob-v0
A red blob that follows the car at a fixed position in the car's frame.
4. CarRacingNoise-v0
We replace the green background with noise.
5. CarRacingVideo-v0-xxx
We replace the green background with frames from a video; the user is
responsible for creating these frames and for passing the directory as xxx
in gym.make().
These environments were first used in the paper "Neuroevolution of Self-Interpretable Agent"
https://attentionagent.github.io/
Author
Yujin Tang (yujintang@google.com)
"""
import sys, math
import numpy as np
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from .car_dynamics import Car
from gym.utils import colorize, seeding, EzPickle
import pyglet
from pyglet import gl
# Easiest continuous control task to learn from pixels, a top-down racing environment.
# Discrete control is reasonable in this environment as well, on/off discretization is
# fine.
#
# State consists of STATE_W x STATE_H pixels.
#
# Reward is -0.1 every frame and +1000/N for every track tile visited, where N is
# the total number of tiles visited in the track. For example, if you have finished in 732 frames,
# your reward is 1000 - 0.1*732 = 926.8 points.
#
# Game is solved when agent consistently gets 900+ points. Track generated is random every episode.
#
# Episode finishes when all tiles are visited. The car can also go outside of PLAYFIELD, that
# is, far off the track; in that case it gets -100 and dies.
#
# Some indicators are shown at the bottom of the window and the state RGB buffer. From
# left to right: true speed, four ABS sensors, steering wheel position and gyroscope.
#
# To play yourself (it's rather fast for humans), type:
#
# python gym/envs/box2d/car_racing.py
#
# Remember it's a powerful rear-wheel drive car; don't press the accelerator and turn at the
# same time.
#
# Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
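# Illustrative sketch, not part of the original file: the final score implied by the comment
# above, assuming all N track tiles get visited (+1000/N each, so +1000 in total) and the
# -0.1 frame penalty is paid on every step.
def _example_final_reward(num_frames):
    return 1000.0 - 0.1 * num_frames
# e.g. _example_final_reward(732) is ~926.8, matching the worked example above.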
STATE_W = 96 # less than Atari 160x192
STATE_H = 96
VIDEO_W = 600
VIDEO_H = 400
WINDOW_W = 1000
WINDOW_H = 800
SCALE = 6.0 # Track scale
TRACK_RAD = 900/SCALE # Track is heavily morphed circle with this radius
PLAYFIELD = 2000/SCALE # Game over boundary
FPS = 50 # Frames per second
ZOOM = 2.7 # Camera zoom
ZOOM_FOLLOW = True # Set to False for fixed view (don't use zoom)
TRACK_DETAIL_STEP = 21/SCALE
TRACK_TURN_RATE = 0.31
TRACK_WIDTH = 40/SCALE
BORDER = 8/SCALE
BORDER_MIN_COUNT = 4
ROAD_COLOR = [0.4, 0.4, 0.4]
class FrictionDetector(contactListener):
def __init__(self, env):
contactListener.__init__(self)
self.env = env
def BeginContact(self, contact):
self._contact(contact, True)
def EndContact(self, contact):
self._contact(contact, False)
def _contact(self, contact, begin):
tile = None
obj = None
u1 = contact.fixtureA.body.userData
u2 = contact.fixtureB.body.userData
if u1 and "road_friction" in u1.__dict__:
tile = u1
obj = u2
if u2 and "road_friction" in u2.__dict__:
tile = u2
obj = u1
if not tile:
return
tile.color[0] = self.env.road_color[0]
tile.color[1] = self.env.road_color[1]
tile.color[2] = self.env.road_color[2]
if not obj or "tiles" not in obj.__dict__:
return
if begin:
obj.tiles.add(tile)
# print tile.road_friction, "ADD", len(obj.tiles)
if not tile.road_visited:
tile.road_visited = True
self.env.reward += 1000.0/len(self.env.track)
self.env.tile_visited_count += 1
else:
obj.tiles.remove(tile)
# print tile.road_friction, "DEL", len(obj.tiles) -- should delete to zero when on grass (this works)
class CarRacing(gym.Env, EzPickle):
metadata = {
'render.modes': ['human', 'rgb_array', 'state_pixels'],
'video.frames_per_second' : FPS
}
def __init__(self, verbose=1, **kwargs):
EzPickle.__init__(self)
self.seed()
self.road_color = ROAD_COLOR[:]
self.grass_color = [0.4, 0.8, 0.4, 1]
if 'modification' in kwargs:
self._modification_type = kwargs['modification']
else:
self._modification_type = ''
self.contactListener_keepref = FrictionDetector(self)
self.world = Box2D.b2World((0,0), contactListener=self.contactListener_keepref)
self.viewer = None
self.invisible_state_window = None
self.invisible_video_window = None
self.road = None
self.car = None
self.reward = 0.0
self.prev_reward = 0.0
self.verbose = verbose
self.fd_tile = fixtureDef(
shape = polygonShape(vertices=
[(0, 0),(1, 0),(1, -1),(0, -1)]))
self.action_space = spaces.Box( np.array([-1,0,0]), np.array([+1,+1,+1]), dtype=np.float32) # steer, gas, brake
self.observation_space = spaces.Box(low=0, high=255, shape=(STATE_H, STATE_W, 3), dtype=np.uint8)
self.step_cnt = 0
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _destroy(self):
if not self.road:
return
for t in self.road:
self.world.DestroyBody(t)
self.road = []
self.car.destroy()
def _create_track(self):
CHECKPOINTS = 12
# Create checkpoints
checkpoints = []
for c in range(CHECKPOINTS):
## AS
# alpha = 2*math.pi*c/CHECKPOINTS + self.np_random.uniform(0, 2*math.pi*1/CHECKPOINTS)
# rad = self.np_random.uniform(TRACK_RAD/3, TRACK_RAD)
alpha = 2*math.pi*c/CHECKPOINTS + self.noise[c,0]*2*math.pi*1./CHECKPOINTS
rad = TRACK_RAD*(self.noise[c,1]*2./3. + 1./3.)
if c==0:
alpha = 0
rad = 1.5*TRACK_RAD
if c==CHECKPOINTS-1:
alpha = 2*math.pi*c/CHECKPOINTS
self.start_alpha = 2*math.pi*(-0.5)/CHECKPOINTS
rad = 1.5*TRACK_RAD
# # YUJIN
# rad = 1 * TRACK_RAD
# alpha = 2 * math.pi * c / CHECKPOINTS
# self.start_alpha = 2*math.pi*(-0.5)/CHECKPOINTS
checkpoints.append( (alpha, rad*math.cos(alpha), rad*math.sin(alpha)) )
# print "\n".join(str(h) for h in checkpoints)
# self.road_poly = [ ( # uncomment this to see checkpoints
# [ (tx,ty) for a,tx,ty in checkpoints ],
# (0.7,0.7,0.9) ) ]
self.road = []
# Go from one checkpoint to another to create track
x, y, beta = 1.5*TRACK_RAD, 0, 0
dest_i = 0
laps = 0
track = []
no_freeze = 2500
visited_other_side = False
while True:
alpha = math.atan2(y, x)
if visited_other_side and alpha > 0:
laps += 1
visited_other_side = False
if alpha < 0:
visited_other_side = True
alpha += 2*math.pi
while True: # Find destination from checkpoints
failed = True
while True:
dest_alpha, dest_x, dest_y = checkpoints[dest_i % len(checkpoints)]
if alpha <= dest_alpha:
failed = False
break
dest_i += 1
if dest_i % len(checkpoints) == 0:
break
if not failed:
break
alpha -= 2*math.pi
continue
r1x = math.cos(beta)
r1y = math.sin(beta)
p1x = -r1y
p1y = r1x
dest_dx = dest_x - x # vector towards destination
dest_dy = dest_y - y
proj = r1x*dest_dx + r1y*dest_dy # destination vector projected on rad
while beta - alpha > 1.5*math.pi:
beta -= 2*math.pi
while beta - alpha < -1.5*math.pi:
beta += 2*math.pi
prev_beta = beta
proj *= SCALE
if proj > 0.3:
beta -= min(TRACK_TURN_RATE, abs(0.001*proj))
if proj < -0.3:
beta += min(TRACK_TURN_RATE, abs(0.001*proj))
x += p1x*TRACK_DETAIL_STEP
y += p1y*TRACK_DETAIL_STEP
track.append( (alpha,prev_beta*0.5 + beta*0.5,x,y) )
if laps > 4:
break
no_freeze -= 1
if no_freeze==0:
break
# print "\n".join([str(t) for t in enumerate(track)])
# Find closed loop range i1..i2, first loop should be ignored, second is OK
i1, i2 = -1, -1
i = len(track)
while True:
i -= 1
if i==0:
return False # Failed
pass_through_start = track[i][0] > self.start_alpha and track[i-1][0] <= self.start_alpha
if pass_through_start and i2==-1:
i2 = i
elif pass_through_start and i1==-1:
i1 = i
break
if self.verbose == 1:
print("Track generation: %i..%i -> %i-tiles track" % (i1, i2, i2-i1))
assert i1!=-1
assert i2!=-1
track = track[i1:i2-1]
first_beta = track[0][1]
first_perp_x = math.cos(first_beta)
first_perp_y = math.sin(first_beta)
# Length of perpendicular jump to put together head and tail
well_glued_together = np.sqrt(
np.square( first_perp_x*(track[0][2] - track[-1][2]) ) +
np.square( first_perp_y*(track[0][3] - track[-1][3]) ))
if well_glued_together > TRACK_DETAIL_STEP:
return False
# Red-white border on hard turns
border = [False]*len(track)
for i in range(len(track)):
good = True
oneside = 0
for neg in range(BORDER_MIN_COUNT):
beta1 = track[i-neg-0][1]
beta2 = track[i-neg-1][1]
good &= abs(beta1 - beta2) > TRACK_TURN_RATE*0.2
oneside += np.sign(beta1 - beta2)
good &= abs(oneside) == BORDER_MIN_COUNT
border[i] = good
for i in range(len(track)):
for neg in range(BORDER_MIN_COUNT):
border[i-neg] |= border[i]
# Create tiles
for i in range(len(track)):
alpha1, beta1, x1, y1 = track[i]
alpha2, beta2, x2, y2 = track[i-1]
road1_l = (x1 - TRACK_WIDTH*math.cos(beta1), y1 - TRACK_WIDTH*math.sin(beta1))
road1_r = (x1 + TRACK_WIDTH*math.cos(beta1), y1 + TRACK_WIDTH*math.sin(beta1))
road2_l = (x2 - TRACK_WIDTH*math.cos(beta2), y2 - TRACK_WIDTH*math.sin(beta2))
road2_r = (x2 + TRACK_WIDTH*math.cos(beta2), y2 + TRACK_WIDTH*math.sin(beta2))
vertices = [road1_l, road1_r, road2_r, road2_l]
self.fd_tile.shape.vertices = vertices
t = self.world.CreateStaticBody(fixtures=self.fd_tile)
t.userData = t
c = 0.01*(i%3)
t.color = [self.road_color[0] + c,
self.road_color[1] + c,
self.road_color[2] + c]
t.road_visited = False
t.road_friction = 1.0
t.fixtures[0].sensor = True
self.road_poly.append(( [road1_l, road1_r, road2_r, road2_l], t.color ))
# # YUJIN: add a fake lane
# offset = -20
# froad1_l = (x1 - (offset+TRACK_WIDTH)*math.cos(beta1), y1 - (offset+TRACK_WIDTH)*math.sin(beta1))
# froad1_r = (x1 + (-offset+TRACK_WIDTH)*math.cos(beta1), y1 + (-offset+TRACK_WIDTH)*math.sin(beta1))
# froad2_l = (x2 - (offset+TRACK_WIDTH)*math.cos(beta2), y2 - (offset+TRACK_WIDTH)*math.sin(beta2))
# froad2_r = (x2 + (-offset+TRACK_WIDTH)*math.cos(beta2), y2 + (-offset+TRACK_WIDTH)*math.sin(beta2))
# r_color = ROAD_COLOR[:]
# r_color[2] += 0.1
# self.froad_poly.append(( [froad1_l, froad1_r, froad2_r, froad2_l], r_color))
self.road.append(t)
if border[i]:
side = np.sign(beta2 - beta1)
b1_l = (x1 + side* TRACK_WIDTH *math.cos(beta1), y1 + side* TRACK_WIDTH *math.sin(beta1))
b1_r = (x1 + side*(TRACK_WIDTH+BORDER)*math.cos(beta1), y1 + side*(TRACK_WIDTH+BORDER)*math.sin(beta1))
b2_l = (x2 + side* TRACK_WIDTH *math.cos(beta2), y2 + side* TRACK_WIDTH *math.sin(beta2))
b2_r = (x2 + side*(TRACK_WIDTH+BORDER)*math.cos(beta2), y2 + side*(TRACK_WIDTH+BORDER)*math.sin(beta2))
self.road_poly.append(( [b1_l, b1_r, b2_r, b2_l], (1,1,1) if i%2==0 else (1,0,0) ))
self.track = track
return True
def reset(self):
self._destroy()
self.reward = 0.0
self.prev_reward = 0.0
self.tile_visited_count = 0
self.t = 0.0
self.road_poly = []
self.froad_poly = []
self.step_cnt = 0
# Color modification.
self.road_color = np.array([0.4, 0.4, 0.4]) # Original road color.
self.grass_color = np.array([0.4, 0.8, 0.4, 1]) # Original grass color.
if self._modification_type == 'color':
noise1 = np.random.uniform(-0.2, 0.2)
noise2 = np.random.uniform(-0.2, 0.2)
print('noise1={}'.format(noise1))
print('noise2={}'.format(noise2))
self.road_color += noise1
self.grass_color[:3] += noise2
## AS
# while True:
# success = self._create_track()
# if success:
# break
# if self.verbose == 1:
# print("retry to generate track (normal if there are not many of this messages)")
# add_blob = self._modification_type == 'blob'
# self.car = Car(self.world, *self.track[0][1:4], add_blob=add_blob)
# return self.step(None)[0]
success = self._create_track()
if success:
add_blob = self._modification_type == 'blob'
self.car = Car(self.world, *self.track[0][1:4], add_blob=add_blob)
return self.step(None)[0]
return None
def step(self, action):
if action is not None:
self.car.steer(-action[0])
self.car.gas(action[1])
self.car.brake(action[2])
self.car.step(1.0/FPS)
self.world.Step(1.0/FPS, 6*30, 2*30)
self.t += 1.0/FPS
self.state = self.render("state_pixels")
step_reward = 0
done = False
if action is not None: # First step without action, called from reset()
self.reward -= 0.1
# We actually don't want to count fuel spent, we want car to be faster.
# self.reward -= 10 * self.car.fuel_spent / ENGINE_POWER
self.car.fuel_spent = 0.0
step_reward = self.reward - self.prev_reward
self.prev_reward = self.reward
if self.tile_visited_count==len(self.track):
done = True
x, y = self.car.hull.position
if abs(x) > PLAYFIELD or abs(y) > PLAYFIELD:
done = True
step_reward = -100
return self.state, step_reward, done, {}
def render(self, mode='human'):
assert mode in ['human', 'state_pixels', 'rgb_array']
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(WINDOW_W, WINDOW_H)
self.score_label = pyglet.text.Label('0000', font_size=36,
x=20, y=WINDOW_H*2.5/40.00, anchor_x='left', anchor_y='center',
color=(255,255,255,255))
self.transform = rendering.Transform()
if "t" not in self.__dict__: return # reset() not called yet
zoom = 0.1*SCALE*max(1-self.t, 0) + ZOOM*SCALE*min(self.t, 1) # Animate zoom first second
zoom_state = ZOOM*SCALE*STATE_W/WINDOW_W
zoom_video = ZOOM*SCALE*VIDEO_W/WINDOW_W
scroll_x = self.car.hull.position[0]
scroll_y = self.car.hull.position[1]
angle = -self.car.hull.angle
vel = self.car.hull.linearVelocity
if np.linalg.norm(vel) > 0.5:
angle = math.atan2(vel[0], vel[1])
self.transform.set_scale(zoom, zoom)
self.transform.set_translation(
WINDOW_W/2 - (scroll_x*zoom*math.cos(angle) - scroll_y*zoom*math.sin(angle)),
WINDOW_H/4 - (scroll_x*zoom*math.sin(angle) + scroll_y*zoom*math.cos(angle)) )
self.transform.set_rotation(angle)
self.car.draw(self.viewer, mode!="state_pixels")
arr = None
win = self.viewer.window
win.switch_to()
win.dispatch_events()
win.clear()
t = self.transform
if mode=='rgb_array':
VP_W = VIDEO_W
VP_H = VIDEO_H
elif mode == 'state_pixels':
VP_W = STATE_W
VP_H = STATE_H
else:
pixel_scale = 1
if hasattr(win.context, '_nscontext'):
pixel_scale = win.context._nscontext.view().backingScaleFactor() # pylint: disable=protected-access
VP_W = int(pixel_scale * WINDOW_W)
VP_H = int(pixel_scale * WINDOW_H)
gl.glViewport(0, 0, VP_W, VP_H)
t.enable()
self.render_road()
for geom in self.viewer.onetime_geoms:
geom.render()
self.viewer.onetime_geoms = []
t.disable()
self.render_indicators(WINDOW_W, WINDOW_H)
# # YUJIN
# image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
# arr = np.fromstring(image_data.data, dtype=np.uint8, sep='')
# arr = arr.reshape(VP_H, VP_W, 4)
# arr[:(VP_H // 2), :, :3] = 0
# image_data = arr.data.__str__()
# pyglet.image.ImageData(VP_H, VP_W, 'RGBA', image_data, pitch=VP_W*4)
if mode == 'human':
win.flip()
return self.viewer.isopen
image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
arr = np.fromstring(image_data.data, dtype=np.uint8, sep='')
arr = arr.reshape(VP_H, VP_W, 4)
arr = arr[::-1, :, 0:3]
return arr
# YUJIN
if VP_W == 96:
self.step_cnt += 1
if self.step_cnt > 1:
import os
import cv2
# img_path = os.path.join(
# '/usr/local/google/home/yujintang/PycharmProjects/evolution_strategy/models/kof/images')
# img_path = os.path.join(
# img_path, 'out{}.png'.format(self.step_cnt + 1900 - 200))
# img_data = cv2.imread(img_path, cv2.IMREAD_COLOR)
# img_data = cv2.resize(img_data, (VP_W, VP_H))
# if VP_H == 96:
# img_data = cv2.resize(img_data, (144, 96))
# h, w, c = img_data.shape
# img_data = img_data[:, (w//2 - VP_W//2):(w//2 + VP_W//2), :]
mask = np.zeros([VP_H, VP_W], dtype=bool)
mask[(arr[:, :, 1] == 204) | (arr[:, :, 1] == 230)] = True
rand_noise = np.random.randint(0, 256, VP_W * VP_H * 3)
rand_noise = rand_noise.reshape([VP_H, VP_W, 3])
for i in range(3):
# arr[:, :, i][mask] = img_data[:, :, 2-i][mask]
arr[:, :, i][mask] = rand_noise[:, :, i][mask]
self.step_cnt += 1
return arr
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def render_road(self):
gl.glBegin(gl.GL_QUADS)
gl.glColor4f(*self.grass_color)
gl.glVertex3f(-PLAYFIELD, +PLAYFIELD, 0)
gl.glVertex3f(+PLAYFIELD, +PLAYFIELD, 0)
gl.glVertex3f(+PLAYFIELD, -PLAYFIELD, 0)
gl.glVertex3f(-PLAYFIELD, -PLAYFIELD, 0)
gl.glColor4f(0.4, 0.9, 0.4, 1.0)
k = PLAYFIELD/20.0
for x in range(-20, 20, 2):
for y in range(-20, 20, 2):
gl.glVertex3f(k*x + k, k*y + 0, 0)
gl.glVertex3f(k*x + 0, k*y + 0, 0)
gl.glVertex3f(k*x + 0, k*y + k, 0)
gl.glVertex3f(k*x + k, k*y + k, 0)
for poly, color in self.road_poly:
gl.glColor4f(color[0], color[1], color[2], 1)
for p in poly:
gl.glVertex3f(p[0], p[1], 0)
# YUJIN
for poly, color in self.froad_poly:
gl.glColor4f(color[0], color[1], color[2], 1)
for p in poly:
gl.glVertex3f(p[0], p[1], 0)
gl.glEnd()
def render_indicators(self, W, H):
gl.glBegin(gl.GL_QUADS)
s = W/40.0
h = H/40.0
gl.glColor4f(0,0,0,1)
gl.glVertex3f(W, 0, 0)
gl.glVertex3f(W, 5*h, 0)
gl.glVertex3f(0, 5*h, 0)
gl.glVertex3f(0, 0, 0)
if self._modification_type == 'bar':
gl.glVertex3f(W, 5*h, 0)
gl.glVertex3f(W, H, 0)
gl.glVertex3f(W-3*s, H, 0)
gl.glVertex3f(W-3*s, 5*h, 0)
gl.glVertex3f(3*s, 5*h, 0)
gl.glVertex3f(3*s, H, 0)
gl.glVertex3f(0, H, 0)
gl.glVertex3f(0, 5*h, 0)
def vertical_ind(place, val, color):
gl.glColor4f(color[0], color[1], color[2], 1)
gl.glVertex3f((place+0)*s, h + h*val, 0)
gl.glVertex3f((place+1)*s, h + h*val, 0)
gl.glVertex3f((place+1)*s, h, 0)
gl.glVertex3f((place+0)*s, h, 0)
def horiz_ind(place, val, color):
gl.glColor4f(color[0], color[1], color[2], 1)
gl.glVertex3f((place+0)*s, 4*h , 0)
gl.glVertex3f((place+val)*s, 4*h, 0)
gl.glVertex3f((place+val)*s, 2*h, 0)
gl.glVertex3f((place+0)*s, 2*h, 0)
true_speed = np.sqrt(np.square(self.car.hull.linearVelocity[0]) + np.square(self.car.hull.linearVelocity[1]))
vertical_ind(5, 0.02*true_speed, (1,1,1))
vertical_ind(7, 0.01*self.car.wheels[0].omega, (0.0,0,1)) # ABS sensors
vertical_ind(8, 0.01*self.car.wheels[1].omega, (0.0,0,1))
vertical_ind(9, 0.01*self.car.wheels[2].omega, (0.2,0,1))
vertical_ind(10,0.01*self.car.wheels[3].omega, (0.2,0,1))
horiz_ind(20, -10.0*self.car.wheels[0].joint.angle, (0,1,0))
horiz_ind(30, -0.8*self.car.hull.angularVelocity, (1,0,0))
gl.glEnd()
self.score_label.text = "%04i" % self.reward
self.score_label.draw()
if __name__=="__main__":
from pyglet.window import key
a = np.array( [0.0, 0.0, 0.0] )
def key_press(k, mod):
global restart
if k==0xff0d: restart = True
if k==key.LEFT: a[0] = -1.0
if k==key.RIGHT: a[0] = +1.0
if k==key.UP: a[1] = +1.0
if k==key.DOWN: a[2] = +0.8 # set 1.0 for wheels to block to zero rotation
def key_release(k, mod):
if k==key.LEFT and a[0]==-1.0: a[0] = 0
if k==key.RIGHT and a[0]==+1.0: a[0] = 0
if k==key.UP: a[1] = 0
if k==key.DOWN: a[2] = 0
env = CarRacing()
env.render()
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
record_video = False
if record_video:
from gym.wrappers.monitor import Monitor
env = Monitor(env, '/tmp/video-test', force=True)
isopen = True
while isopen:
env.reset()
total_reward = 0.0
steps = 0
restart = False
while True:
s, r, done, info = env.step(a)
total_reward += r
if steps % 200 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
#import matplotlib.pyplot as plt
#plt.imshow(s)
#plt.savefig("test.jpeg")
steps += 1
isopen = env.render()
if done or restart or isopen == False:
break
env.close()
| 38.589704
| 120
| 0.552676
|
4ea53d53f8551b5cee2aa6cc4c687edf8f2fa83e
| 904
|
py
|
Python
|
sdk/python/pulumi_azure_native/relay/v20170401/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/relay/v20170401/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/relay/v20170401/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_hybrid_connection import *
from .get_hybrid_connection_authorization_rule import *
from .get_namespace import *
from .get_namespace_authorization_rule import *
from .get_wcf_relay import *
from .get_wcf_relay_authorization_rule import *
from .hybrid_connection import *
from .hybrid_connection_authorization_rule import *
from .list_hybrid_connection_keys import *
from .list_namespace_keys import *
from .list_wcf_relay_keys import *
from .namespace import *
from .namespace_authorization_rule import *
from .wcf_relay import *
from .wcf_relay_authorization_rule import *
from ._inputs import *
from . import outputs
| 34.769231
| 80
| 0.800885
|
dc53577430a5bdda00c7728e4f0d77513fd666e5
| 2,122
|
py
|
Python
|
mqttsqlite/mqttsqlite.py
|
gemasr/mqttSqlLite
|
aa6b5515bcd2bf47ae1207a241886a506a86ec0c
|
[
"MIT"
] | null | null | null |
mqttsqlite/mqttsqlite.py
|
gemasr/mqttSqlLite
|
aa6b5515bcd2bf47ae1207a241886a506a86ec0c
|
[
"MIT"
] | null | null | null |
mqttsqlite/mqttsqlite.py
|
gemasr/mqttSqlLite
|
aa6b5515bcd2bf47ae1207a241886a506a86ec0c
|
[
"MIT"
] | null | null | null |
import paho.mqtt.client as mqtt
from .core.mqtt_controller import MqttController
from .orm.models import Setting, create_tables
import argparse
from .settings.private_settings import *
def on_connect(client, userdata, flags, rc):
print('connecting...')
mqtt_controller = MqttController()
mqtt_controller.on_connect(client)
print('connected')
def on_message(client, userdata, msg):
print('received message')
print(msg.topic + ' : ' + str(msg.payload))
mqtt_controller = MqttController()
mqtt_controller.on_message(client, msg)
print('processed Message')
def init_settings(args):
print('Checking Settings')
try:
settings = Setting.select()
settings.count()
except:
create_tables()
settings = Setting.select()
    # Only persist values that differ from the argparse defaults set in main().
    if args.mqtt_host != 'localhost':
        set_host(args.mqtt_host)
    if args.mqtt_port != 1883:
        set_port(args.mqtt_port)
    if args.root_topic != 'logger/':
        set_root_topic(args.root_topic)
    if args.management_password != 'admin1234':
        set_management_pass(args.management_password)
    if args.query_password != 'query1234':
        set_query_pass(args.query_password)
def main():
parser = argparse.ArgumentParser(description='Sqlite Logger for MQTT broker')
parser.add_argument('--host', dest='mqtt_host', default='localhost', help='Mqtt Broker URL')
parser.add_argument('--port', dest='mqtt_port', default=1883, help='Mqtt Broker Port')
parser.add_argument('--root', dest='root_topic', default='logger/', help='Root topic for logger commands')
parser.add_argument('--mgpassword', dest='management_password', default='admin1234', help='password for management options')
parser.add_argument('--qrpassword', dest='query_password', default='query1234', help='password for query options')
init_settings(parser.parse_args())
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
print(get_host() + ':' + str(get_port()))
client.connect(get_host(), int(get_port()), 60)
client.loop_forever()
| 35.366667
| 128
| 0.705467
|
a0c8633a024b672af1374e38374758af8f7e7725
| 685
|
py
|
Python
|
venues/migrations/0024_auto_20190723_2130.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 18
|
2018-12-06T01:46:37.000Z
|
2021-10-17T10:37:17.000Z
|
venues/migrations/0024_auto_20190723_2130.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 194
|
2018-11-04T12:50:49.000Z
|
2022-01-06T22:43:43.000Z
|
venues/migrations/0024_auto_20190723_2130.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 7
|
2019-03-18T05:36:06.000Z
|
2020-12-25T03:27:29.000Z
|
# Generated by Django 2.2.3 on 2019-07-23 21:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("venues", "0023_auto_20190723_0226"),
]
operations = [
migrations.AddField(
model_name="venue",
name="latitude",
field=models.DecimalField(
blank=True, decimal_places=8, max_digits=10, null=True
),
),
migrations.AddField(
model_name="venue",
name="longitude",
field=models.DecimalField(
blank=True, decimal_places=8, max_digits=11, null=True
),
),
]
| 24.464286
| 70
| 0.550365
|
54f6e399113ff38c9ed6834aca56c4b14e80e92c
| 2,307
|
py
|
Python
|
src/yawgmoth.py
|
Providenciyye/yawgmoth
|
7f3b4bba26919ca29fdcfe58edc595f17c905a2b
|
[
"MIT"
] | null | null | null |
src/yawgmoth.py
|
Providenciyye/yawgmoth
|
7f3b4bba26919ca29fdcfe58edc595f17c905a2b
|
[
"MIT"
] | null | null | null |
src/yawgmoth.py
|
Providenciyye/yawgmoth
|
7f3b4bba26919ca29fdcfe58edc595f17c905a2b
|
[
"MIT"
] | null | null | null |
# ---------------------------
# Imports
# ---------------------------
import discord
import sys
import commands
# ---------------------------
# Initialization
# ---------------------------
yawgmoth = discord.Client()
yawgmoth.login(sys.argv[1], sys.argv[2])
# ---------------------------
# Event: Ready
# ---------------------------
@yawgmoth.event
def on_ready():
server = yawgmoth.servers[0]
channel = server.channels[0]
print 'User:' + '\t\t' + yawgmoth.user.name
print 'ID:' + '\t\t' + yawgmoth.user.id
print 'Server:' + '\t\t' + server.name + ", " + server.id
yawgmoth.send_message(channel, 'I rise...')
# ---------------------------
# Event: Message
# ---------------------------
@yawgmoth.event
def on_message(message):
response = commands.cmd_fetch(message)
if message.content.startswith('!details'):
response += commands.cmd_details(message)
if message.content.startswith('!rulings'):
response += commands.cmd_rulings(message)
if message.content.startswith('!standardban'):
response += commands.cmd_standardban(message)
if message.content.startswith('!modernban'):
response += commands.cmd_modernban(message)
if message.content.startswith('!legacyban'):
response += commands.cmd_legacyban(message)
if message.content.startswith('!vintageban'):
response += commands.cmd_vintageban(message)
if message.content.startswith('!edhban'):
response += commands.cmd_edhban(message)
if message.content.startswith('!obey'):
response += commands.cmd_obey(message)
if message.content.startswith('!moon'):
response += commands.cmd_moon(message)
if message.content.startswith('!sun'):
response += ':sun_with_face:'
if message.content.startswith('!git'):
response += commands.cmd_git(message)
if message.content.startswith('!version'):
response += commands.cmd_version(message)
if message.content.startswith('!rules'):
response += 'http://media.wizards.com/2016/docs/MagicCompRules_04082016.pdf'
if message.content.startswith('!reset'):
response += commands.cmd_reset(message)
yawgmoth.send_message(message.channel, response)
# ---------------------------
# Startup
# ---------------------------
yawgmoth.run()
| 34.954545
| 84
| 0.596879
|
8665aef169380074e912f71217b8fbe735936c81
| 6,542
|
py
|
Python
|
dask/array/wrap.py
|
JimCircadian/dask
|
212c1058c19fa543d60d9a294f8f9feda3e1c9dc
|
[
"BSD-3-Clause"
] | null | null | null |
dask/array/wrap.py
|
JimCircadian/dask
|
212c1058c19fa543d60d9a294f8f9feda3e1c9dc
|
[
"BSD-3-Clause"
] | null | null | null |
dask/array/wrap.py
|
JimCircadian/dask
|
212c1058c19fa543d60d9a294f8f9feda3e1c9dc
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from ..base import tokenize
from ..layers import BlockwiseCreateArray
from ..utils import funcname
from .core import Array, normalize_chunks
from .utils import (
empty_like_safe,
full_like_safe,
meta_from_array,
ones_like_safe,
zeros_like_safe,
)
def _parse_wrap_args(func, args, kwargs, shape):
if isinstance(shape, np.ndarray):
shape = shape.tolist()
if not isinstance(shape, (tuple, list)):
shape = (shape,)
name = kwargs.pop("name", None)
chunks = kwargs.pop("chunks", "auto")
dtype = kwargs.pop("dtype", None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
dtype = np.dtype(dtype)
chunks = normalize_chunks(chunks, shape, dtype=dtype)
name = name or funcname(func) + "-" + tokenize(
func, shape, chunks, dtype, args, kwargs
)
return {
"shape": shape,
"dtype": dtype,
"kwargs": kwargs,
"chunks": chunks,
"name": name,
}
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
if "shape" not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop("shape")
if isinstance(shape, Array):
raise TypeError(
"Dask array input not supported. "
"Please use tuple, list, or a 1D numpy array instead."
)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
func = partial(func, dtype=dtype, **kwargs)
graph = BlockwiseCreateArray(
name,
func,
shape,
chunks,
)
return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get("meta", None))
def wrap_func_like(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
x = args[0]
meta = meta_from_array(x)
shape = kwargs.get("shape", x.shape)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
shapes = list(shapes)
kw = [kwargs for _ in shapes]
for i, s in enumerate(list(shapes)):
kw[i]["shape"] = s
vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, meta=meta.astype(dtype))
def wrap_func_like_safe(func, func_like, *args, **kwargs):
"""
    Safe implementation for wrap_func_like(): attempt to use func_like(); if it
    raises a TypeError (e.g. because the shape keyword argument is not accepted),
    fall back to func().
"""
try:
return func_like(*args, **kwargs)
except TypeError:
return func(*args, **kwargs)
@curry
def wrap(wrap_func, func, **kwargs):
func_like = kwargs.pop("func_like", None)
if func_like is None:
f = partial(wrap_func, func, **kwargs)
else:
f = partial(wrap_func, func_like, **kwargs)
template = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also features
optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
Original signature follows below.
"""
if func.__doc__ is not None:
f.__doc__ = template % {"name": func.__name__} + func.__doc__
f.__name__ = "blocked_" + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
@curry
def _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):
if shape == ():
return np.broadcast_to(func(meta, shape=(), *args, **kwargs), shape)
else:
return np.broadcast_to(func(meta, shape=1, *args, **kwargs), shape)
def broadcast_trick(func):
"""
Provide a decorator to wrap common numpy function with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, thus reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
    These arrays are not only more efficient locally, but dask serialisation is
    aware of the _real_ size of these arrays and can therefore send them around
    efficiently and schedule accordingly.
    Note that these arrays are read-only and numpy will refuse to assign to them,
    so this should be safe.
"""
inner = _broadcast_trick_inner(func)
if func.__doc__ is not None:
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
if inner.__name__.endswith("_like_safe"):
inner.__name__ = inner.__name__[:-10]
return inner
ones = w(broadcast_trick(ones_like_safe), dtype="f8")
zeros = w(broadcast_trick(zeros_like_safe), dtype="f8")
empty = w(broadcast_trick(empty_like_safe), dtype="f8")
w_like = wrap(wrap_func_like_safe)
empty_like = w_like(np.empty, func_like=np.empty_like)
# full and full_like require special casing due to argument check on fill_value
# Generate wrapped functions only once
_full = w(broadcast_trick(full_like_safe))
_full_like = w_like(np.full, func_like=np.full_like)
# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472
_full.__doc__ = _full.__doc__.replace(
    "array([0.1,  0.1,  0.1,  0.1,  0.1,  0.1])",  # older numpy repr with double spaces
    "array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
)
def full(shape, fill_value, *args, **kwargs):
# np.isscalar has somewhat strange behavior:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
if "dtype" not in kwargs:
kwargs["dtype"] = type(fill_value)
return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
def full_like(a, fill_value, *args, **kwargs):
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full_like(
a=a,
fill_value=fill_value,
*args,
**kwargs,
)
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
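# --- Editor's sketch (not part of dask/array/wrap.py) ------------------------
# Hedged usage example of the wrapped creators defined above, assuming a
# working dask installation; the local import avoids a circular import at
# module load time.
def _example_blocked_ones():
    import dask.array as da

    x = da.ones((1000, 1000), chunks=(250, 250), dtype="f8")
    assert x.npartitions == 16                      # a 4 x 4 grid of uniform blocks
    assert float(x.sum().compute()) == 1000 * 1000  # each block broadcasts a single value
    return x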
| 28.077253
| 87
| 0.64491
|
2a3cdcab4a9349beeb80478d7c2c283f13bf8d4d
| 290
|
py
|
Python
|
lcd-tester.py
|
stefan-krstikj/pi-weather-display
|
0db0a3387ba4ab90135776faeafa42914cfcea9f
|
[
"MIT"
] | null | null | null |
lcd-tester.py
|
stefan-krstikj/pi-weather-display
|
0db0a3387ba4ab90135776faeafa42914cfcea9f
|
[
"MIT"
] | null | null | null |
lcd-tester.py
|
stefan-krstikj/pi-weather-display
|
0db0a3387ba4ab90135776faeafa42914cfcea9f
|
[
"MIT"
] | null | null | null |
# import necessary libraries
from lcd import lcddriver
import time
import pyowm  # note: not used in this test script
# initialize the display
lcd = lcddriver.lcd()
lcd.lcd_display_string("test string 1", 1)
time.sleep(2)
lcd.centered = 1
lcd.lcd_clear()
time.sleep(2)
lcd.lcd_display_string("test string 2", 1)
time.sleep(1)
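# --- Editor's sketch (not part of the original tester) -----------------------
# The driver above takes a plain string and a line number, so "centering" can
# be done by padding the text first; the 16-column width is an assumption
# about the attached display.
def center_text(text, width=16):
    # Pad `text` with spaces so it appears centered on a `width`-column LCD.
    return text.center(width)[:width]

# Example (hypothetical): lcd.lcd_display_string(center_text("test string 3"), 2)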
| 18.125
| 42
| 0.765517
|
33c72717e7f84774d886b46612092f183e348704
| 943
|
py
|
Python
|
CyberPi/Python with CyberPi 021.py
|
nothingszpt/PythonWithHardware
|
95b580824f1bb11e36c0a27fdcbd4aec07548b59
|
[
"MIT"
] | 2
|
2020-08-15T02:49:19.000Z
|
2020-08-15T02:49:31.000Z
|
CyberPi/Python with CyberPi 021.py
|
nothingszpt/PythonWithHardware
|
95b580824f1bb11e36c0a27fdcbd4aec07548b59
|
[
"MIT"
] | null | null | null |
CyberPi/Python with CyberPi 021.py
|
nothingszpt/PythonWithHardware
|
95b580824f1bb11e36c0a27fdcbd4aec07548b59
|
[
"MIT"
] | 1
|
2022-02-24T05:30:30.000Z
|
2022-02-24T05:30:30.000Z
|
""""
名称:021 音效的终止
硬件:童芯派
功能介绍:通过条件语句和break关键字结束音效的播放。
难度:⭐⭐
支持的模式:在线 上传
使用功能解读:
1. cyberpi.audio.play_drum("bass-drum", 0.25)
童芯派.音效.架子鼓(“type”,beat)
tpye处填入音效类型,beat参数填入节拍,0.25拍约等于0.25秒
2.break
结束
一般与if语句一起使用,用于跳出当前所在的循环。
"""
# ---------程序分割线----------------程序分割线----------------程序分割线----------
import cyberpi
cyberpi.display.clear()
cyberpi.audio.set_vol(100)
while True:  # loop forever
    if cyberpi.controller.is_press("a"):  # if button A on the CyberPi is pressed:
        cyberpi.console.println("Program finished!")  # show "Program finished!" on the screen
        break  # jump out of the enclosing while loop
    else:  # otherwise
        cyberpi.audio.play_drum("bass-drum", 0.25)  # play the bass-drum sound for 0.25 beat
        cyberpi.audio.play_drum("bass-drum", 0.25)  # play the bass-drum sound for 0.25 beat
        cyberpi.audio.play_drum("snare", 0.25)  # play the snare sound for 0.25 beat
| 27.735294
| 77
| 0.518558
|
1b8a7abbc129e30921e2726d3c4e327b2ee8759c
| 4,467
|
py
|
Python
|
stats/basic_research/minimal_diff_between_sublineage.py
|
methylgrammarlab/proj_scwgbs
|
287196898796eb617fef273bfaf9e978a57047dc
|
[
"MIT"
] | null | null | null |
stats/basic_research/minimal_diff_between_sublineage.py
|
methylgrammarlab/proj_scwgbs
|
287196898796eb617fef273bfaf9e978a57047dc
|
[
"MIT"
] | null | null | null |
stats/basic_research/minimal_diff_between_sublineage.py
|
methylgrammarlab/proj_scwgbs
|
287196898796eb617fef273bfaf9e978a57047dc
|
[
"MIT"
] | null | null | null |
import argparse
import glob
import itertools
import os
import re
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
HIGH_T = 0.5
LOW_T = 0.25
MID = 0.5
sys.path.append(os.getcwd())
from format_files.format_cpg_context_map import NUMBER_OF_ORPH_PER_INDEX
from commons import consts
from format_files import handle_pmds, format_sublineage_info
ORPH_COLS = ["num_cpg_in_%s" % i for i in NUMBER_OF_ORPH_PER_INDEX]
CPG_FORMAT_FILE_RE = re.compile(".+(CRC\d+)_(chr\d+).dummy.pkl.zip")
CPG_FORMAT_FILE_FORMAT = "all_cpg_ratios_*_%s.dummy.pkl.zip"
BEDGRAPH_FILE_NAME_RE = re.compile(".*_chr_(\d+)_.*")
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument('--cpg_format_files', help='Path to folder or file of parsed scWGBS', required=True)
    parser.add_argument('--sublineage_cov_folder', help='Path to a folder of sublineage covariance bedgraph files',
                        required=False)
parser.add_argument('--output_folder', help='Path of the output folder', required=False)
args = parser.parse_args()
return args
def format_args():
"""
Format the args for this script
:return: The path of the files and the output directory
"""
args = parse_input()
output = args.output_folder
if not output:
output = os.path.dirname(sys.argv[0])
if os.path.isdir(args.cpg_format_files):
cpg_format_file_path = os.path.join(args.cpg_format_files, CPG_FORMAT_FILE_FORMAT % '*')
all_cpg_format_file_paths = glob.glob(cpg_format_file_path)
else:
all_cpg_format_file_paths = [args.cpg_format_files]
covariance_dict = get_covariance_dict(args.sublineage_cov_folder)
return all_cpg_format_file_paths, covariance_dict, output
def get_covariance_dict(covariance_path):
covariance_path = os.path.join(covariance_path, "*.bedgraph")
d = {}
bedgraph_files = glob.glob(covariance_path)
for f in bedgraph_files:
name = f.split("and")[1].split(".")[0]
d[name] = f
return d
def minimal_diff_in_sub_lineage_meth_levels(df, patient, chromosome):
sublineage_info = format_sublineage_info.get_sublineage_info(consts.SUBLINEAGE_FILE_LOCAL_DROR)
patient_info = sublineage_info[patient]
dfs = []
for sublineage in patient_info:
if sublineage == "NC":
continue
region_cell_ids = []
for sample in patient_info[sublineage]:
region_cell_ids.extend([cell_id for cell_id in df.index if cell_id.startswith(sample)])
region_df = df.loc[region_cell_ids, :].mean(axis=0, skipna=True)
region_df[region_df >= HIGH_T] = 1
region_df[region_df <= LOW_T] = 0
dfs.append(region_df)
accuracy = 0
for couple in list(itertools.combinations(dfs, 2)):
df1 = couple[0]
df2 = couple[1]
index = np.logical_and(np.logical_or(df1 == 0, df1 == 1), np.logical_or(df2 == 0, df2 == 1))
diff = np.sum(np.abs(df1[index] - df2[index]) == 0)
accuracy = max(diff / np.sum(index == 1), accuracy)
print(accuracy)
def minimal_diff_in_sub_lineage_cov(covariance_dict, patient, chromosome):
### Unfinished ###
sublineage_info = format_sublineage_info.get_sublineage_info(consts.SUBLINEAGE_FILE_LOCAL_DROR)
patient_info = sublineage_info[patient]
    dfs = {}
    for sublineage in patient_info:
        if sublineage == "NC":
            continue
        covariance_pmd_df = handle_pmds.convert_bedgraph_to_df_with_pmd_filter(covariance_dict[sublineage],
                                                                               chromosome)
        # Assumed intent (the original left this unfinished): keep the filtered
        # covariance frame so the pairwise comparison below has data to work on.
        dfs[sublineage] = covariance_pmd_df
    accuracy = 0
    for couple in list(itertools.combinations(list(dfs.values()), 2)):
        df1 = couple[0]
        df2 = couple[1]
        index = np.logical_and(np.logical_or(df1 == 0, df1 == 1), np.logical_or(df2 == 0, df2 == 1))
        diff = np.sum(np.abs(df1[index] - df2[index]) == 0)
        accuracy = max(diff / np.sum(index == 1), accuracy)
    print(accuracy)
def main():
input_files, covariance_dict, output_dir = format_args()
for file_path in tqdm(input_files):
patient, chromosome = CPG_FORMAT_FILE_RE.findall(file_path)[0]
df = pd.read_pickle(file_path)
pmd_df = handle_pmds.filtered_out_non_pmd(df, chromosome)
# minimal_diff_in_sub_lineage_meth_levels(pmd_df, patient, chromosome)
minimal_diff_in_sub_lineage_cov(covariance_dict, patient, chromosome)
if __name__ == '__main__':
main()
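# --- Editor's sketch (not part of the original analysis) ---------------------
# Illustrates the pairwise agreement computed above on tiny synthetic profiles:
# only positions that are confidently 0/1 in *both* profiles are compared, and
# agreement is the fraction of those positions with identical calls.
def _pairwise_agreement_demo():
    df1 = np.array([0, 1, 1, 0.5, 0])
    df2 = np.array([0, 1, 0, 1.0, 0])
    index = np.logical_and(np.logical_or(df1 == 0, df1 == 1),
                           np.logical_or(df2 == 0, df2 == 1))
    return np.sum(np.abs(df1[index] - df2[index]) == 0) / np.sum(index)  # -> 0.75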
| 32.369565
| 108
| 0.680099
|
634f0c111d44660dddd56a07d5518f7f7a92e86b
| 10,182
|
py
|
Python
|
Xenios-Source/contrib/linearize/linearize-data.py
|
cspiliakos/xenioscoin
|
a80871d38fffc328b6dfd2d7cd919e373153a35b
|
[
"MIT"
] | 3
|
2020-12-19T11:26:09.000Z
|
2022-02-14T22:51:03.000Z
|
Xenios-Source/contrib/linearize/linearize-data.py
|
cspiliakos/xenioscoin
|
a80871d38fffc328b6dfd2d7cd919e373153a35b
|
[
"MIT"
] | 1
|
2021-02-13T23:44:16.000Z
|
2021-02-14T00:58:28.000Z
|
Xenios-Source/contrib/linearize/linearize-data.py
|
cspiliakos/xenioscoin
|
a80871d38fffc328b6dfd2d7cd919e373153a35b
|
[
"MIT"
] | 7
|
2021-02-13T09:35:43.000Z
|
2022-02-14T22:51:05.000Z
|
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import hashlib
import xenios_hash
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
def uint32(x):
return x & 0xffffffff
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return b''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return b''.join(out_words)
def calc_hdr_hash(blk_hdr):
#hash1 = hashlib.sha256()
#hash1.update(blk_hdr)
#hash1_o = hash1.digest()
#hash2 = hashlib.sha256()
#hash2.update(hash1_o)
#hash2_o = hash2.digest()
#return hash2_o
pow_hash = xenios_hash.getPoWHash(blk_hdr)
return pow_hash
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hexlify(hash).decode('utf-8')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
if settings['rev_hash_bytes'] == 'true':
line = hex_switchEndian(line)
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
self.lastDate = blkDate
if self.outF:
self.outF.close()
if self.setFileTime:
os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
self.outFname = self.settings['output_file']
else:
self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + self.outFname)
self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
self.hash_str = calc_hash_str(blk_hdr)
if not self.hash_str in blkmap:
# Because blocks can be written to files out-of-order as of 0.10, the script
# may encounter blocks it doesn't know about. Treat as debug output.
if settings['debug_output'] == 'true':
print("Skipping unknown block " + self.hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[self.hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
# Force hash byte format setting to be lowercase to make comparisons easier.
# Also place upfront in case any settings need to know about it.
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
if 'netmagic' not in settings:
settings['netmagic'] = 'bf0c6bbd'
if 'genesis' not in settings:
settings['genesis'] = '00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000 * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
if 'debug_output' not in settings:
settings['debug_output'] = 'false'
settings['max_out_sz'] = int(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
settings['debug_output'] = settings['debug_output'].lower()
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
# Block hash map won't be byte-reversed. Neither should the genesis hash.
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
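# --- Editor's sketch (not part of the original script) -----------------------
# A minimal CONFIG-FILE accepted by the key=value parser above; every path is
# a placeholder and any key left out falls back to the defaults set earlier.
#
#   netmagic=bf0c6bbd
#   genesis=00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6
#   input=/path/to/blocks
#   hashlist=hashlist.txt
#   output_file=/path/to/bootstrap.dat
#   max_out_sz=1000000000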
| 31.233129
| 108
| 0.693184
|
3fb2498f5bb2cbbb17d6f6f559fafef97689bee8
| 4,326
|
py
|
Python
|
LeetCode-All-Solution/Python3/LC-0088-Merge-Sorted-Array.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0088-Merge-Sorted-Array.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0088-Merge-Sorted-Array.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0088-Merge-Sorted-Array.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-02-26
=================================================================="""
import sys
import time
from typing import List
# import functools
"""
LeetCode - 0088 - (Easy) - Merge Sorted Array
https://leetcode.com/problems/merge-sorted-array/
Description & Requirement:
You are given two integer arrays nums1 and nums2, sorted in non-decreasing order,
and two integers m and n, representing the number of elements in nums1 and nums2 respectively.
Merge nums1 and nums2 into a single array sorted in non-decreasing order.
The final sorted array should not be returned by the function,
but instead be stored inside the array nums1. To accommodate this,
nums1 has a length of m + n, where the first m elements denote the elements that should be merged,
and the last n elements are set to 0 and should be ignored. nums2 has a length of n.
Example 1:
Input: nums1 = [1,2,3,0,0,0], m = 3, nums2 = [2,5,6], n = 3
Output: [1,2,2,3,5,6]
Explanation: The arrays we are merging are [1,2,3] and [2,5,6].
The result of the merge is [1,2,2,3,5,6] with the underlined elements coming from nums1.
Example 2:
Input: nums1 = [1], m = 1, nums2 = [], n = 0
Output: [1]
Explanation: The arrays we are merging are [1] and [].
The result of the merge is [1].
Example 3:
Input: nums1 = [0], m = 0, nums2 = [1], n = 1
Output: [1]
Explanation: The arrays we are merging are [] and [1].
The result of the merge is [1].
Note that because m = 0, there are no elements in nums1.
The 0 is only there to ensure the merge result can fit in nums1.
Constraints:
nums1.length == m + n
nums2.length == n
0 <= m, n <= 200
1 <= m + n <= 200
-10^9 <= nums1[i], nums2[j] <= 10^9
"""
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
# exception case
assert isinstance(m, int) and m >= 0 and isinstance(n, int) and n >= 0 and m + n >= 1
assert isinstance(nums1, list) and len(nums1) == m + n
assert isinstance(nums2, list) and len(nums2) == n
# main method: (just merge, in-place modify nums1)
# note that: array is not like linked list, insert in the middle is not fast
# space optimize: rather than create nums1_valid, scan from the end and put bigger num to the end of nums1
return self._merge(nums1, m, nums2, n)
def _merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
nums1_valid = nums1[:m]
cursor_1, cursor_2 = 0, 0
while cursor_1 < m and cursor_2 < n: # scan and put in the smaller number at a time
if nums1_valid[cursor_1] <= nums2[cursor_2]:
nums1[cursor_1 + cursor_2] = nums1_valid[cursor_1]
cursor_1 += 1
else:
nums1[cursor_1 + cursor_2] = nums2[cursor_2]
cursor_2 += 1
while cursor_1 < m: # the rest part in nums1_valid
nums1[cursor_1 + cursor_2] = nums1_valid[cursor_1]
cursor_1 += 1
while cursor_2 < n: # the rest part in nums2
nums1[cursor_1 + cursor_2] = nums2[cursor_2]
cursor_2 += 1
def main():
# Example 1: Output: [1,2,2,3,5,6]
nums1 = [1, 2, 3, 0, 0, 0]
m = 3
nums2 = [2, 5, 6]
n = 3
# Example 2: Output: [1]
# nums1 = [1]
# m = 1
# nums2 = []
# n = 0
# Example 3: Output: [1]
# nums1 = [0]
# m = 0
# nums2 = [1]
# n = 1
# init instance
solution = Solution()
# run & time
start = time.process_time()
# ans = solution.merge(nums1, m, nums2, n)
solution.merge(nums1, m, nums2, n)
end = time.process_time()
# show answer
print('\nAnswer:')
# print(ans)
print(nums1)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
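# --- Editor's sketch (not part of the original solution) ---------------------
# The O(1)-space variant hinted at in the comments above: merge from the back
# of nums1 so the valid prefix never needs to be copied.
def merge_from_the_end(nums1, m, nums2, n):
    i, j, k = m - 1, n - 1, m + n - 1
    while j >= 0:  # nums2 still has elements left to place
        if i >= 0 and nums1[i] > nums2[j]:
            nums1[k] = nums1[i]
            i -= 1
        else:
            nums1[k] = nums2[j]
            j -= 1
        k -= 1

# Example: a = [1, 2, 3, 0, 0, 0]; merge_from_the_end(a, 3, [2, 5, 6], 3); a == [1, 2, 2, 3, 5, 6]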
| 33.534884
| 118
| 0.579519
|
b9719d3980459b485d7e943cc669f1e86b6c962f
| 3,268
|
py
|
Python
|
python/GafferSceneUI/SeedsUI.py
|
sebaDesmet/gaffer
|
47b2d093c40452bd77947e3b5bd0722a366c8d59
|
[
"BSD-3-Clause"
] | 1
|
2019-08-02T16:49:59.000Z
|
2019-08-02T16:49:59.000Z
|
python/GafferSceneUI/SeedsUI.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | 2
|
2017-08-23T21:35:45.000Z
|
2018-01-29T08:59:33.000Z
|
python/GafferSceneUI/SeedsUI.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | 1
|
2020-12-21T12:33:49.000Z
|
2020-12-21T12:33:49.000Z
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.Seeds,
"description",
"""
Scatters points evenly over the surface of meshes.
This can be particularly useful in conjunction with
the Instancer, which can then apply instances to
each point.
""",
plugs = {
"parent" : [
"description",
"""
The location of the mesh to scatter the
points over. The generated points will
be parented under this location.
""",
],
"name" : [
"description",
"""
The name given to the object generated -
this will be placed under the parent in
the scene hierarchy.
""",
],
"density" : [
"description",
"""
The number of points per unit area of the mesh,
measured in object space.
""",
],
"densityPrimitiveVariable" : [
"description",
"""
A float primitive variable used to specify a varying
point density across the surface of the mesh. Multiplied
with the density setting above.
""",
],
"pointType" : [
"description",
"""
The render type of the points. This defaults to
"gl:point" so that the points are rendered in a
lightweight manner in the viewport.
""",
"preset:GL Point", "gl:point",
"preset:Particle", "particle",
"preset:Sphere", "sphere",
"preset:Disk", "disk",
"preset:Patch", "patch",
"preset:Blobby", "blobby",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
]
}
)
| 27.008264
| 77
| 0.666157
|
5e9d100ffdb20907c47774e37825ac06a237621f
| 7,835
|
py
|
Python
|
lib/nms/nms.py
|
eyecan-dev/HRNet-for-Fashion-Landmark-Estimation.PyTorch
|
63dcb75836ebb8896307c9e5a62be8a475de8323
|
[
"MIT"
] | 76
|
2020-04-29T09:20:46.000Z
|
2022-02-23T11:54:45.000Z
|
lib/nms/nms.py
|
eyecan-dev/HRNet-for-Fashion-Landmark-Estimation.PyTorch
|
63dcb75836ebb8896307c9e5a62be8a475de8323
|
[
"MIT"
] | 10
|
2020-07-07T02:30:57.000Z
|
2021-12-28T06:57:01.000Z
|
lib/nms/nms.py
|
eyecan-dev/HRNet-for-Fashion-Landmark-Estimation.PyTorch
|
63dcb75836ebb8896307c9e5a62be8a475de8323
|
[
"MIT"
] | 19
|
2020-04-29T09:20:47.000Z
|
2022-02-26T22:31:49.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .cpu_nms import cpu_nms
from .gpu_nms import gpu_nms
def py_nms_wrapper(thresh):
def _nms(dets):
return nms(dets, thresh)
return _nms
def cpu_nms_wrapper(thresh):
def _nms(dets):
return cpu_nms(dets, thresh)
return _nms
def gpu_nms_wrapper(thresh, device_id):
def _nms(dets):
return gpu_nms(dets, thresh, device_id)
return _nms
def nms(dets, thresh):
"""
greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
    :param dets: [[x1, y1, x2, y2, score]]
:param thresh: retain overlap < thresh
:return: indexes to keep
"""
if dets.shape[0] == 0:
return []
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
if not isinstance(sigmas, np.ndarray):
# # for COCO
# sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
# # for deepfashion2
sigmas = np.array([0.012 , 0.0158, 0.0169, 0.0165, 0.0169, 0.0158, 0.0298, 0.0329,
0.0321, 0.0323, 0.034 , 0.0388, 0.0452, 0.0574, 0.0492, 0.0352,
0.0492, 0.0574, 0.0452, 0.0388, 0.034 , 0.0323, 0.0321, 0.0329,
0.0298, 0.0194, 0.017 , 0.0185, 0.0193, 0.0185, 0.017 , 0.0286,
0.0471, 0.0547, 0.0526, 0.043 , 0.0392, 0.0513, 0.0566, 0.0509,
0.0564, 0.0542, 0.0604, 0.0599, 0.052 , 0.0599, 0.0604, 0.0542,
0.0564, 0.0509, 0.0566, 0.0513, 0.0392, 0.043 , 0.0526, 0.0547,
0.0471, 0.0286, 0.0074, 0.0085, 0.0165, 0.0248, 0.0165, 0.0085,
0.0156, 0.0231, 0.0296, 0.0137, 0.0195, 0.025 , 0.0347, 0.038 ,
0.0257, 0.037 , 0.0257, 0.038 , 0.0347, 0.025 , 0.0195, 0.0137,
0.0296, 0.0231, 0.0156, 0.0248, 0.0469, 0.0632, 0.037 , 0.0469,
0.0632, 0.0137, 0.0153, 0.0243, 0.0377, 0.0243, 0.0153, 0.0203,
0.0366, 0.0467, 0.0433, 0.0393, 0.0329, 0.0418, 0.0477, 0.0399,
0.0331, 0.042 , 0.0492, 0.0436, 0.0478, 0.0436, 0.0492, 0.042 ,
0.0331, 0.0399, 0.0477, 0.0418, 0.0329, 0.0393, 0.0433, 0.0467,
0.0366, 0.0203, 0.0377, 0.0645, 0.0573, 0.0478, 0.0645, 0.0573,
0.0352, 0.0158, 0.021 , 0.0214, 0.021 , 0.0158, 0.0196, 0.05 ,
0.0489, 0.0404, 0.0401, 0.0404, 0.0489, 0.05 , 0.0196, 0.0276,
0.0548, 0.0283, 0.0204, 0.0283, 0.0548, 0.0369, 0.0726, 0.0677,
0.064 , 0.0251, 0.064 , 0.0677, 0.0726, 0.0369, 0.0308, 0.0216,
0.0308, 0.0506, 0.0494, 0.0463, 0.0477, 0.0463, 0.0494, 0.0506,
0.0275, 0.0202, 0.0275, 0.0651, 0.0451, 0.035 , 0.028 , 0.0392,
0.0362, 0.0392, 0.028 , 0.035 , 0.0451, 0.0651, 0.0253, 0.0195,
0.0253, 0.0513, 0.0543, 0.0415, 0.0543, 0.0513, 0.0153, 0.023 ,
0.0167, 0.0145, 0.0167, 0.023 , 0.0332, 0.0391, 0.0391, 0.0396,
0.044 , 0.0452, 0.0498, 0.0514, 0.0585, 0.0655, 0.0635, 0.0602,
0.0635, 0.0655, 0.0585, 0.0514, 0.0498, 0.0452, 0.044 , 0.0396,
0.0391, 0.0391, 0.0332, 0.0121, 0.0134, 0.0158, 0.0162, 0.0158,
0.0134, 0.0246, 0.0406, 0.047 , 0.0404, 0.0463, 0.0466, 0.0435,
0.0499, 0.0455, 0.044 , 0.0411, 0.049 , 0.0576, 0.0685, 0.0618,
0.0483, 0.0618, 0.0685, 0.0576, 0.049 , 0.0411, 0.044 , 0.0486,
0.0499, 0.0435, 0.0466, 0.0463, 0.0404, 0.047 , 0.0406, 0.0246,
0.0116, 0.0167, 0.016 , 0.018 , 0.016 , 0.0167, 0.0196, 0.0385,
0.0421, 0.0497, 0.0562, 0.0528, 0.0428, 0.0528, 0.0562, 0.0497,
0.0421, 0.0385, 0.0196, 0.0244, 0.0297, 0.0244, 0.0208, 0.0244,
0.0297, 0.0173, 0.0616, 0.0659, 0.0712, 0.0707, 0.0685, 0.0339,
0.0685, 0.0707, 0.0712, 0.0659, 0.0616, 0.0173])
vars = (sigmas * 2) ** 2
xg = g[0::3]
yg = g[1::3]
vg = g[2::3]
ious = np.zeros((d.shape[0]))
for n_d in range(0, d.shape[0]):
xd = d[n_d, 0::3]
yd = d[n_d, 1::3]
vd = d[n_d, 2::3]
dx = xd - xg
dy = yd - yg
e = (dx ** 2 + dy ** 2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2
if in_vis_thre is not None:
ind = list(vg > in_vis_thre) and list(vd > in_vis_thre)
e = e[ind]
ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
return ious
def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
"""
greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh, overlap = oks
:param kpts_db
:param thresh: retain overlap < thresh
:return: indexes to keep
"""
if len(kpts_db) == 0:
return []
scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])
kpts = np.array([kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])
areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], sigmas, in_vis_thre)
inds = np.where(oks_ovr <= thresh)[0]
order = order[inds + 1]
return keep
def rescore(overlap, scores, thresh, type='gaussian'):
assert overlap.shape[0] == scores.shape[0]
if type == 'linear':
inds = np.where(overlap >= thresh)[0]
scores[inds] = scores[inds] * (1 - overlap[inds])
else:
scores = scores * np.exp(- overlap**2 / thresh)
return scores
def soft_oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
"""
greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh, overlap = oks
:param kpts_db
:param thresh: retain overlap < thresh
:return: indexes to keep
"""
if len(kpts_db) == 0:
return []
scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])
kpts = np.array([kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])
areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])
order = scores.argsort()[::-1]
scores = scores[order]
# max_dets = order.size
max_dets = 20
keep = np.zeros(max_dets, dtype=np.intp)
keep_cnt = 0
while order.size > 0 and keep_cnt < max_dets:
i = order[0]
oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]], sigmas, in_vis_thre)
order = order[1:]
scores = rescore(oks_ovr, scores[1:], thresh)
tmp = scores.argsort()[::-1]
order = order[tmp]
scores = scores[tmp]
keep[keep_cnt] = i
keep_cnt += 1
keep = keep[:keep_cnt]
return keep
# kpts_db = kpts_db[:keep_cnt]
# return kpts_db
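# --- Editor's sketch (not part of the original module) -----------------------
# Tiny self-contained check of the pure-python nms() above: the second box
# almost coincides with the first but has a lower score, so it is suppressed,
# while the disjoint third box survives.
def _nms_demo():
    dets = np.array([[0.0, 0.0, 10.0, 10.0, 0.9],
                     [1.0, 1.0, 10.0, 10.0, 0.8],
                     [20.0, 20.0, 30.0, 30.0, 0.7]])
    return nms(dets, 0.5)  # -> [0, 2]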
| 35.292793
| 123
| 0.553542
|
e30cd8048d434f79b20b31fc753fe599a98a4b40
| 14,070
|
py
|
Python
|
kivy/tests/test_lang_pre_process_and_post_process.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 13,889
|
2015-01-01T06:43:41.000Z
|
2022-03-31T17:37:56.000Z
|
kivy/tests/test_lang_pre_process_and_post_process.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 4,570
|
2015-01-01T17:58:52.000Z
|
2022-03-31T18:42:16.000Z
|
kivy/tests/test_lang_pre_process_and_post_process.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 3,786
|
2015-01-01T09:20:45.000Z
|
2022-03-30T21:15:05.000Z
|
import unittest
import textwrap
from collections import defaultdict
class TrackCallbacks(object):
kv_pre_events = []
'''Stores values added during the pre event dispatched callbacks.
'''
kv_applied_events = []
'''Stores values added during the applied event dispatched callbacks.
'''
kv_post_events = []
'''Stores values added during the post event dispatched callbacks.
'''
events_in_pre = []
'''List of expected events that should be in kv_pre_events after all the
callbacks has been executed.
'''
events_in_applied = []
'''List of expected events that should be in kv_applied_events after all
the callbacks has been executed.
'''
events_in_post = []
'''List of expected events that should be in kv_post_events after all the
callbacks has been executed.
'''
instantiated_widgets = []
'''Whenever a widget of this class is instantiated, it is added to this
list, which is class specific.
It lets us iterate through all the instance of this class and assert for
all of them as needed.
'''
root_widget = None
'''The expected root widget in the kv rule as dispatched in on_kv_applied.
'''
base_widget = None
'''The expected base widget as dispatched in on_kv_post.
'''
actual_root_widget = None
'''The actual root widget in the kv rule as dispatched in on_kv_applied.
'''
actual_base_widget = None
'''The actual base widget as dispatched in on_kv_post.
'''
name = 'none'
'''Optional name given to the widget to help it identify during a test
failure.
'''
my_roots_expected_ids = {}
'''Dictionary containing the expected ids as stored in the root
widget's `ids`. The root being this widget's root widget from kv.
'''
actual_ids = {}
'''Dictionary containing the actual ids as stored in the root
widget's `ids`. The root being this widget's root widget from kv.
The ids is saved here during the `on_kv_post` callback.
'''
expected_prop_values = {}
'''A dict of property names and the values they are expected to have
during the on_kv_post dispatch.
'''
actual_prop_values = {}
'''A dict of property names and the values they actually had
during the on_kv_post dispatch.
'''
def __init__(self, name='none', **kwargs):
self.kv_pre_events = self.kv_pre_events[:]
self.kv_applied_events = self.kv_applied_events[:]
self.kv_post_events = self.kv_post_events[:]
self.events_in_pre = self.events_in_pre[:]
self.events_in_applied = self.events_in_applied[:]
self.events_in_post = self.events_in_post[:]
self.name = name
super(TrackCallbacks, self).__init__(**kwargs)
self.instantiated_widgets.append(self)
def add(self, name, event):
'''Add name to the list of the names added in the callbacks for this
event.
'''
events = getattr(self, 'kv_{}_events'.format(event))
events.append(name)
@classmethod
def check(cls, testcase):
'''Checks that all the widgets of this class pass all the assertions.
'''
for widget in cls.instantiated_widgets:
# check that all the events match
for event in ('pre', 'applied', 'post'):
cls.check_event(widget, event, testcase)
# check that the ids are properly saved during on_kv_post dispatch
expected = {
k: v.__self__ for k, v in widget.my_roots_expected_ids.items()}
actual = {k: v.__self__ for k, v in widget.actual_ids.items()}
testcase.assertEqual(expected, actual)
# check that the root widget is as expected
testcase.assertIs(
widget.root_widget and widget.root_widget.__self__,
widget.actual_root_widget and
widget.actual_root_widget.__self__,
'expected "{}", got "{}" instead for root_widget'.format(
widget.root_widget and widget.root_widget.name,
widget.actual_root_widget and
widget.actual_root_widget.name))
# check that the base widget is as expected
testcase.assertIs(
widget.base_widget and widget.base_widget.__self__,
widget.actual_base_widget and
widget.actual_base_widget.__self__,
'expected "{}", got "{}" instead for base_widget'.format(
widget.base_widget and widget.base_widget.name,
widget.actual_base_widget and
widget.actual_base_widget.name))
# check that the properties have expected values
testcase.assertEqual(
widget.expected_prop_values, widget.actual_prop_values)
@staticmethod
def check_event(widget, event_name, testcase):
'''Check that the names are added as expected for this event.
'''
events = getattr(widget, 'kv_{}_events'.format(event_name))
should_be_in = getattr(widget, 'events_in_{}'.format(event_name))
counter = defaultdict(int)
for name in events:
counter[name] += 1
for name, value in counter.items():
testcase.assertEqual(
value, 1,
'"{}" was present "{}" times for event "{}" for widget "{} '
'({})"'.format(name, value, event_name, widget.name, widget))
testcase.assertEqual(
set(should_be_in), set(events),
'Expected and actual event callbacks do not match for event "{}" '
'for widget "{} ({})"'.format(
event_name, widget.name, widget))
@staticmethod
def get_base_class():
'''The base class to use for widgets during testing so we can use
this class variables to ease testing.
'''
from kivy.uix.widget import Widget
class TestEventsBase(TrackCallbacks, Widget):
__events__ = ('on_kv_pre', 'on_kv_applied')
instantiated_widgets = []
events_in_pre = [1, ]
events_in_applied = [1, ]
events_in_post = [1, ]
def on_kv_pre(self):
self.add(1, 'pre')
def on_kv_applied(self, root_widget):
self.add(1, 'applied')
self.actual_root_widget = root_widget
def on_kv_post(self, base_widget):
self.add(1, 'post')
self.actual_base_widget = base_widget
self.actual_prop_values = {
k: getattr(self, k) for k in self.expected_prop_values}
if self.actual_root_widget is not None:
# make a copy of the ids at the current moment
self.actual_ids = dict(self.actual_root_widget.ids)
def apply_class_lang_rules(self, root=None, **kwargs):
self.dispatch('on_kv_pre')
super(TestEventsBase, self).apply_class_lang_rules(
root=root, **kwargs)
self.dispatch('on_kv_applied', root)
return TestEventsBase
def __repr__(self):
module = type(self).__module__
try:
qualname = type(self).__qualname__
except AttributeError: # python 2
qualname = ''
return '<Name: "{}" {}.{} object at {}>'.format(
self.name, module, qualname, hex(id(self)))
class TestKvEvents(unittest.TestCase):
def test_pure_python_auto_binding(self):
class TestEventsPureAuto(TrackCallbacks.get_base_class()):
instantiated_widgets = []
widget = TestEventsPureAuto()
widget.root_widget = None
widget.base_widget = widget
TestEventsPureAuto.check(self)
def test_pure_python_callbacks(self):
class TestEventsPure(TrackCallbacks.get_base_class()):
instantiated_widgets = []
events_in_pre = [1, 2]
events_in_applied = [1, 2]
events_in_post = [1, 2]
def __init__(self, **kwargs):
self.fbind('on_kv_pre', lambda _: self.add(2, 'pre'))
self.fbind(
'on_kv_applied', lambda _, x: self.add(2, 'applied'))
self.fbind('on_kv_post', lambda _, x: self.add(2, 'post'))
super(TestEventsPure, self).__init__(**kwargs)
widget = TestEventsPure()
widget.root_widget = None
widget.base_widget = widget
widget.fbind('on_kv_pre', lambda _: widget.add(3, 'pre'))
widget.fbind('on_kv_applied', lambda _, x: widget.add(3, 'applied'))
widget.fbind('on_kv_post', lambda _, x: widget.add(3, 'post'))
TestEventsPure.check(self)
def test_instantiate_from_kv(self):
from kivy.lang import Builder
class TestEventsFromKV(TrackCallbacks.get_base_class()):
instantiated_widgets = []
widget = Builder.load_string('TestEventsFromKV')
self.assertIsInstance(widget, TestEventsFromKV)
widget.root_widget = widget
widget.base_widget = widget
widget.check(self)
def test_instantiate_from_kv_with_event(self):
from kivy.lang import Builder
class TestEventsFromKVEvent(TrackCallbacks.get_base_class()):
instantiated_widgets = []
widget = Builder.load_string(textwrap.dedent("""
TestEventsFromKVEvent:
events_in_post: [1, 2]
on_kv_pre: self.add(2, 'pre')
on_kv_applied: self.add(2, 'applied')
on_kv_post: self.add(2, 'post')
root_widget: self
base_widget: self
"""))
self.assertIsInstance(widget, TestEventsFromKVEvent)
widget.check(self)
def test_instantiate_from_kv_with_child(self):
from kivy.lang import Builder
class TestEventsFromKVChild(TrackCallbacks.get_base_class()):
instantiated_widgets = []
widget = Builder.load_string(textwrap.dedent("""
TestEventsFromKVChild:
events_in_post: [1, 2]
on_kv_pre: self.add(2, 'pre')
on_kv_applied: self.add(2, 'applied')
on_kv_post: self.add(2, 'post')
root_widget: self
base_widget: self
name: 'root'
my_roots_expected_ids: {'child_widget': child_widget}
TestEventsFromKVChild:
events_in_post: [1, 2]
on_kv_pre: self.add(2, 'pre')
on_kv_applied: self.add(2, 'applied')
on_kv_post: self.add(2, 'post')
root_widget: root
base_widget: root
name: 'child'
id: child_widget
my_roots_expected_ids: {'child_widget': self}
"""))
self.assertIsInstance(widget, TestEventsFromKVChild)
widget.check(self)
def test_instantiate_from_kv_with_child_inherit(self):
from kivy.lang import Builder
class TestEventsFromKVChildInherit(TrackCallbacks.get_base_class()):
instantiated_widgets = []
widget = Builder.load_string(textwrap.dedent("""
<TestEventsFromKVChildInherit2@TestEventsFromKVChildInherit>:
on_kv_pre: self.add(3, 'pre')
on_kv_applied: self.add(3, 'applied')
on_kv_post: self.add(3, 'post')
<TestEventsFromKVChildInherit3@TestEventsFromKVChildInherit2>:
on_kv_pre: self.add(4, 'pre')
on_kv_applied: self.add(4, 'applied')
on_kv_post: self.add(4, 'post')
some_value: 'fruit'
TestEventsFromKVChildInherit2:
events_in_applied: [1, 2, 3]
events_in_post: [1, 2, 3, 4]
on_kv_pre: self.add(4, 'pre')
on_kv_applied: self.add(4, 'applied')
on_kv_post: self.add(4, 'post')
root_widget: root
base_widget: self.parent.parent
name: 'third child'
id: third_child
my_roots_expected_ids: {'third_child': self}
<TestEventsFromKVChildInherit>:
on_kv_pre: self.add(2, 'pre')
on_kv_applied: self.add(2, 'applied')
on_kv_post: self.add(2, 'post')
another_value: 'apple'
TestEventsFromKVChildInherit:
events_in_applied: [1, 2]
events_in_post: [1, 2, 3]
on_kv_pre: self.add(3, 'pre')
on_kv_applied: self.add(3, 'applied')
on_kv_post: self.add(3, 'post')
root_widget: self
base_widget: self
name: 'root'
my_roots_expected_ids: \
{'second_child': second_child, 'first_child': first_child}
TestEventsFromKVChildInherit:
events_in_applied: [1, 2]
events_in_post: [1, 2, 3]
on_kv_pre: self.add(3, 'pre')
on_kv_applied: self.add(3, 'applied')
on_kv_post: self.add(3, 'post')
root_widget: root
base_widget: root
name: 'first child'
id: first_child
my_roots_expected_ids: \
{'second_child': second_child, 'first_child': self}
TestEventsFromKVChildInherit3:
events_in_applied: [1, 2, 3, 4]
events_in_post: [1, 2, 3, 4, 5]
on_kv_pre: self.add(5, 'pre')
on_kv_applied: self.add(5, 'applied')
on_kv_post: self.add(5, 'post')
root_widget: root
base_widget: root
name: 'second child'
some_value: first_child.another_value
expected_prop_values: {'some_value': 'apple'}
id: second_child
my_roots_expected_ids: \
{'second_child': self, 'first_child': first_child}
"""))
widget.check(self)
| 35.620253
| 79
| 0.589055
|
27c92a2f9b87bd9b59602255de43786f4c456481
| 3,589
|
py
|
Python
|
test/ontic_meta_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 2
|
2017-11-06T12:01:20.000Z
|
2021-03-01T23:52:41.000Z
|
test/ontic_meta_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 1
|
2016-12-02T04:04:03.000Z
|
2016-12-02T04:04:03.000Z
|
test/ontic_meta_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 2
|
2015-06-26T22:24:57.000Z
|
2016-12-01T02:15:36.000Z
|
"""OnticMeta unit tests."""
from copy import copy, deepcopy
from test import utils
from ontic.meta import Meta
class SubType(Meta):
"""Sub-class of Meta for testing purposes."""
pass
class OnticMetaTest(utils.BaseTestCase):
"""Meta test cases."""
def test_ontic_meta_instantiation(self):
"""Meta instantiation testing to confirm dict behaviour."""
ontic_meta1 = Meta()
self.assertIsNotNone(ontic_meta1)
# Test dictionary initialization.
ontic_meta2 = Meta({'prop1': 'val1', 'prop2': 'val2'})
self.assertIsNotNone(ontic_meta2)
self.assertEqual('val1', ontic_meta2['prop1'])
self.assertEqual('val2', ontic_meta2.prop2)
# Test initialization by property.
ontic_meta3 = Meta(prop1='val1', prop2='val2')
self.assertIsNotNone(ontic_meta3)
self.assertEqual('val1', ontic_meta3.prop1)
self.assertEqual('val2', ontic_meta3['prop2'])
# Test initialization by list.
ontic_meta4 = Meta([['prop1', 'val1'], ['prop2', 'val2']])
self.assertIsNotNone(ontic_meta4)
self.assertEqual('val1', ontic_meta4['prop1'])
self.assertEqual('val2', ontic_meta4.prop2)
def test_dynamic_access(self):
"""Ensure Meta property access as dict and attribute."""
ontic_meta = Meta()
self.assert_dynamic_accessing(ontic_meta)
# Create the test data.
def test_copy(self):
"""Ensure that Meta supports copy operations."""
# Create the test data.
sub_object = SubType(
int_prop=1,
str_prop='dog',
list_prop=[2, 'cat'],
dict_prop={
'int_key': 3,
'str_key': 'mouse',
'list_key': [4, 'fish'],
'dict_key': {
'key1': 'red',
'key2': 'blue',
'key3': 'green'
}
}
)
# Execute the test.
sub_copy = copy(sub_object)
# Validate the test results.
self.assertIsInstance(sub_copy, SubType)
self.assertIsNot(sub_object, sub_copy)
self.assertDictEqual(sub_object, sub_copy)
self.assertIs(sub_copy.int_prop, sub_object.int_prop)
self.assertIs(sub_copy.str_prop, sub_object.str_prop)
self.assertIs(sub_copy.list_prop, sub_object.list_prop)
self.assertIs(sub_copy.dict_prop, sub_object.dict_prop)
def test_deepcopy(self):
"""Ensure that Meta supports deepcopy operation."""
sub_object = SubType(
int_prop=1,
str_prop='dog',
list_prop=[2, 'cat'],
dict_prop={
'int_key': 3,
'str_key': 'mouse',
'list_key': [4, 'fish'],
'dict_key': {'key1': 'red',
'key2': 'blue',
'key3': 'green'}
}
)
# Execute the test.
sub_copy = deepcopy(sub_object)
# Validate the test results.
self.assertIsInstance(sub_copy, SubType)
self.assertIsNot(sub_object, sub_copy)
self.assertDictEqual(sub_object, sub_copy)
self.assertIs(sub_copy.int_prop, sub_object.int_prop)
self.assertIs(sub_copy.str_prop, sub_object.str_prop)
self.assertIsNot(sub_copy.list_prop, sub_object.list_prop)
self.assertIsNot(sub_copy.dict_prop, sub_object.dict_prop)
self.assertIsNot(sub_copy.dict_prop['list_key'],
sub_object.dict_prop['list_key'])
| 33.231481
| 67
| 0.580663
|
d72aa525f46a9af737f207a52a7c17772d907ae5
| 2,822
|
py
|
Python
|
No_0040_Combination Sum II/combination_sum_ii_by_dfs_and_pruning.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 32
|
2020-01-05T13:37:16.000Z
|
2022-03-26T07:27:09.000Z
|
No_0040_Combination Sum II/combination_sum_ii_by_dfs_and_pruning.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | null | null | null |
No_0040_Combination Sum II/combination_sum_ii_by_dfs_and_pruning.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 8
|
2020-06-18T16:17:27.000Z
|
2022-03-15T23:58:18.000Z
|
'''
Description:
Given a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
'''
from typing import List
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
# keep candidates in ascending order
candidates.sort()
# record of valid combination
result = []
def combination_find( target, start, end, path):
if target == 0:
# combination sum meets target
# update current valid combination
result.append( path )
return
for i in range(start, end+1):
if i > start and candidates[i] == candidates[i-1]:
# avoid repetition
continue
current = candidates[i]
if current > target:
# pruning:
# current minimal element is larger than target
# impossible to find valid combination
break
# update target in next round as remaining
remaining = target - current
# DFS search, update start index as i+1 and move forward
combination_find(remaining, i+1, end, path + [ current ] )
# DFS search
combination_find( target, 0, len(candidates)-1, [] )
return result
# n : the length of candidates
## Time Complexity: O( n * 2^n )
#
# The overhead in time is the cost of element selection, which is of O( n * 2^n )
## Space Complexity: O( 2^n )
#
# The overhead in space is the storage for result, which is of O( 2^n )
import unittest
class Testing( unittest.TestCase ):
def test_case_1( self ):
result = Solution().combinationSum2( candidates = [10,1,2,7,6,1,5], target = 8)
self.assertCountEqual(result, [[1, 7],[1, 2, 5],[2, 6],[1, 1, 6]] )
def test_case_2( self ):
result = Solution().combinationSum2( candidates = [2,5,2,1,2], target = 5 )
self.assertCountEqual(result, [[1,2,2],[5]] )
if __name__ == '__main__':
unittest.main()
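# --- Editor's sketch (not part of the original solution) ---------------------
# Quick illustration of the duplicate-skip rule used above: after sorting
# [1, 1, 2, 5, 6, 7, 10], a repeated 1 is only skipped when it would start a
# new branch at the same depth, which is what keeps [1, 7] from appearing twice.
#   Solution().combinationSum2([10, 1, 2, 7, 6, 1, 5], 8)
#   -> [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]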
| 24.754386
| 169
| 0.54713
|
6745526ccb572076077df7c7cbd55b62f7514c3d
| 3,244
|
py
|
Python
|
symmetricDistance/resources/manuscript/pyscripts/grid1_gt.py
|
CIDARLAB/stl_metrics
|
1ec645b3b688356a8286d037b8c7447db043a3b6
|
[
"BSD-3-Clause"
] | null | null | null |
symmetricDistance/resources/manuscript/pyscripts/grid1_gt.py
|
CIDARLAB/stl_metrics
|
1ec645b3b688356a8286d037b8c7447db043a3b6
|
[
"BSD-3-Clause"
] | null | null | null |
symmetricDistance/resources/manuscript/pyscripts/grid1_gt.py
|
CIDARLAB/stl_metrics
|
1ec645b3b688356a8286d037b8c7447db043a3b6
|
[
"BSD-3-Clause"
] | null | null | null |
from ana_STL import STL_computation
from ana_STL import directed_distance
F=STL_computation(2,105)
f_0 = F.add_predicate(1,"<",0.1)
f_1 = F.add_predicate(2,"<",0.6)
f_2 = F.Conj([f_0, f_1])
f_3 = F.add_predicate(2,">=",0.4)
f_4 = F.Conj([f_2, f_3])
f_5 = F.G(range(0, 10+1), f_4)
f_6 = F.add_predicate(1,">=",0.7)
f_7 = F.add_predicate(2,">=",0.8)
f_8 = F.add_predicate(2,"<",0.2)
f_9 = F.Disj([f_7, f_8])
f_10 = F.Conj([f_6, f_9])
f_11 = F.G(range(70, 100+1), f_10)
f_12 = F.Conj([f_5, f_11])
f_13 = F.add_predicate(1,"<=",0.65)
f_14 = F.G(range(0, 15+1), f_13)
f_15 = F.add_predicate(1,"<=",0.6)
f_16 = F.G(range(15, 30+1), f_15)
f_17 = F.Conj([f_14, f_16])
f_18 = F.add_predicate(1,"<=",0.75)
f_19 = F.G(range(30, 60+1), f_18)
f_20 = F.add_predicate(1,"<=",1.05)
f_21 = F.G(range(60, 75+1), f_20)
f_22 = F.Conj([f_19, f_21])
f_23 = F.Conj([f_17, f_22])
f_24 = F.add_predicate(1,"<=",1.05)
f_25 = F.G(range(75, 105+1), f_24)
f_26 = F.add_predicate(1,">=",0.0)
f_27 = F.G(range(0, 30+1), f_26)
f_28 = F.Conj([f_25, f_27])
f_29 = F.add_predicate(1,">=",0.3)
f_30 = F.G(range(30, 45+1), f_29)
f_31 = F.add_predicate(1,">=",0.45)
f_32 = F.G(range(45, 60+1), f_31)
f_33 = F.Conj([f_30, f_32])
f_34 = F.Conj([f_28, f_33])
f_35 = F.Conj([f_23, f_34])
f_36 = F.add_predicate(1,">=",0.6)
f_37 = F.G(range(60, 75+1), f_36)
f_38 = F.add_predicate(1,">=",0.5)
f_39 = F.G(range(75, 90+1), f_38)
f_40 = F.Conj([f_37, f_39])
f_41 = F.add_predicate(1,">=",0.6)
f_42 = F.G(range(90, 105+1), f_41)
f_43 = F.Conj([f_40, f_42])
f_44 = F.Conj([f_35, f_43])
f_45 = F.add_predicate(2,"<=",0.75)
f_46 = F.G(range(0, 15+1), f_45)
f_47 = F.add_predicate(2,"<=",0.75)
f_48 = F.G(range(15, 45+1), f_47)
f_49 = F.Conj([f_46, f_48])
f_50 = F.add_predicate(2,"<=",0.6)
f_51 = F.G(range(45, 60+1), f_50)
f_52 = F.add_predicate(2,"<=",0.35)
f_53 = F.G(range(60, 75+1), f_52)
f_54 = F.Conj([f_51, f_53])
f_55 = F.Conj([f_49, f_54])
f_56 = F.add_predicate(2,"<=",0.3)
f_57 = F.G(range(75, 105+1), f_56)
f_58 = F.add_predicate(2,">=",0.0)
f_59 = F.G(range(0, 15+1), f_58)
f_60 = F.Conj([f_57, f_59])
f_61 = F.add_predicate(2,">=",0.3)
f_62 = F.G(range(15, 30+1), f_61)
f_63 = F.add_predicate(2,">=",0.0)
f_64 = F.G(range(30, 105+1), f_63)
f_65 = F.Conj([f_62, f_64])
f_66 = F.Conj([f_60, f_65])
f_67 = F.Conj([f_55, f_66])
f_68 = F.add_predicate(2,"<=",0.75)
f_69 = F.G(range(0, 30+1), f_68)
f_70 = F.add_predicate(2,"<=",1.05)
f_71 = F.G(range(30, 45+1), f_70)
f_72 = F.Conj([f_69, f_71])
f_73 = F.add_predicate(2,"<=",1.05)
f_74 = F.G(range(45, 105+1), f_73)
f_75 = F.add_predicate(2,">=",0.0)
f_76 = F.G(range(0, 15+1), f_75)
f_77 = F.Conj([f_74, f_76])
f_78 = F.Conj([f_72, f_77])
f_79 = F.add_predicate(2,">=",0.3)
f_80 = F.G(range(15, 45+1), f_79)
f_81 = F.add_predicate(2,">=",0.45)
f_82 = F.G(range(45, 60+1), f_81)
f_83 = F.Conj([f_80, f_82])
f_84 = F.add_predicate(2,">=",0.6)
f_85 = F.G(range(60, 75+1), f_84)
f_86 = F.add_predicate(2,">=",0.75)
f_87 = F.G(range(75, 90+1), f_86)
f_88 = F.Conj([f_85, f_87])
f_89 = F.Conj([f_83, f_88])
f_90 = F.Conj([f_78, f_89])
f_91 = F.add_predicate(2,">=",0.6)
f_92 = F.G(range(90, 105+1), f_91)
f_93 = F.Conj([f_90, f_92])
f_94 = F.Disj([f_67, f_93])
f_95 = F.Conj([f_44, f_94])
r=directed_distance(F, f_95, f_12)
print(r)
| 30.317757
| 37
| 0.607275
|
9900002a9449fc8bdf4d2159f28f6aa7ce3210af
| 2,639
|
py
|
Python
|
bspump/declarative/expression/statement/whenexpr.py
|
LibertyAces/BitSwanPump
|
02301bfd4e807836403ce6a22030ad47058541d6
|
[
"BSD-3-Clause"
] | 17
|
2019-02-14T09:26:03.000Z
|
2022-03-11T09:23:52.000Z
|
bspump/declarative/expression/statement/whenexpr.py
|
LibertyAces/BitSwanPump
|
02301bfd4e807836403ce6a22030ad47058541d6
|
[
"BSD-3-Clause"
] | 91
|
2019-05-06T18:59:02.000Z
|
2022-01-11T06:22:32.000Z
|
bspump/declarative/expression/statement/whenexpr.py
|
LibertyAces/BitSwanPump
|
02301bfd4e807836403ce6a22030ad47058541d6
|
[
"BSD-3-Clause"
] | 10
|
2019-04-23T08:48:58.000Z
|
2022-02-13T14:24:28.000Z
|
from ...abc import Expression
from ..value.valueexpr import VALUE
class WHEN(Expression):
"""
Checks "if" condition passes - it is an `if` on steroids ;-)
!WHEN
- is:
!EQ
- !ITEM EVENT eggs
- 2
then: eggs
- is:
!LT
- 9
- !ITEM EVENT potatoes
- 11
then: potatoes
- else:
Nah
"""
Attributes = False # Filled during initialize() since attributes are dynamic
Category = 'Statements'
def __init__(self, app, *, sequence):
super().__init__(app)
self.Items = sequence
self.ItemsNormalized = []
self.Attributes = {}
self.OutletType = None # Will be determined in `initialize()`
self.Else = VALUE(self.App, value=None)
def set(self, key, value):
setattr(self, key, value)
if "Test" in key:
item_normalized = self.ItemsNormalized[int(key[4:])]
self.ItemsNormalized[int(key[4:])] = (value, item_normalized[1])
if "Then" in key:
item_normalized = self.ItemsNormalized[int(key[4:])]
self.ItemsNormalized[int(key[4:])] = (item_normalized[0], value)
def initialize(self):
item_counter = 0
for n, i in enumerate(self.Items):
# `test/then` branch
if 'test' in i and 'then' in i:
assert(len(i) == 2)
vtest = i['test']
if not isinstance(vtest, Expression):
vtest = VALUE(self.App, value=vtest)
attr_name = 'Test{}'.format(item_counter)
setattr(self, attr_name, vtest)
self.Attributes[attr_name] = [bool.__name__]
vthen = i['then']
if not isinstance(vthen, Expression):
vthen = VALUE(self.App, value=vthen)
attr_name = 'Then{}'.format(item_counter)
setattr(self, attr_name, vthen)
if self.OutletType is None:
self.OutletType = vthen.get_outlet_type()
self.Attributes[attr_name] = self.OutletType
self.ItemsNormalized.append((vtest, vthen))
item_counter += 1
# `else` branch
elif 'else' in i:
assert(len(i) == 1)
# TODO: Fix double-initialization when doing INCLUDE
# assert('Else' not in self.Attributes)
v = i['else']
if not isinstance(v, Expression):
v = VALUE(self.App, value=v)
attr_name = 'Else'
setattr(self, attr_name, v)
if self.OutletType is None:
self.OutletType = v.get_outlet_type()
self.Attributes[attr_name] = self.OutletType
else:
raise RuntimeError("Unexpected items in '!WHEN': {}".format(i.keys()))
def __call__(self, context, event, *args, **kwargs):
for test, then in self.ItemsNormalized:
res = test(context, event, *args, **kwargs)
if res:
return then(context, event, *args, **kwargs)
return self.Else(context, event, *args, **kwargs)
def get_outlet_type(self):
return self.OutletType
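# Rough plain-Python equivalent of the evaluation performed by __call__ above, for the
# YAML example in the class docstring (illustrative sketch only; the real branches are
# VALUE/!EQ/!LT expression trees, and the three-operand !LT is assumed to mean the
# chained comparison 9 < potatoes < 11):
#
#   def when(event):
#       if event.get("eggs") == 2:
#           return "eggs"
#       if 9 < event.get("potatoes") < 11:
#           return "potatoes"
#       return "Nah"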
| 22.947826
| 78
| 0.656309
|
757d1bceb9d8c5508c05ffb26137eaa3bef020b2
| 6,532
|
py
|
Python
|
Support/_old/bin/flake8/mccabe.py
|
fish2000/__OST2__.tmbundle
|
a9b55c689e87ea94b373da99849512162c5071e6
|
[
"MIT"
] | null | null | null |
Support/_old/bin/flake8/mccabe.py
|
fish2000/__OST2__.tmbundle
|
a9b55c689e87ea94b373da99849512162c5071e6
|
[
"MIT"
] | null | null | null |
Support/_old/bin/flake8/mccabe.py
|
fish2000/__OST2__.tmbundle
|
a9b55c689e87ea94b373da99849512162c5071e6
|
[
"MIT"
] | null | null | null |
""" Meager code path measurement tool.
Ned Batchelder
http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html
MIT License.
"""
import compiler, optparse, sys
class PathNode:
def __init__(self, name, look="circle"):
self.name = name
self.look = look
def to_dot(self):
print 'node [shape=%s,label="%s"] %d;' % (self.look, self.name, self.dot_id())
def dot_id(self):
return id(self)
class PathGraph:
def __init__(self, name):
self.name = name
self.nodes = {}
def add_node(self, n):
assert n
self.nodes.setdefault(n, [])
def connect(self, n1, n2):
assert n1
assert n2
self.nodes.setdefault(n1, []).append(n2)
def to_dot(self):
print 'subgraph {'
for node in self.nodes:
node.to_dot()
for node, nexts in self.nodes.items():
for next in nexts:
print '%s -- %s;' % (node.dot_id(), next.dot_id())
print '}'
def complexity(self):
""" Return the McCabe complexity for the graph.
V-E+2
"""
num_edges = sum([len(n) for n in self.nodes.values()])
num_nodes = len(self.nodes)
return num_edges - num_nodes + 2
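# Small illustration of the V-E+2 formula using the classes above:
#   g = PathGraph("demo")
#   a, b, c = PathNode("A"), PathNode("B"), PathNode("C")
#   for node in (a, b, c):
#       g.add_node(node)
#   g.connect(a, b); g.connect(b, c)   # straight line: 2 - 3 + 2 == 1
#   g.connect(a, c)                    # extra branch:  3 - 3 + 2 == 2
#   g.complexity()                     # -> 2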
class PathGraphingAstVisitor(compiler.visitor.ASTVisitor):
""" A visitor for a parsed Abstract Syntax Tree which finds executable
statements.
"""
def __init__(self):
compiler.visitor.ASTVisitor.__init__(self)
self.classname = ""
self.graphs = {}
self.reset()
def reset(self):
self.graph = None
self.tail = None
def visitFunction(self, node):
if self.classname:
entity = '%s%s' % (self.classname, node.name)
else:
entity = node.name
name = '%d:1: %r' % (node.lineno, entity)
if self.graph is not None:
# closure
pathnode = self.appendPathNode(name)
self.tail = pathnode
self.default(node)
bottom = PathNode("", look='point')
self.graph.connect(self.tail, bottom)
self.graph.connect(pathnode, bottom)
self.tail = bottom
else:
self.graph = PathGraph(name)
pathnode = PathNode(name)
self.tail = pathnode
self.default(node)
self.graphs["%s%s" % (self.classname, node.name)] = self.graph
self.reset()
def visitClass(self, node):
old_classname = self.classname
self.classname += node.name + "."
self.default(node)
self.classname = old_classname
def appendPathNode(self, name):
if not self.tail:
return
pathnode = PathNode(name)
self.graph.add_node(pathnode)
self.graph.connect(self.tail, pathnode)
self.tail = pathnode
return pathnode
def visitSimpleStatement(self, node):
if node.lineno is None:
lineno = 0
else:
lineno = node.lineno
name = "Stmt %d" % lineno
self.appendPathNode(name)
visitAssert = visitAssign = visitAssTuple = visitPrint = \
visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
visitPass = visitDiscard = visitGlobal = visitReturn = \
visitSimpleStatement
def visitLoop(self, node):
name = "Loop %d" % node.lineno
if self.graph is None:
# global loop
self.graph = PathGraph(name)
pathnode = PathNode(name)
self.tail = pathnode
self.default(node)
self.graphs["%s%s" % (self.classname, name)] = self.graph
self.reset()
else:
pathnode = self.appendPathNode(name)
self.tail = pathnode
self.default(node.body)
bottom = PathNode("", look='point')
self.graph.connect(self.tail, bottom)
self.graph.connect(pathnode, bottom)
self.tail = bottom
# TODO: else clause in node.else_
visitFor = visitWhile = visitLoop
def visitIf(self, node):
name = "If %d" % node.lineno
pathnode = self.appendPathNode(name)
if not pathnode:
return # TODO: figure out what to do with if's outside def's.
loose_ends = []
for t, n in node.tests:
self.tail = pathnode
self.default(n)
loose_ends.append(self.tail)
if node.else_:
self.tail = pathnode
self.default(node.else_)
loose_ends.append(self.tail)
else:
loose_ends.append(pathnode)
bottom = PathNode("", look='point')
for le in loose_ends:
self.graph.connect(le, bottom)
self.tail = bottom
# TODO: visitTryExcept
# TODO: visitTryFinally
# TODO: visitWith
def get_code_complexity(code, min=7, filename='stdin'):
complex = []
ast = compiler.parse(code)
visitor = PathGraphingAstVisitor()
visitor.preorder(ast, visitor)
for graph in visitor.graphs.values():
if graph is None:
# ?
continue
if graph.complexity() >= min:
msg = ':%s is too complex (%d)' % (
graph.name, graph.complexity())
complex.append(msg)
if len(complex) == 0:
return 0
print('\n'.join(complex))
return len(complex)
def get_module_complexity(module_path, min=7):
"""Returns the complexity of a module"""
code = open(module_path, "rU").read() + '\n\n'
return get_code_complexity(code, min, filename=module_path)
def main(argv):
opar = optparse.OptionParser()
opar.add_option("-d", "--dot", dest="dot", help="output a graphviz dot file", action="store_true")
opar.add_option("-m", "--min", dest="min", help="minimum complexity for output", type="int", default=2)
options, args = opar.parse_args(argv)
text = open(args[0], "rU").read()+'\n\n'
ast = compiler.parse(text)
visitor = PathGraphingAstVisitor()
visitor.preorder(ast, visitor)
if options.dot:
print 'graph {'
for graph in visitor.graphs.values():
if graph.complexity() >= options.min:
graph.to_dot()
print '}'
else:
for graph in visitor.graphs.values():
if graph.complexity() >= options.min:
print graph.name, graph.complexity()
if __name__ == '__main__':
main(sys.argv[1:])
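# Typical invocations (Python 2 only, since this module relies on the removed
# `compiler` package):
#   python mccabe.py --min 5 some_module.py   # print name and complexity per function
#   python mccabe.py --dot some_module.py     # emit a graphviz description of the paths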
| 28.902655
| 107
| 0.567361
|
5825c437fcef107d5589ede0bc47dc1cf9036e2e
| 846
|
py
|
Python
|
admit/bdp/Table_inherit_BDP.py
|
astroumd/admit
|
bbf3d79bb6e1a6f7523553ed8ede0d358d106f2c
|
[
"MIT"
] | 4
|
2017-03-01T17:26:28.000Z
|
2022-03-03T19:23:06.000Z
|
admit/bdp/Table_inherit_BDP.py
|
teuben/admit
|
1cae54d1937c9af3f719102838df716e7e6d655c
|
[
"MIT"
] | 48
|
2016-10-04T01:25:33.000Z
|
2021-09-08T14:51:10.000Z
|
admit/bdp/Table_inherit_BDP.py
|
teuben/admit
|
1cae54d1937c9af3f719102838df716e7e6d655c
|
[
"MIT"
] | 2
|
2016-11-10T14:10:22.000Z
|
2017-03-30T18:58:05.000Z
|
"""
An example of inheriting from Table_BDP
"""
from Table_BDP import Table_BDP
import admit.util.bdp_types as bt
class Table_inherit_BDP(Table_BDP):
def __init__(self,xmlFile=None):
Table_BDP.__init__(self,xmlFile)
self.item1 = ["a","b"]
self.item2 = 0.0
self._version= "0.1.0"
def testset(self):
self.set("taskid",5)
# set the column labels and units
self.table.columns = ["Frequency","Peak Intensity","FWHM"]
self.table.units = ["GHz","Jy/bm","km/s"]
# populate the table with some data
self.table.setData([[93.2,1.5,8.6],
[92.35,0.6,4.5],
[93.7,8.2,6.7]])
# add a row
self.table.addRow([92.6,1.04,7.3])
self.table.description = "Table of spectral lines"
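# Minimal usage sketch (assumes the admit package and its Table_BDP machinery are
# importable):
#   bdp = Table_inherit_BDP()
#   bdp.testset()
#   # bdp.table now holds four rows under the columns Frequency / Peak Intensity / FWHM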
| 30.214286
| 66
| 0.553191
|
2f4a45f9c166bbe630be9739baf430c2cab4e3fb
| 65,704
|
py
|
Python
|
pypey/pype.py
|
JoseLlarena/pypey
|
d61843e3a5df6363c2e868f98058546688312d30
|
[
"MIT"
] | 1
|
2021-06-30T16:05:21.000Z
|
2021-06-30T16:05:21.000Z
|
pypey/pype.py
|
JoseLlarena/pypey
|
d61843e3a5df6363c2e868f98058546688312d30
|
[
"MIT"
] | null | null | null |
pypey/pype.py
|
JoseLlarena/pypey
|
d61843e3a5df6363c2e868f98058546688312d30
|
[
"MIT"
] | null | null | null |
"""
Main class for building streaming pipelines
"""
from __future__ import annotations
import json
from collections import defaultdict, deque
from inspect import signature, Parameter
from logging import getLogger
from multiprocessing import Pool
from operator import eq
from os import PathLike
from pickle import PicklingError
from random import shuffle, seed
from more_itertools.more import windowed
from pypey import px
try: # _tuplegetter is only available from 3.8
from collections import _tuplegetter
except ImportError:
_tuplegetter = property
from pathos.multiprocessing import ProcessPool # type: ignore
from collections.abc import Sized
from functools import reduce
from heapq import nlargest
from itertools import chain, tee, accumulate, filterfalse, islice, dropwhile, takewhile, cycle, zip_longest, product
from more_itertools import tail, random_permutation, ichunked, unique_everseen, partition, unzip, \
always_reversible, interleave, interleave_longest, collapse, split_into
from sys import stdout
from typing import Iterator, Iterable, Tuple, Generic, Union, Any, Optional, List, AnyStr, IO, Sequence, NamedTuple, \
Deque, Dict
from pypey.func import Fn, ident, H, I, T, X, Y, require, require_val
__all__ = ['Pype', 'SPLIT_MODES', 'Total', 'TOTAL']
logger = getLogger(__name__)
flatten = chain.from_iterable
SPLIT_MODES = frozenset({'at', 'after', 'before'})
class Total(str):
def __str__(self):
return '_TOTAL_'
def __repr__(self):
return str(self)
#: Constant indicating the aggregated counts in :func:`Pype.freqs`
TOTAL = Total()
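# Built-in callables for which ``inspect.signature`` raises, grouped by arity; these are
# presumably special-cased when deciding whether an item should be unpacked into a
# function's arguments (see ``_unpack_fn``).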
UNARY_WITHOUT_SIGNATURE = {__import__,
bool, bytearray, bytes,
classmethod,
dict, dir,
frozenset,
getattr,
int, iter,
map, max, min,
next,
print,
set, staticmethod, str, super,
type,
vars}
N_ARY_WITHOUT_SIGNATURE = {filter, slice, range, zip, breakpoint}
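# Sentinel object used to signal "no value", e.g. an exhausted iterator in ``_ufn`` and
# ``_accumulate`` below.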
_sent = object()
class Pype(Generic[T]):
__slots__ = '_it',
def __getstate__(self: Pype[T]) -> Iterable[T]:
"""
Returns this pipe's backing ``Iterable`` as its state.
:return: this pipe's backing ``Iterable``
"""
return self._it
def __init__(self: Pype[T], it: Iterable[T]):
"""
Creates pipe from ``Iterable``. No argument validation is carried out.
:param it: an ``Iterable``
"""
self._it = it
def __iter__(self: Pype[T]) -> Iterator[T]:
"""
Returns iterator either by a call on this pipe or by calling built-in ``iter`` on it.
:return: an ``Iterator`` for this pipe's data
"""
return iter(self._data())
def __setstate__(self: Pype[T], state: Iterable[T]):
"""
Set this Pype's state to be the given iterable. This method is the counterpart to :func:`Pype.__getstate__`.
:param state: an iterable to be the state of this Pype
:return: nothing
"""
self._it = state
def accum(self: Pype[T], fn: Fn[[X, T], X], init: Optional[X] = None) -> Pype[X]:
"""
Returns a pipe where each item is the result of combining a running total with the corresponding item in the
original pipe:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3]).accum(lambda total, n: total+n))
[1, 3, 6]
When an initial value is given, the resulting pipe will have one more item than the original one:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3]).accum(lambda total, n: total+n, init=0))
[0, 1, 3, 6]
Similar to ``itertools.accumulate``.
:param init: optional initial value to start the accumulation with
:param fn: function where the first argument is the running total and the second the current item
:return: a pipe with accumulated items
:raises: ``TypeError`` if ``fn`` is not a ``Callable``
"""
require(fn is not None, 'fn cannot be None')
# Try block necessary to support <3.8 as init argument has not been implemented in those versions
try:
return Pype(accumulate(self._data(), fn, initial=init))
except TypeError:
return Pype(_accumulate(self._data(), fn, init))
def broadcast(self: Pype[T], fn: Fn[[T], Iterable[X]]) -> Pype[Tuple[T, X]]:
"""
Returns the flattened Cartesian product of this pipe's items and the items returned by ``fn``.
Conceptually similar to ``numpy``-'s broadcasting.
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).broadcast(tuple).map(lambda word, char: f'{word} -> {char}'))
['a -> a', 'fun -> f', 'fun -> u', 'fun -> n', 'day -> d', 'day -> a', 'day -> y']
:param fn: function to create ``Iterable`` from each of this pipe's items
:return: a pipe where each item is a pair with the first element being the nth instance of this pipe's items
and the second an element of ``fn``-s returned ``Iterable``
:raises: ``TypeError`` if ``fn`` is not a ``Callable``
"""
ufn, data = self._ufn(fn), self._data()
return Pype(flatten(product([item], ufn(item)) for item in data))
def cat(self: Pype[T], other: Iterable[X]) -> Pype[Union[T, X]]:
"""
Concatenates this pipe with the given ``Iterable``.
>>> list(pype([1, 2, 3]).cat([4, 5, 6]))
[1, 2, 3, 4, 5, 6]
:param other: ``Iterable`` to append to this one
:return: a concatenated pipe
:raises: ``TypeError`` if ``other`` is not an ``Iterable``
"""
require(isinstance(other, Iterable), 'other needs to be an Iterable')
return Pype(chain(self._data(), other))
def chunk(self: Pype[T], size: Union[int, Iterable[int]]) -> Pype[Pype[T]]:
"""
Breaks pipe into sub-pipes with up to ``size`` items each:
::
>>> from pypey import pype
>>> [list(chunk) for chunk in pype([1, 2, 3, 4]).chunk(2)]
[[1, 2], [3, 4]]
If this pipe's size is not a multiple of ``size``, the last chunk will have fewer items than ``size``:
::
>>> from pypey import pype
>>> [list(chunk) for chunk in pype([1, 2, 3]).chunk(2)]
[[1, 2], [3]]
If ``size`` is larger than this pipe's size, only one chunk will be returned:
::
>>> from pypey import pype
>>> [list(chunk) for chunk in pype([1, 2, 3, 4]).chunk(5)]
[[1, 2, 3, 4]]
If ``size`` is an iterable of ints, chunks will have corresponding sizes:
::
>>> [list(chunk) for chunk in pype([1, 2, 3, 4]).chunk([1, 3])]
[[1], [2, 3, 4]]
If the sum of sizes is smaller than this pipe's length, the remaining items will not be returned:
::
>>> from pypey import pype
>>> [list(chunk) for chunk in pype([1, 2, 3, 4]).chunk([1, 2])]
[[1], [2, 3]]
If the sum of sizes is larger than this pipe's length, fewer items will be returned in the chunk that overruns
the pipe and further chunks will be empty:
::
>>> from pypey import pype
>>> [list(chunk) for chunk in pype([1, 2, 3, 4]).chunk([1, 2, 3, 4])]
[[1], [2, 3], [4], []]
This method tees the backing ``Iterable``.
Similar to ``more_itertools.ichunked`` and ``more_itertools.split_into``.
:param size: chunk size or sizes
:return: a pipe of pipes with up to `size` items each or with sizes specified by iterable of sizes
:raises: ``TypeError`` if ``size`` is not an ``int`` or an ``Iterable`` of ``int``-s
:raises: ``ValueError`` if ``size`` is not positive or if any of the iterable of sizes is not positive
"""
sizes = [s for s in size if isinstance(s, int)] if isinstance(size, Iterable) else []
require(isinstance(size, int) or bool(sizes), f'size must be an int or an iterable of ints but was [{type(size)}]')
require_val(size > 0 if isinstance(size, int) else all(s > 0 for s in sizes), f'size must be > 0 but was [{size}]')
fn = ichunked if isinstance(size, int) else split_into
return Pype(map(Pype, fn(self._data(), size)))
def clone(self: Pype[T]) -> Pype[T]:
"""
Lazily clones this pipe. This method tees the backing ``Iterable`` and replaces it with a new copy.
>>> from pypey import pype
>>> list(pype([1, 2, 3]).clone())
[1, 2, 3]
Similar to ``itertools.tee``.
:return: a copy of this pipe
"""
return Pype(self._data(teed=True))
def cycle(self: Pype[T], n: Optional[int] = None) -> Pype[T]:
"""
Returns items in pipe ``n`` times if ``n`` is not ``None``:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3]).cycle(2))
[1, 2, 3, 1, 2, 3]
else it returns infinite copies:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3]).cycle().take(6))
[1, 2, 3, 1, 2, 3]
Similar to ``itertools.cycle`` with ``n`` = ``None`` and to ``more_itertools.ncycles`` with integer ``n``.
:param n: number of concatenated copies or ``None`` for infinite copies
:return: a pipe that cycles either ``n`` or infinite times over the items of this one
:raises: ``TypeError`` if ``n`` is neither an ``int`` nor ``None``
:raises: ``ValueError`` if ``n`` is not negative
"""
require(n is None or isinstance(n, int), f'n needs to be an int or None but was [{n}]')
require_val(n is None or n >= 0, f'n needs to be non-negative [{n}]')
if n == 0:
return Pype(())
data = self._data()
return Pype(cycle(data) if n is None else _ncycles(data, n))
def dist(self: Pype[T], n: int) -> Pype[Pype[T]]:
"""
Returns a pipe with ``n`` items, each being smaller pipes containing this pipe's elements distributed equally
amongst them:
::
>>> from pypey import pype
>>> [list(segment) for segment in pype([1, 2, 3, 4, 5, 6]).dist(2)]
[[1, 3, 5], [2, 4, 6]]
If this pipe's size is not evenly divisible by ``n``, then the sizes of the returned pipes
will not be identical:
::
>>> from pypey import pype
>>> [list(segment) for segment in pype([1, 2, 3, 4, 5]).dist(2)]
[[1, 3, 5], [2, 4]]
If this pipe's size is smaller than ``n``, the last pipes in the returned pipe will be empty:
::
>>> from pypey import pype
>>> [list(segment) for segment in pype([1, 2, 3, 4, 5]).dist(7)]
[[1], [2], [3], [4], [5], [], []]
This method tees the backing ``Iterable``.
Similar to ``more_itertools.distribute``.
:param n: the number of pipes with distributed elements
:return: a pipe with this pipe's items distributed amongst the contained pipes
:raises: ``TypeError`` if ``n`` is not an ``int``
:raises: ``ValueError`` if ``n`` is not positive
"""
require(isinstance(n, int), f'n needs to be an int but was [{type(n)}]')
require_val(n > 0, f'n needs to be greater than 0 but was [{n}]')
# implementation based on ``more_itertools.distribute``
return Pype(Pype(islice(child, idx, None, n)) for idx, child in enumerate(tee(self._data(), n)))
def divide(self: Pype[T], n: int) -> Pype[Pype[T]]:
"""
Breaks pipe into ``n`` sub-pipes:
::
>>> from pypey import pype
>>> [list(div) for div in pype([1, 2, 3, 4, 5, 6]).divide(2)]
[[1, 2, 3], [4, 5, 6]]
If this pipe's size is not a multiple of ``n``, the sub-pipes' sizes will be equal except the last one, which
will contain all excess items:
::
>>> from pypey import pype
>>> [list(div) for div in pype([1, 2, 3, 4, 5, 6, 7]).divide(3)]
[[1, 2], [3, 4], [5, 6, 7]]
If this pipe's size is smaller than ``n``, the resulting pipe will contain as many single-item pipes as there
are in it, followed by ``n`` minus this pipe's size empty pipes.
::
>>> from pypey import pype
>>> [list(div) for div in pype([1, 2, 3]).divide(4)]
[[1], [2], [3], []]
This method requires calculating the size of this pipe, and thus will eagerly consume the backing ``Iterable``
if it's lazy.
Similar to ``more_itertools.divide``.
:param n: number of segments
:return: a pipe of ``n`` pipes
:raises: ``TypeError`` if ``n`` is not an ``int``
:raises: ``ValueError`` if ``n`` is not positive
"""
require(isinstance(n, int), f'n needs to be an int but was [{type(n)}]')
require_val(n > 0, f'n needs to be greater than 0 but was [{n}]')
return Pype(map(Pype, _deferred_divide(self._data(), n)))
def do(self: Pype[T], fn: Fn[[T], Any], *, now: bool = False, workers: int = 0, chunk_size: int = 100) -> Pype[T]:
"""
Produces a side effect for each item, with the given function's return value ignored. It is typically used to
execute an operation that is not functionally pure such as printing to console, updating a GUI, writing to disk
or sending data over a network.
::
>>> from pypey import pype
>>> p = pype(iter([1, 2, 3])).do(lambda n: print(f'{n}'))
>>> list(p)
1
2
3
[1, 2, 3]
If ``now`` is set to ``True`` the side effect will take place immediately and the backing ``Iterable`` will be
consumed if lazy.
::
>>> from pypey import pype
>>> p = pype(iter([1, 2, 3])).do(lambda n: print(f'{n}'), now=True)
1
2
3
>>> list(p)
[]
If ``workers`` is greater than ``0`` the side effect will be parallelised using ``multiprocessing`` if
possible, or ``pathos`` if not. The ``pathos`` multiprocessing implementation is slower and more limited than the
built-in multiprocessing but it does allow using lambdas and local functions. When using workers, the backing
``Iterable`` is teed to avoid consumption. Using a large ``chunk_size`` can greatly speed up parallelisation;
it is ignored if ``workers`` is ``0``.
Also known as ``for_each``, ``tap`` and ``sink``.
Similar to ``more_itertools.side_effect``.
:param fn: a function taking a possibly unpacked item
:param now: ``False`` to defer the side effect until iteration, ``True`` to write immediately
:param workers: number of extra processes to parallelise this method's side effect function
:param chunk_size: size of subsequence of ``Iterable`` to be processed by workers
:return: this pipe
:raises: ``TypeError`` if ``fn`` is not a ``Callable`` or ``workers`` or ``chunk_size`` are not ``int``
:raises: ``ValueError`` if ``workers`` is negative or ``chunk_size`` is non-positive
"""
require(isinstance(workers, int), f'workers should be a non-negative int but was [{workers}]')
require(isinstance(chunk_size, int), f'chunk size should be a positive int but was [{chunk_size}]')
require_val(chunk_size > 0, f'chunk size should be a positive int but was [{chunk_size}]')
ufn = self._ufn(fn)
if workers:
mapping = _parallel_map(self._data(), ufn, workers, chunk_size)
if now:
for _ in mapping:
pass
return self
return Pype(_side_effect(ident, mapping))
if now:
for item in self._data():
ufn(item)
return self
return Pype(_side_effect(ufn, self._data()))
def drop(self: Pype[T], n: int) -> Pype[T]:
"""
Returns this pipe but with the first or last ``n`` items missing:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4]).drop(2))
[3, 4]
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4]).drop(-2))
[1, 2]
:param n: number of items to skip, positive if at the beginning of the pipe, negative at the end
:return: pipe with ``n`` dropped items
:raises: ``TypeError`` if ``n`` is not an ``int``
"""
require(isinstance(n, int), f'n needs to be an int but was [{type(n)}]')
if n == 0:
return self
return Pype(islice(self._data(), n, None) if n > 0 else _clip(self.it(), -n))
def drop_while(self: Pype[T], pred: Fn[..., bool]) -> Pype[T]:
"""
Drops items as long as the given predicate is ``True``; afterwards, it returns every item:
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4]).drop_while(lambda n: n < 3))
[3, 4]
Similar to ``itertools.dropwhile``.
:param pred: A function taking a possibly unpacked item and returning a boolean
:return: a pipe that is a subset of this one
:raises: ``TypeError`` if ``pred`` is not a ``Callable``
"""
return Pype(dropwhile(self._ufn(pred), self._data()))
def eager(self: Pype[T]) -> Pype[T]:
"""
Returns a pype with the same contents as this one but with an eager backing collection. This will trigger
reading the whole backing ``Iterable`` into memory.
>>> from pypey import pype
>>> p = pype(range(-5, 5)).map(abs)
>>> p.size()
10
>>> p.size()
0
>>> p = pype(range(-5, 5)).map(abs).eager()
>>> p.size()
10
>>> p.size()
10
:return: this pipe, but eager
"""
if isinstance(self._data(), Sequence) and not isinstance(self._data(), range):
return self
return Pype(tuple(self._data()))
def enum(self: Pype[T], start: int = 0, *, swap: bool = False) -> Pype[Union[Tuple[int, T], Tuple[T, int]]]:
"""
Pairs each item with an increasing integer index:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).enum())
[(0, 'a'), (1, 'fun'), (2, 'day')]
``swap`` = ``True`` will swap the index and item around:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).enum(swap=True))
[('a', 0), ('fun', 1), ('day', 2)]
Similar to built-in ``enumerate``.
:param start: start of the index sequence
:param swap: if ``True`` index will be returned second, else it will be returned first
:return: a pipe the same size as this one but with each item being ``tuple`` of index and original item
:raises: ``TypeError`` if ``start`` is not an ``int``
"""
enumerated = enumerate(self._data(), start=start)
return Pype(((item, idx) for idx, item in enumerated) if swap else enumerated)
def flat(self: Pype[I]) -> Pype[T]:
"""
Flattens iterable items into a collection of their elements:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).flat())
['a', 'f', 'u', 'n', 'd', 'a', 'y']
Similar to ``itertools.chain.from_iterable``
:return: a pipe with the elements of its ``Iterable`` items as items
:raises: ``TypeError`` if items are not ``Iterable``
"""
return Pype(flatten(self._data()))
def flatmap(self: Pype[I], fn: Fn[..., I]) -> Pype[X]:
"""
Maps ``Iterable`` items and then flattens the result into their elements.
Equivalent to :func:`Pype.map` followed by :func:`Pype.flat`
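For example:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun']).flatmap(str.upper))
['A', 'F', 'U', 'N']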
:param fn: function taking a possibly unpacked item and returning a value
:return: a pipe with mapped flattened items
:raises: ``TypeError`` if items are not ``Iterable`` or ``fn`` is not a ``Callable``
"""
return Pype(flatten(map(self._ufn(fn), self._data())))
def freqs(self: Pype[T], total: bool = True) -> Pype[Tuple[Union[T, object], int, float]]:
"""
Computes this pipe's items' absolute and relative frequencies and optionally the total:
::
>>> from pypey import pype
>>> tuple(pype('AAB').freqs())
(('A', 2, 0.6666666666666666), ('B', 1, 0.3333333333333333), (_TOTAL_, 3, 1.0))
If `total` is `False`, the total is left out:
::
>>> from pypey import pype
>>> tuple(pype('AAB').freqs(total=False))
(('A', 2, 0.6666666666666666), ('B', 1, 0.3333333333333333))
:return: a pype containing tuples with this pipe's unique items, plus the total as the ``pype.TOTAL`` item,
with their absolute and relative frequencies
"""
return Pype(_deferred_freqs(self._data(), total))
def group_by(self: Pype[T], key: Fn[..., Y]) -> Pype[Tuple[Y, List[T]]]:
"""
Groups items according to the key returned by the ``key`` function:
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).group_by(len))
[(1, ['a']), (3, ['fun', 'day'])]
This method is eager and will consume the backing ``Iterable`` if it's lazy.
Similar to ``itertools.groupby`` except elements don't need to be sorted
:param key: function taking a possibly unpacked item and returning a grouping key
:return: a pipe made up of pairs of keys and lists of items
:raises: ``TypeError`` if ``fn`` is not a ``Callable``
"""
ufn, data = self._ufn(key), self._data()
return Pype(_deferred_group_by(data, ufn))
def interleave(self: Pype[T], other: Iterable[X], n: int = 1, trunc: bool = True) -> Pype[Union[T, X, Any]]:
"""
Returns a pipe where items in this pipe are interleaved with items in the given ``Iterable``, in order.
If either this pipe or the other ``Iterable`` is exhausted, the interleaving stops:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'fun', 'day']).interleave([1, 2, 3]))
['a', 1, 'fun', 2, 'fun', 3]
Setting ``trunc`` to ``False`` will keep adding the left-over items in the ``Iterable`` that hasn't been
exhausted after the other one is:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'fun', 'day']).interleave([1, 2, 3], trunc=False))
['a', 1, 'fun', 2, 'fun', 3, 'day']
The number of this pipe's items to leave between the items in the ``Iterable`` can be varied:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'fun', 'day']).interleave([1, 2, 3], n=2))
['a', 'fun', 1, 'fun', 'day', 2]
This operation is lazy.
A cross between ``more_itertools.interleave_longest``, ``more_itertools.interleave`` and
``more_itertools.intersperse``.
:param other: the ``Iterable`` whose items will be interleaved with this pipe's
:param n: the number of this pipe's items to leave between each of the ``Iterable``-'s ones
:param trunc: ``True`` if the unexhausted ``Iterable`` should be truncated once the other one is
:return: A pipe with this pipe's elements and the given ``Iterable``-'s in order
:raises: ``TypeError`` if ``other`` is not an ``Iterable`` or ``n`` is not an ``int``
:raises: ``ValueError`` if ``n`` is less than one
"""
require(isinstance(n, int), f'n needs to be an int but was [{type(n)}]')
require_val(n >= 1, f'n needs to be a positive integer but was [{n}]')
this_data = self._data() if n == 1 else ichunked(self._data(), n)
if trunc:
return Pype(collapse(interleave(this_data, other), levels=1))
return Pype(collapse(interleave_longest(this_data, other), levels=1))
def it(self: Pype[T]) -> Iterator[T]:
"""
Returns an ``Iterator`` for this pipe's items. It's a more concise version of, and functionally identical
to, :func:`Pype.__iter__`
>>> from pypey import pype
>>> list(iter(pype([1, 2, 3]))) == list(pype([1, 2, 3]).it())
True
:return: an ``Iterator`` for this pipe's items
"""
return iter(self)
def map(self: Pype[T], fn: Fn[..., Y], *other_fns: Fn[..., X], workers: int = 0, chunk_size: int = 100) \
-> Pype[Union[X, Y]]:
"""
Transforms this pipe's items according to the given function(s):
::
>>> from math import sqrt
>>> from pypey import pype
>>> list(pype([1, 2, 3]).map(sqrt))
[1.0, 1.4142135623730951, 1.7320508075688772]
If more than one function is provided, they will be chained into a single one before being applied to each item:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).map(str.upper, reversed, list))
[['A'], ['N', 'U', 'F'], ['Y', 'A', 'D']]
If ``workers`` is greater than ``0`` the mapping will be parallelised using ``multiprocessing`` if possible
or ``pathos`` if not. The ``pathos`` multiprocessing implementation is slower and has different limitations than the
built-in multiprocessing but it does allow using lambdas. When using workers, the backing ``Iterable`` is teed
to avoid consumption. Using a large ``chunk_size`` can greatly speed up parallelisation; it is ignored if
``workers`` is ``0``.
Similar to built-in ``map``.
:param fn: a function taking a possibly unpacked item and returning a value
:param other_fns: other functions to be chained with ``fn``, taking a possibly unpacked item and returning a
value
:param workers: number of extra processes to parallelise this method's mapping function(s)
:param chunk_size: size of subsequence of ``Iterable`` to be processed by workers
:return: a pipe with this pipe's items mapped to values
:raises: ``TypeError`` if ``fn`` is not a ``Callable`` or ``other_fns`` is not a ``tuple`` of ``Callable`` or
if ``workers`` or ``chunk_size`` are not ``int``
:raises: ``ValueError`` if ``workers`` is negative or ``chunk_size`` is non-positive
"""
require(isinstance(workers, int), f'workers should be a non-negative int but was [{workers}]')
require(isinstance(chunk_size, int), f'chunk size should be a positive int but was [{chunk_size}]')
require_val(chunk_size > 0, f'chunk size should be a positive int but was [{chunk_size}]')
combo_fn = self._ufn(fn) if not other_fns else px(_pipe, functions=[self._ufn(fn) for fn in (fn,) + other_fns])
if workers:
return Pype(_parallel_map(self._data(), combo_fn, workers, chunk_size))
return Pype(map(combo_fn, self._data()))
def partition(self: Pype[T], pred: Fn[..., bool]) -> Tuple[Pype[T], Pype[T]]:
"""
Splits this pipe's items into two pipes, according to whether the given
predicate returns ``True`` or ``False``:
>>> from pypey import pype
>>> [list(p) for p in pype([1, 2, 3, 4]).partition(lambda n: n%2)]
[[2, 4], [1, 3]]
This method tees the backing ``Iterable``.
:param pred: A function taking a possibly unpacked item and returning a boolean
:return: a 2-``tuple`` with the first item being a pipe with items for which ``pred`` is ``False`` and the
second a pipe with items for which it is ``True``
:raises: ``TypeError`` if ``pred`` is not a ``Callable``
"""
falses, trues = partition(self._ufn(pred), self._data())
return Pype(falses), Pype(trues)
def pick(self: Pype[Union[Sequence, NamedTuple]], key: Any) -> Pype[Any]:
"""
Maps each item to the given ``key``. Allowed keys are any supported by ``object.__getitem__``:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).pick(0))
['a', 'f', 'd']
as well as ``@property``-defined object properties and ``namedtuple`` attributes:
::
>>> from collections import namedtuple
>>> from pypey import pype
>>> Person = namedtuple('Person', ['age'])
>>> list(pype([Person(42), Person(24)]).pick(Person.age))
[42, 24]
Equivalent to :func:`Pype.map(lambda item: item.key)` and :func:`Pype.map(lambda item: item[key])`.
:param key: key to pick from each item
:return: a pipe where each item in this pipe has been replaced with the given key
"""
is_prop = isinstance(key, property) or isinstance(key, _tuplegetter)
return Pype(key.__get__(item) if is_prop else item[key] for item in self._data())
def print(self: Pype[T],
fn: Fn[..., str] = str,
*,
sep: str = ' ',
end: str = '\n',
file: IO = stdout,
flush: bool = False,
now: bool = True) -> Pype[T]:
"""
Prints string returned by given function using built-in ``print``:
::
>>> from pypey import pype
>>> p = pype(iter([1, 2, 3])).print(now=False)
>>> list(p)
1
2
3
[1, 2, 3]
If ``now`` is set to ``True``, the printing takes place immediately and the backing ``Iterable`` is consumed:
::
>>> from pypey import pype
>>> p = pype(iter([1, 2, 3])).print(now=True)
1
2
3
>>> list(p)
[]
The keyword-only parameters are the same as the built-in ``print`` (minus the ``now`` flag).
:param fn: A function taking a possibly unpacked item and returning a string
:param sep: separator as per built-in ``print``
:param end: terminator, as per built-in ``print``
:param file: text stream, as per built-in ``print``
:param flush: flush, as per built-in ``print``
:param now: ``False`` if the printing should be deferred, ``True`` otherwise
:return: this pipe, with a possibly consumed backing ``Iterable`` if ``now`` is set to ``True``
:raises: ``TypeError`` if ``fn`` is not a ``Callable``
"""
ufn, data = (fn if fn == str else self._ufn(fn), self._data())
if now:
for item in data:
print(ufn(item), sep=sep, end=end, file=file, flush=flush)
return self
return self.do(px(_print_fn, ufn=ufn, sep=sep, end=end, file=file, flush=flush))
def reduce(self: Pype[T], fn: Fn[[H, T], H], init: Optional[X] = None) -> H:
"""
Reduces this pipe to a single value through the application of the given aggregating function
to each item:
::
>>> from operator import add
>>> from pypey import pype
>>> pype([1, 2, 3]).reduce(add)
6
If ``init`` is not ``None``, it will be used as the initial aggregate and will
serve as a default value in case the pipe is empty:
::
>>> from pypey import pype
>>> pype([1, 2, 3]).reduce(add, init=-1)
5
This function is eager and immediate.
Similar to ``functools.reduce``.
:param fn: a function taking an aggregate of the previous items as its first argument
and the current item as its second, and returning a new aggregate
:param init: a value to be placed before all other items if it's not ``None``
:return: a value of the same type as the return of the given function, or ``init`` if the pipe is empty
:raises: ``TypeError`` if ``fn`` is not a ``Callable``
:raises: ``TypeError`` if this pipe is empty and ``init`` is ``None``
"""
data = self._data()
return reduce(fn, data) if init is None else reduce(fn, data, init)
def reject(self: Pype[T], pred: Fn[..., bool]) -> Pype[T]:
"""
Returns a pipe with only the items for which the given predicate returns ``False``:
::
>>> from pypey import pype
>>> list(pype(['a', 'FUN', 'day']).reject(str.isupper))
['a', 'day']
Opposite of :func:`Pype.select`.
Similar to built-in ``filterfalse``.
:param pred: a function taking a possibly unpacked item and returning a boolean
:return: a pipe with the subset of this pipe's items for which ``pred`` returns ``False``
:raises: ``TypeError`` if ``pred`` is not a ``Callable``
"""
return Pype(filterfalse(self._ufn(pred), self._data()))
def reverse(self: Pype[T]) -> Pype[T]:
"""
Returns a pipe where this pipe's items appear in reversed order:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3]).reverse())
[3, 2, 1]
This operation is eager but deferred.
Similar to built-in ``reversed``.
:return: a pipe with items in reversed order
"""
return Pype(_deferred_reverse(self._data()))
def roundrobin(self: Pype[I]) -> Pype[T]:
"""
Returns a pipe where each item is taken from each of this pipe's elements' in turn:
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).roundrobin())
['a', 'f', 'd', 'u', 'a', 'n', 'y']
This operation is eager but deferred.
Similar to ``more_itertools.interleave_longest``.
:return: A pipe with items taken from this pipe's ``Iterable`` items
:raises: ``TypeError`` if any of this pipe's items is not an ``Iterable``
"""
# implementation based on ``more_itertools.interleave_longest``
return Pype(_deferred_roundrobin(self._data()))
def sample(self: Pype[T], k: int, seed_: Optional[Any] = None) -> Pype[T]:
"""
Returns a pipe with ``k`` items sampled without replacement from this pipe:
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4, 5]).sample(2))
[1, 3]
This operation is eager but deferred.
Similar to ``random.sample``.
:param k: a non-negative ``int`` specifying how many items to sample
:param seed_: a value to seed the random number generator
:return: a pipe with sampled items from this pipe
:raises: ``TypeError`` if k is not an ``int``
:raises: ``ValueError`` if ``k`` is negative
"""
if seed_:
seed(seed_)
require(isinstance(k, int), f'k needs to be an int but was [{type(k)}]')
return Pype(_deferred_sample(self._data(), k))
def select(self: Pype[T], pred: Fn[..., bool]) -> Pype[T]:
"""
Returns a pipe with only the items for which ``pred`` returns ``True``, the opposite of :func:`Pype.reject`:
::
>>> from pypey import pype
>>> list(pype(['a', 'FUN', 'day']).select(str.isupper))
['FUN']
Also known as ``filter``.
Similar to built-in ``filter``.
:param pred: a function taking a possibly unpacked item and returning a boolean
:return: a pipe with the subset of this pipe's items for which the given ``pred`` returns ``True``
:raises: ``TypeError`` if ``pred`` is not a ``Callable``
"""
return Pype(filter(self._ufn(pred), self._data()))
def shuffle(self: Pype[T], seed_: Optional[Any] = None) -> Pype[T]:
"""
Returns a shuffled version of this pipe:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4, 5]).shuffle())
[3, 2, 1, 5, 4]
This method is eager but deferred.
Similar to ``random.shuffle``
:param seed_: a value to seed the random generator
:return: This pipe, but with its items shuffled
"""
if seed_ is not None:
seed(seed_)
return Pype(_deferred_shuffle(self._data()))
def size(self: Pype[T]) -> int:
"""
Returns number of items in this pipe:
::
>>> from pypey import pype
>>> pype([1, 2, 3]).size()
3
This operation is eager and immediate.
:return: an ``int`` corresponding to the cardinality of this pipe
"""
if isinstance(self._it, Sized):
return len(self._it)
return sum(1 for _ in self._data())
def slice(self: Pype[T], start: int, end: int) -> Pype[T]:
"""
Returns a slice of this pipe between items at positions ``start`` and ``end``, exclusive:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4]).slice(1, 3))
[2, 3]
Similar to ``itertools.islice``.
:param start: index of first element to be returned
:param end: index of element after the last element to be returned
:return: pipe with a slice of the items of this pipe
:raises: ``TypeError`` if ``start`` or ``end`` are not ``int``-s
:raises: ``ValueError`` if ``start`` or ``end`` are negative or if ``end`` is smaller than ``start``
"""
require(isinstance(start, int), f'start needs to be an int but was [{type(start)}]')
require(isinstance(end, int), f'end needs to be an int but was [{type(end)}]')
require_val(start <= end, f'start cannot be larger than end but was [{start}] > [{end}]')
return Pype(islice(self._data(), start, end))
def sort(self: Pype[T], key: Optional[Fn[..., Y]] = None, *, rev: bool = False) -> Pype[T]:
"""
Sorts this pipe's items, using the return value of ``key`` if not ``None``:
::
>>> from pypey import pype
>>> list(pype(['a', 'funny', 'day']).sort(len))
['a', 'day', 'funny']
This method is eager but deferred.
Similar to builtin ``sorted``.
:param key: a function taking a possibly unpacked item and returning a value to sort by, or ``None``
:param rev: ``True`` if the sort order should be reversed, ``False`` otherwise.
:return: a sorted pipe
:raises: ``TypeError`` if ``key`` is not a ``Callable``
"""
ufn, data = (None if key is None else self._ufn(key), self._data())
return Pype(_deferred_sort(data, ufn, rev))
def split(self: Pype[T], when: Fn[..., bool], mode: str = 'after') -> Pype[Pype[T]]:
"""
Returns a pipe containing sub-pipes split off this pipe where the given ``when`` predicate is ``True``:
::
>>> from pypey import pype
>>> [list(split) for split in pype(list('afunday')).split(lambda char: char == 'a')]
[['a'], ['f', 'u', 'n', 'd', 'a'], ['y']]
The default mode is to split after every item for which the predicate is ``True``. When ``mode`` is set to
``before``, the split is done before:
::
>>> from pypey import pype
>>> [list(split) for split in pype(list('afunday')).split(lambda char: char == 'a', 'before')]
[['a', 'f', 'u', 'n', 'd'], ['a', 'y']]
And when ``mode`` is set to ``at``, the pipe will be split both before and after, leaving the splitting item
out:
::
>>> from pypey import pype
>>> [list(split) for split in pype(list('afunday')).split(lambda char: char == 'a', 'at')]
[[], ['f', 'u', 'n', 'd'], ['y']]
Similar to ``more_itertools.split_before``, ``more_itertools.split_after`` and ``more_itertools.split_at``.
:param when: a function taking a possibly unpacked item and returning ``True`` if this pipe should be split
at this item
:param mode: which side of the splitting item the pipe is split, one of ``after``, ``at`` or ``before``
:return: a pipe of pipes split off this pipe at items where ``when`` returns ``True``
:raises: ``TypeError`` if ``when`` is not a ``Callable`` or ``mode`` is not a ``str``
:raises: ``ValueError`` if ``mode`` is a ``str`` but not one of the supported ones
"""
require(isinstance(mode, str), f'mode should be a str but was [{mode}] instead')
require_val(mode in SPLIT_MODES, f'mode should be on of {SPLIT_MODES} but was [{mode}]')
# implementation based on ``more_itertools``'s ``split_before``, ``split_after`` and ``split_at``
splitting = _split_before if mode == 'before' else _split_after if mode == 'after' else _split_at
ufn, data = self._ufn(when), self._data()
return Pype(map(Pype, splitting(data, ufn)))
def take(self: Pype[T], n: int) -> Pype[T]:
"""
Returns a pipe containing the first or last ``n`` items of this pipe, depending on the sign of ``n``:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4]).take(-2))
[3, 4]
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4]).take(2))
[1, 2]
This operation is eager but deferred when ``n`` is negative; otherwise it is lazy.
Also known as `head` and `tail`.
:param n: a negative ``int`` specifying the number of items of this pipe's tail or a positive ``int`` for the
first ``n`` elements
:return: a pipe with this pipe's first or last ``n`` items
:raises: ``TypeError`` if ``n`` is not an ``int``
"""
require(isinstance(n, int), f'n needs to be an int but was [{type(n)}]')
slicing = islice if n >= 0 else _deferred_tail
return Pype(slicing(self._data(), abs(n)))
def take_while(self: Pype[T], pred: Fn[..., bool]) -> Pype[T]:
"""
Returns a pipe containing this pipe's items until ``pred`` returns ``False``:
::
>>> from pypey import pype
>>> list(pype([1, 2, 3, 4]).take_while(lambda n: n < 4))
[1, 2, 3]
Similar to ``itertools.takewhile``.
:param pred: a function taking a possibly unpacked item and returning a boolean
:return: a pipe that is a subset of this one, minus all items from the first one for which ``pred`` returns ``False``
:raises: ``TypeError`` if ``pred`` is not a ``Callable``
"""
return Pype(takewhile(self._ufn(pred), self._data()))
def tee(self: Pype[T], n: int) -> Pype[Pype[T]]:
"""
Returns ``n`` lazy copies of this pipe:
>>> from pypey import pype
>>> [list(copy) for copy in pype([1, 2, 3]).tee(2)]
[[1, 2, 3], [1, 2, 3]]
This method tees the backing ``Iterable`` but does not replace it (unlike :func:`Pype.clone`).
Similar to ``itertools.tee``.
:return: a pipe containing ``n`` copies of this pipe
:raises: ``TypeError`` if ``n`` is not an ``int``
:raises: ``ValueError`` if ``n`` is non-positive
"""
require_val(n > 0, f'n should be greater than 0 but was [{n}]')
return Pype(map(Pype, tee(self._it, n)))
def to(self: Pype[T], fn: Fn[[Iterable[T]], Y], *other_fns: Fn[..., X]) -> Union[Y, X]:
"""
Applies given function to this pipe:
::
>>> from pypey import pype
>>> pype(['a', 'fun', 'day']).to(list)
['a', 'fun', 'day']
This method is eager if the given function is eager and lazy if it's lazy:
::
>>> from pypey import pype
>>> p = pype(['a', 'fun', 'day']).to(enumerate)
>>> p
<enumerate object at 0x7fdb743003c0>
>>> list(p)
[(0, 'a'), (1, 'fun'), (2, 'day')]
If provided with more than one function, it will pipe them together:
::
>>> from pypey import pype
>>> pype(['a', 'fun', 'day']).to(list, len)
3
Equivalent to ``fn_n(...fn2(fn1(pipe)))``.
:param fn: function to apply to this pipe
:param other_fns: other functions to be chained with ``fn``
:return: the return value of the given function(s)
:raises: ``TypeError`` if any of the provided functions is not a ``Callable``
"""
return px(_pipe, functions=(fn,) + other_fns)(self)
def to_file(self: Pype[T],
target: Union[AnyStr, PathLike, int],
*,
mode: str = 'w',
buffering: int = -1,
encoding: Optional[str] = 'utf8',
errors: Optional[str] = None,
newline: Optional[str] = None,
closefd: bool = True,
opener: Optional[Fn[..., int]] = None,
eol: bool = True,
now: bool = True) -> Pype[T]:
"""
Writes items to file:
::
>>> from tempfile import gettempdir
>>> from os.path import join
>>> from pypey import pype
>>> p = pype(['a', 'fun', 'day']).to_file(join(gettempdir(), 'afunday.txt'), eol=False)
>>> list(p)
['a', 'fun', 'day']
>>> list(pype.file(join(gettempdir(), 'afunday.txt')))
['afunday']
The first eight parameters are identical to built-in ``open``. If ``eol`` is set to ``True``, each item will be
converted to string and a line terminator will be appended to it:
::
>>> from pypey import pype
>>> p = pype([1, 2, 3]).to_file(join(gettempdir(), '123.txt'), eol=True)
>>> list(p)
[1, 2, 3]
>>> list(pype.file(join(gettempdir(), '123.txt')))
['1', '2', '3']
This method is intrinsically lazy but it's set to immediate/eager by default. As such, if ``now`` is set to
``True`` and the backing ``Iterable`` is lazy, it will be consumed and this method will return an empty pipe:
::
>>> from pypey import pype
>>> p = pype(iter([1, 2, 3])).to_file(join(gettempdir(), '123.txt'), now=True)
>>> list(p)
[]
>>> list(pype.file(join(gettempdir(), '123.txt')))
['1', '2', '3']
:param target: target to write this pipe's items to
:param mode: mode as per built-in ``open``, except no read modes are allowed
:param buffering: buffering as per built-in ``open``
:param encoding: encoding as per built-in ``open`` except the default value is ``utf8`` instead of None
:param errors: errors as per built-in ``open``
:param newline: newline as per built-in ``open``
:param closefd: closefd as per built-in ``open``
:param opener: opener as per built-in ``open``
:param eol: ``True`` if a line separator should be added to each item, ``False`` otherwise
:param now: ``False`` to defer writing until pipe is iterated through, ``True`` to write immediately
:return: this pipe, possibly after writing its items to file
:raises: ``ValueError`` if ``mode`` has `r` or `+`
"""
require_val('r' not in mode and '+' not in mode, f'mode cannot be read, was {mode}')
_lines = _lines_to(target,
self._data(),
eol,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
closefd=closefd,
opener=opener)
if now:
for _ in _lines:
pass
return self if now else Pype(_lines)
def to_json(self: Pype[T],
target: Union[AnyStr, PathLike, int],
*,
mode: str = 'w',
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
cls=None,
indent=None,
separators=None,
default=None,
sort_keys=False,
as_dict: bool = True):
"""
Writes items to a file as a json value:
::
>>> from tempfile import gettempdir
>>> from os.path import join
>>> from pypey import pype
>>> p = pype(['a', 'fun', 'day']).to_json(join(gettempdir(), 'afunday.json'))
<pypey.pype.Pype object at 0x7f7c1971a8d0>
>>> list(pype.json(join(gettempdir(), 'afunday.json')))
['a', 'fun', 'day']
The first two parameters are the same as in built-in ``open``, and the rest are identical to the ones in ``json.dump``,
except the last one, which specifies whether item pairs should be written as a dict or as a list. This method always
writes a collection (a dict or a list), never a bare primitive, even if the pipe contains a single item.
This method is eager and immediate.
:param target: target to write this pipe's items to
:param mode: mode as per built-in ``open``, except no read modes are allowed
:param skipkeys: skipkeys as per built-in ``json.dump``
:param ensure_ascii: ensure_ascii as per built-in ``json.dump``
:param check_circular: check_circular as per built-in ``json.dump``
:param allow_nan: allow_nan as per built-in ``json.dump``
:param cls: cls as per built-in ``json.dump``
:param indent: indent as per built-in ``json.dump``
:param separators: separators as per built-in ``json.dump``
:param default: default as per built-in ``json.dump``
:param sort_keys: sort_keys as per built-in ``json.dump``
:param as_dict: True if item pairs should be written as key-value pairs in an object, False if as a list
:return: this pipe, after writing its items to a file as a json value
:raises: ``ValueError`` if ``mode`` has `r` or `+`
:raises: ``TypeError`` if ``as_dict`` is ``True`` and items are not pairs
"""
require_val('r' not in mode and '+' not in mode, f'mode cannot be read, was {mode}')
data = tuple(self._data())
if as_dict:
try:
data = dict(data)
except TypeError:
raise TypeError(f'items cannot be written as dictionary because they are not pairs {data[0:3]}...')
with open(target, mode=mode) as json_file:
json.dump(data,
json_file,
skipkeys=skipkeys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
cls=cls,
indent=indent,
separators=separators,
default=default,
sort_keys=sort_keys)
return self
def top(self: Pype[T], n: int, key: Fn[[T], Any] = ident) -> Pype[T]:
"""
Returns a pipe with the ``n`` items having the highest value, as defined by the ``key`` function.
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).top(2, len))
['fun', 'day']
This method is eager but deferred.
:param n: the number of items to return
:param key: the function defining the value to find the top elements for
:return: a pipe with the top ``n`` elements
:raises: ``TypeError`` if ``n`` is not an ``int`` or ``key`` is not a ``Callable``
:raises: ``ValueError`` if ``n`` is non-positive
"""
require_val(n > 0, f'n needs to be greater than 0 but was [{n}]')
ufn, data = self._ufn(key), self._data()
return Pype(_deferred_top(data, n, ufn))
def uniq(self: Pype[T]) -> Pype[T]:
"""
Returns this pipe's items with duplicates removed:
::
>>> from pypey import pype
>>> list(pype(['a', 'b', 'b', 'a']).uniq())
['a', 'b']
This method tees the backing ``Iterable``.
Similar to ``more_itertools.unique_everseen``.
:return: A pipe with the unique items in this pipe
"""
return Pype(unique_everseen(self._data()))
def unzip(self: Pype[I]) -> Pype[Pype[Any]]:
"""
Returns a pipe of pipes each with the items of this pipe's ``Iterable`` items:
>>> from pypey import pype
>>> [list(p) for p in pype(['any', 'fun', 'day']).unzip()]
[['a', 'f', 'd'], ['n', 'u', 'a'], ['y', 'n', 'y']]
This method is eager but deferred.
Similar to ``more_itertools.unzip``
:return: a pipe of pipes with the unzipped items in this pipe's ``Iterable`` items
:raises: ``TypeError`` if any of this pipe's items is not an ``Iterable``
"""
return Pype(map(Pype, _deferred_unzip(self._data())))
def window(self: Pype[T], size: int, *, shift: int = 1, pad: Optional[Any] = None) -> Pype[Tuple[Optional[T], ...]]:
"""
Returns a pipe containing pipes, each being a sliding window over this pipe's items:
::
>>> from pypey import pype
>>> list(pype(iter([1, 2, 3])).window(size=2))
[(1, 2), (2, 3)]
If ``size`` is larger than this pipe, ``pad`` is used to fill in the missing values:
::
>>> from pypey import pype
>>> list(pype(iter([1, 2, 3])).window(size=4, pad=-1))
[(1, 2, 3, -1)]
Similar to ``more_itertools.windowed``.
:param size: the size of the window
:param shift: the shift between successive windows
:param pad: the value to use to fill missing values
:return: a pipe of pipes, each being a sliding window over this pipe
:raises: ``TypeError`` if either ```size`` or ``shift`` is not an ``int``
:raises: ``ValueError`` if ``size`` is negative or ``shift`` is non-positive
"""
return Pype(windowed(self._data(), n=size, fillvalue=pad, step=shift))
def zip(self: Pype[I],
*others: Iterable[Any],
trunc: bool = True,
pad: Optional[Any] = None) -> Pype[Tuple[T, ...]]:
"""
Zips items in this pipe with each other or with items in each of the given ``Iterable``-s. If no ``Iterable``-s
are provided, the items in this pipe will be zipped with each other:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).zip(trunc=False, pad='?'))
[('a', 'f', 'd'), ('?', 'u', 'a'), ('?', 'n', 'y')]
Self-zipping will consume the backing ``Iterable`` if it's lazy. If other ``Iterable``-s are provided, the items
in this pipe will be zipped with the items in those:
::
>>> from pypey import pype
>>> list(pype(['a', 'fun', 'day']).zip([1, 2, 3, 4]))
[('a', 1), ('fun', 2), ('day', 3)]
Similar to built-in ``zip`` and ``itertools.zip_longest``.
        :param others: ``Iterable``-s to be zipped with this pipe
:param trunc: ``True`` to truncate all ``Iterable``-s to the size of the shortest one, ``False``
to pad all to the size of the longest one
:param pad: value to pad shorter ``Iterable``-s with if ``trunc`` is ``False``; if it's ``True``
it's ignored
:return: a pipe with the zipped items of this pipe with each other or with the given ``Iterable``-s' ones
        :raises: ``TypeError`` if any of ``others`` is not an ``Iterable``
"""
data = self._data()
if others == ():
return Pype(_deferred_zip(data, trunc, pad))
require(all(isinstance(p, Iterable) for p in others), 'Inputs to be zipped should be all Iterables')
return Pype(zip(data, *others) if trunc else zip_longest(data, *others, fillvalue=pad))
def zip_with(self: Pype[T], fn: Fn[..., Y]) -> Pype[Tuple[T, Y]]:
"""
Returns a pipe where each item is a 2-``tuple`` with this pipe's item as the first and the output of ``fn``
as the second. This is useful for adding an extra piece of data to the current pipeline:
::
>>> from pypey import pype
>>> list(pype(['a','fun', 'day']).zip_with(len))
[('a', 1), ('fun', 3), ('day', 3)]
and it's a more concise version of:
::
>>> from pypey import pype
>>> list(pype(['a','fun', 'day']).map(lambda w: (w, len(w))))
[('a', 1), ('fun', 3), ('day', 3)]
:param fn: a function taking a possibly unpacked item and returning a value to be zipped with this pipe's item
:return: a new pipe with zipped items
"""
ufn = self._ufn(fn)
return Pype(map(lambda item: (item, ufn(item)), self._data()))
def _data(self: Pype[T], teed: bool = False) -> Iterable[T]:
if teed and not isinstance(self._it, Sized):
self._it, copy = tee(self._it)
return copy
return self._it
def _ufn(self: Pype[T], fn: Fn[[T], Y]) -> Fn[[T], Y]:
head, tail = _head_tail(self._it)
if head is _sent:
return fn
self._it = head + tail if isinstance(tail, tuple) else chain([head], tail)
return _unpack_fn(fn, head)
def _accumulate(data: Iterable[T], func: Fn[[H, T], H], initial: Optional[H] = None) -> Iterator[H]:
it = iter(data)
total = initial
if initial is None:
total = next(it, _sent)
        if total is _sent:
return None
yield total
for element in it:
total = func(total, element)
yield total
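# _clip lazily yields every item except the last `n`, buffering `n` items at a time.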
def _clip(data: Iterator[T], n: int) -> Iterable[T]:
n_last: Deque = deque()
try:
for _ in range(n):
n_last.append(next(data))
except StopIteration:
        # fewer than n items were available, so there is nothing to yield
        return
while True:
try:
n_last.append(next(data))
yield n_last.popleft()
except StopIteration:
break
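# _deferred_divide splits `data` into `n` chunks: equal-sized when the length divides evenly,
# otherwise the remainder is folded into the last chunk; inputs shorter than `n` are padded
# with empty tuples.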
def _deferred_divide(data: Iterable[T], n: int) -> Iterator[Iterable[T]]:
_data = data if isinstance(data, Sequence) else tuple(data)
size, rem = divmod(len(_data), n)
if not rem:
yield from chain((_data[s * size: (s + 1) * size] for s in range(n)))
elif len(_data) > n:
yield from chain((_data[s * size: (s + 1) * size] for s in range(n - 1)), (_data[-(size + rem):],))
else:
yield from chain(((item,) for item in _data), (() for _ in range(n - len(_data))))
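# _deferred_freqs yields (item, count, relative frequency) triples; when `total` is True an
# extra TOTAL entry carrying the overall item count is also emitted.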
def _deferred_freqs(data: Iterable[T], total: bool) -> Iterator[Tuple[Union[T, Total], int, float]]:
item_to_freq: Dict[Union[T, Total], int] = defaultdict(int)
n = 0
for item in data:
item_to_freq[item] += 1
n += 1
if total:
item_to_freq[TOTAL] = n
yield from ((item, freq, freq / (n or 1)) for item, freq in item_to_freq.items())
def _deferred_group_by(data: Iterable[T], key: Fn[..., Y]) -> Iterator[Tuple[Y, List[T]]]:
key_to_group = defaultdict(list)
for element in data:
key_to_group[key(element)].append(element)
yield from key_to_group.items()
def _deferred_reverse(data: Iterable[T]) -> Iterator[T]:
yield from always_reversible(data)
def _deferred_roundrobin(data: Iterable[I]) -> Iterator[T]:
yield from filterfalse(px(eq, _sent), flatten(zip_longest(*data, fillvalue=_sent)))
def _deferred_sample(data: Iterable[T], k: int) -> Iterator[T]:
yield from random_permutation(data, k)
def _deferred_shuffle(data: Iterable[T]) -> Iterator[T]:
data = list(data)
shuffle(data)
yield from data
def _deferred_sort(data: Iterable[T], key: Optional[Fn[..., Y]], rev: bool) -> Iterator[T]:
yield from sorted(data, key=key, reverse=rev)
def _deferred_tail(data: Iterable[T], n: int) -> Iterator[T]:
yield from tail(n, data)
def _deferred_top(data: Iterable[T], n: int, key: Fn[..., Any]) -> Iterator[T]:
yield from nlargest(n, data, key) if n > 1 else [max(data, key=key)]
def _deferred_unzip(data: Iterable[I]) -> Iterator[Any]:
yield from unzip(data)
def _deferred_zip(data: Iterable[I], trunc: bool, pad: Any) -> Iterator[Tuple[Any, ...]]:
yield from zip(*data) if trunc else zip_longest(*data, fillvalue=pad)
def _lines_to(target: Union[AnyStr, PathLike, int], lines: Iterable[T], eol: bool, **kwargs) -> Iterator[T]:
""""""
with open(target, **kwargs) as out:
for line in lines:
out.write(f'{str(line)}\n' if eol else line)
yield line
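# _ncycles lazily repeats the items of `data` `n` times, caching them during the first pass.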
def _ncycles(data: Iterable[T], n: int) -> Iterator[T]:
saved = []
for item in data:
yield item
saved.append(item)
n_done = 1
while saved and n_done < n:
for item in saved:
yield item
n_done += 1
def _parallel_map(data: Iterable[T], fn: Fn[..., Y], workers: int, chunk_size: int) -> Iterable[Y]:
try:
# This tries to prevent the most common cause of PicklingError as that would lead to the consumption of `data`
# if it's lazy and then the imap in the except clause will get a `data` with missing items
func = fn.func if hasattr(fn, 'func') else fn
if '<lambda>' in func.__qualname__ or '<locals>' in func.__qualname__:
raise PicklingError
with Pool(workers) as pool:
yield from pool.imap(fn, data, chunksize=chunk_size)
except (PicklingError, AttributeError):
logger.warning('multiprocessing with pickle failed, using pathos with dill instead.')
with ProcessPool(workers) as pool:
yield from pool.imap(fn, data, chunksize=chunk_size)
def _print_fn(item: T, ufn: Fn[..., Y], sep: str, end: str, file: IO, flush: bool):
# created as global function to avoid issues with multiprocessing
print(ufn(item), sep=sep, end=end, file=file, flush=flush)
def _side_effect(fn: Fn[[T], Y], data: Iterable[T]) -> Iterable[T]:
for item in data:
fn(item)
yield item
def _split_after(data: Iterable[T], pred: Fn[..., bool]) -> Iterator[List[T]]:
chunk = []
for item in data:
chunk.append(item)
if pred(item) and chunk:
yield chunk
chunk = []
if chunk:
yield chunk
def _split_at(data: Iterable[T], pred: Fn[..., bool]) -> Iterator[List[T]]:
chunk: List[T] = []
for item in data:
if pred(item):
yield chunk
chunk = []
else:
chunk.append(item)
if chunk:
yield chunk
def _split_before(data: Iterable[T], pred: Fn[..., bool]) -> Iterator[List[T]]:
chunk: List[T] = []
for item in data:
if pred(item) and chunk:
yield chunk
chunk = []
chunk.append(item)
if chunk:
yield chunk
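# _head_tail splits an Iterable into its first item and the remaining items without losing
# any of them; the _sent sentinel is returned as the head when the Iterable is empty.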
def _head_tail(data: Iterable[T]) -> Tuple[Union[T, object, Tuple], Iterable[T]]:
if isinstance(data, Sequence):
if len(data) > 1:
return tuple(data[:1]), tuple(data[1:])
elif len(data) == 1:
return tuple(data[:1]), tuple(data[0:0])
else:
return _sent, data
try:
data = iter(data)
return next(data), data
except StopIteration:
return _sent, data
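# _unpack_fn wraps `fn` so that iterable items are unpacked into positional arguments when
# `fn` appears to require more than one argument; built-ins without an inspectable signature
# are special-cased via the UNARY/N_ARY lookup tables.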
def _unpack_fn(fn: Fn[..., T], item: T) -> Fn[..., T]:
require(callable(fn), f'this method takes a function but [{fn}] was found instead')
if not hasattr(item, '__iter__') or fn == Pype:
return fn
# These two conditionals deal with built-in functions as they often don't have a __code__ attribute
if fn in UNARY_WITHOUT_SIGNATURE or hasattr(fn, 'func') and fn.func in UNARY_WITHOUT_SIGNATURE:
return fn
if fn in N_ARY_WITHOUT_SIGNATURE or hasattr(fn, 'func') and fn.func in N_ARY_WITHOUT_SIGNATURE:
return lambda iter_item: fn(*iter_item)
try:
num_args = 0
for name, param in signature(fn).parameters.items():
if param.default == Parameter.empty and \
param.kind != Parameter.KEYWORD_ONLY and \
param.kind != Parameter.VAR_KEYWORD and \
param.kind != Parameter.VAR_POSITIONAL and \
name != 'self' and \
type(fn) != type:
num_args += 1
elif param.kind == Parameter.VAR_POSITIONAL:
num_args += 2
elif name == 'self':
num_args = 1
break
if num_args > 1:
break
if num_args > 1:
return lambda iter_item: fn(*iter_item)
return fn
except Exception:
return fn
def _pipe(*arg: Any, functions: Tuple[Fn[..., Any], ...]) -> Any:
# created as global function to avoid issues with multiprocessing
result = arg if len(arg) > 1 else arg[0]
for fn in functions:
result = fn(result)
return result
| 36.401108
| 120
| 0.557516
|
7a18f4926517edc8b7c5f1c7852d4fcb39876715
| 5,683
|
py
|
Python
|
image_classification/RepMLP/config.py
|
no-name-xiaosheng/PaddleViT
|
50226a3be5095b3727d3c62d2eab23ef1e9612ec
|
[
"Apache-2.0"
] | 2
|
2021-11-23T02:01:52.000Z
|
2021-11-23T02:02:03.000Z
|
image_classification/RepMLP/config.py
|
Dongsheng-Bi/PaddleViT
|
c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7
|
[
"Apache-2.0"
] | null | null | null |
image_classification/RepMLP/config.py
|
Dongsheng-Bi/PaddleViT
|
c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration
Configuration for data, model architecture, training, etc.
Config can be set by a .yaml file or by argparse (limited usage)
"""
import os
from yacs.config import CfgNode as CN
import yaml
_C = CN()
_C.BASE = ['']
# data settings
_C.DATA = CN()
_C.DATA.BATCH_SIZE = 8 #1024 batch_size for single GPU
_C.DATA.BATCH_SIZE_EVAL = 8 #1024 batch_size for single GPU
_C.DATA.DATA_PATH = '/dataset/imagenet/' # path to dataset
_C.DATA.DATASET = 'imagenet2012' # dataset name
_C.DATA.IMAGE_SIZE = 224 # input image size
_C.DATA.CROP_PCT = 0.9 # input image scale ratio, scale is applied before centercrop in eval mode
_C.DATA.NUM_WORKERS = 4 # number of data loading threads
# model settings
_C.MODEL = CN()
_C.MODEL.TYPE = 'RepMLP_ResNet'
_C.MODEL.NAME = 'repmlpres50_light_224_train'
_C.MODEL.RESUME = None
_C.MODEL.PRETRAINED = None
_C.MODEL.NUM_CLASSES = 1000
# transformer settings
_C.MODEL.MIXER = CN()
_C.MODEL.MIXER.NUM_BLOCKS=[3,4,6,3]
_C.MODEL.MIXER.BLOCK_TYPE='light'
_C.MODEL.MIXER.IMG_H=224
_C.MODEL.MIXER.IMG_W=224
_C.MODEL.MIXER.H=7
_C.MODEL.MIXER.W=7
_C.MODEL.MIXER.REPARAM_CONV_K=(1,3,5)
_C.MODEL.MIXER.FC1_FC2_REDUCTION=1
_C.MODEL.MIXER.FC3_GROUPS=4
_C.MODEL.MIXER.DEPLOY=False
# training settings
_C.TRAIN = CN()
_C.TRAIN.LAST_EPOCH = 0
_C.TRAIN.NUM_EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 5e-4
_C.TRAIN.WARMUP_START_LR = 5e-7
_C.TRAIN.END_LR = 5e-6
_C.TRAIN.GRAD_CLIP = 5.0
_C.TRAIN.ACCUM_ITER = 1
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'warmupcosine'
_C.TRAIN.LR_SCHEDULER.MILESTONES = "30, 60, 90" # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 # only used in StepLRScheduler
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'AdamW'
_C.TRAIN.OPTIMIZER.EPS = 1e-8
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) # for adamW
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# train augmentation
_C.TRAIN.MIXUP_ALPHA = 0.8
_C.TRAIN.CUTMIX_ALPHA = 1.0
_C.TRAIN.CUTMIX_MINMAX = None
_C.TRAIN.MIXUP_PROB = 1.0
_C.TRAIN.MIXUP_SWITCH_PROB = 0.5
_C.TRAIN.MIXUP_MODE = 'batch'
_C.TRAIN.SMOOTHING = 0.1
_C.TRAIN.COLOR_JITTER = 0.4
_C.TRAIN.AUTO_AUGMENT = True #'rand-m9-mstd0.5-inc1'
_C.TRAIN.RANDOM_ERASE_PROB = 0.25
_C.TRAIN.RANDOM_ERASE_MODE = 'pixel'
_C.TRAIN.RANDOM_ERASE_COUNT = 1
_C.TRAIN.RANDOM_ERASE_SPLIT = False
# augmentation
_C.AUG = CN()
_C.AUG.COLOR_JITTER = 0.4 # color jitter factor
_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
_C.AUG.RE_PROB = 0.25 # random erase prob
_C.AUG.RE_MODE = 'pixel' # random erase mode
_C.AUG.RE_COUNT = 1 # random erase count
_C.AUG.MIXUP = 0.8 # mixup alpha, enabled if >0
_C.AUG.CUTMIX = 1.0 # cutmix alpha, enabled if >0
_C.AUG.CUTMIX_MINMAX = None # cutmix min/max ratio, overrides alpha
_C.AUG.MIXUP_PROB = 1.0 # prob of mixup or cutmix when either/both is enabled
_C.AUG.MIXUP_SWITCH_PROB = 0.5 # prob of switching cutmix when both mixup and cutmix enabled
_C.AUG.MIXUP_MODE = 'batch' # how to apply mixup/cutmix params, per 'batch', 'pair', or 'elem'
# misc
_C.SAVE = "./output"
_C.TAG = "default"
_C.SAVE_FREQ = 1 # freq to save checkpoint
_C.REPORT_FREQ = 50 # freq to log info
_C.VALIDATE_FREQ = 10 # freq to do validation
_C.SEED = 0
_C.EVAL = False # run evaluation only
_C.AMP = False
_C.LOCAL_RANK = 0
_C.NGPUS = -1
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as infile:
yaml_cfg = yaml.load(infile, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('merging config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
"""Update config by ArgumentParser
Args:
        args: ArgumentParser containing the options
Return:
config: updated config
"""
if args.cfg:
_update_config_from_file(config, args.cfg)
config.defrost()
if args.dataset:
config.DATA.DATASET = args.dataset
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.image_size:
config.DATA.IMAGE_SIZE = args.image_size
if args.data_path:
config.DATA.DATA_PATH = args.data_path
if args.ngpus:
config.NGPUS = args.ngpus
if args.eval:
config.EVAL = True
config.DATA.BATCH_SIZE_EVAL = args.batch_size
if args.pretrained:
config.MODEL.PRETRAINED = args.pretrained
if args.resume:
config.MODEL.RESUME = args.resume
if args.last_epoch:
config.TRAIN.LAST_EPOCH = args.last_epoch
if args.amp: # only during training
if config.EVAL is True:
config.AMP = False
#config.freeze()
return config
def get_config(cfg_file=None):
"""Return a clone of config or load from yaml file"""
config = _C.clone()
if cfg_file:
_update_config_from_file(config, cfg_file)
return config
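# Minimal usage sketch (illustrative only; the yaml path below is hypothetical):
#
#   config = get_config()                                        # default config
#   config = get_config('./configs/repmlpres50_light_224.yaml')  # or load/override from a yaml file
#   print(config.MODEL.NAME, config.DATA.BATCH_SIZE)
#
# `update_config(config, args)` can then layer argparse overrides on top of either.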
| 30.553763
| 97
| 0.716171
|
a4974215afb79a1aa6fefe2f7a177de04784369e
| 173
|
py
|
Python
|
configs/litehrnet/hyperseg_litehr18_512x1024_160k_cityscapes.py
|
evgeniya-egupova/mmsegmentation
|
3857f19321ad6af41c8a6af364898ee050225f4c
|
[
"Apache-2.0"
] | 3
|
2021-12-21T07:25:13.000Z
|
2022-02-07T01:59:19.000Z
|
configs/litehrnet/hyperseg_litehr18_512x1024_160k_cityscapes.py
|
evgeniya-egupova/mmsegmentation
|
3857f19321ad6af41c8a6af364898ee050225f4c
|
[
"Apache-2.0"
] | 13
|
2021-12-10T15:08:56.000Z
|
2022-03-23T08:58:03.000Z
|
configs/litehrnet/hyperseg_litehr18_512x1024_160k_cityscapes.py
|
evgeniya-egupova/mmsegmentation
|
3857f19321ad6af41c8a6af364898ee050225f4c
|
[
"Apache-2.0"
] | 3
|
2021-11-11T23:16:51.000Z
|
2021-12-08T23:49:29.000Z
|
_base_ = [
'../_base_/models/hyperseg_litehr18.py', '../_base_/datasets/cityscapes.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_cos_160k.py'
]
| 34.6
| 80
| 0.693642
|
0a055f58b8a9a22f7ca382c1a67ee7f7f71b515f
| 6,602
|
py
|
Python
|
terrainbento/boundary_handlers/capture_node_baselevel_handler.py
|
alexmitchell/terrainbento
|
b8f6dd7fd9c96d3a40e0d1f8cd41b1dfe8d0b5da
|
[
"MIT"
] | 18
|
2018-03-06T01:17:12.000Z
|
2022-02-11T16:10:31.000Z
|
terrainbento/boundary_handlers/capture_node_baselevel_handler.py
|
alexmitchell/terrainbento
|
b8f6dd7fd9c96d3a40e0d1f8cd41b1dfe8d0b5da
|
[
"MIT"
] | 105
|
2018-03-05T23:36:05.000Z
|
2021-03-22T01:31:06.000Z
|
terrainbento/boundary_handlers/capture_node_baselevel_handler.py
|
alexmitchell/terrainbento
|
b8f6dd7fd9c96d3a40e0d1f8cd41b1dfe8d0b5da
|
[
"MIT"
] | 7
|
2018-03-13T03:46:38.000Z
|
2021-03-07T01:03:56.000Z
|
# coding: utf8
# !/usr/env/python
"""
**CaptureNodeBaselevelHandler** implements "external" stream capture.
"""
class CaptureNodeBaselevelHandler(object):
"""Turn a closed boundary node into an open, lowering, boundary node.
A **CaptureNodeBaselevelHandler** turns a given node into an open boundary
and lowers its elevation over time. This is meant as a simple approach to
model stream capture external to the modeled basin.
Note that **CaptureNodeBaselevelHandler** increments time at the end of the
**run_one_step** method.
"""
def __init__(
self,
grid,
capture_node=None,
capture_start_time=0,
capture_stop_time=None,
capture_incision_rate=-0.01,
post_capture_incision_rate=None,
**kwargs
):
"""
Parameters
----------
grid : landlab model grid
capture_node : int
Node id of the model grid node that should be captured.
capture_start_time : float, optional
Time at which capture should begin. Default is at onset of model
run.
capture_stop_time : float, optional
Time at which capture ceases. Default is the entire duration of
model run.
capture_incision_rate : float, optional
Rate of capture node elevation change. Units are implied by the
            model grid's spatial scale and the time units of ``step``. Negative
values mean the outlet lowers. Default value is -0.01.
post_capture_incision_rate : float, optional
Rate of captured node elevation change after capture ceases. Units
            are implied by the model grid's spatial scale and the time units of
``step``. Negative values mean the outlet lowers. Default value is 0.
Examples
--------
Start by creating a landlab model grid and set its boundary conditions.
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((5, 5))
>>> z = mg.add_zeros("node", "topographic__elevation")
>>> mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> mg.set_watershed_boundary_condition_outlet_id(
... 0, mg.at_node["topographic__elevation"], -9999.)
>>> print(z.reshape(mg.shape))
[[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
Now import the **CaptureNodeBaselevelHandler** and instantiate.
>>> from terrainbento.boundary_handlers import (
... CaptureNodeBaselevelHandler)
>>> bh = CaptureNodeBaselevelHandler(mg,
... capture_node = 3,
... capture_incision_rate = -3.0,
... capture_start_time = 10,
... capture_stop_time = 20,
... post_capture_incision_rate = -0.1)
>>> for _ in range(10):
... bh.run_one_step(1)
The capture has not yet started, so we should expect that the
topography is still all zeros.
>>> print(z.reshape(mg.shape))
[[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
Running forward another 10 time units, we should
see node 3 lower by 30.
>>> for _ in range(10):
... bh.run_one_step(1)
>>> print(z.reshape(mg.shape))
[[ 0. 0. 0. -30. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> bh.model_time
20.0
Now that model time has reached 20, lowering will occur at the post-
capture incision rate. The node should lower by 1 to -31 in the next
10 time units.
>>> for _ in range(10):
... bh.run_one_step(1)
>>> print(z.reshape(mg.shape))
[[ 0. 0. 0. -31. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
"""
self.model_time = 0.0
self.grid = grid
self.z = grid.at_node["topographic__elevation"]
self.node = capture_node
self.start = capture_start_time
self.rate = capture_incision_rate
if capture_stop_time is None:
self.capture_ends = False
else:
self.capture_ends = True
self.stop = capture_stop_time
if post_capture_incision_rate is None:
self.post_capture_incision_rate = 0
else:
self.post_capture_incision_rate = post_capture_incision_rate
self.grid.status_at_node[self.node] = self.grid.BC_NODE_IS_FIXED_VALUE
def run_one_step(self, step):
"""Run **CaptureNodeBaselevelHandler** to update captured node
elevation.
The **run_one_step** method provides a consistent interface to update
the terrainbento boundary condition handlers.
In the **run_one_step** routine, the **CaptureNodeBaselevelHandler**
        will determine if capture is occurring and change the elevation of the
captured node based on the amount specified in instantiation.
Note that **CaptureNodeBaselevelHandler** increments time at the end of
the **run_one_step** method.
Parameters
----------
step : float
Duration of model time to advance forward.
"""
# lower the correct amount.
if self.model_time >= self.start:
if self.capture_ends:
if self.model_time < self.stop:
self.z[self.node] += self.rate * step
else:
self.z[self.node] += self.post_capture_incision_rate * step
else:
self.z[self.node] += self.rate * step
# increment model time
self.model_time += step
| 38.608187
| 82
| 0.522872
|
be1003415b8c7bd45a7842dc03d51c91f456d879
| 1,463
|
py
|
Python
|
vsts/vsts/gallery/v4_0/models/review_summary.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/gallery/v4_0/models/review_summary.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/gallery/v4_0/models/review_summary.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ReviewSummary(Model):
"""ReviewSummary.
:param average_rating: Average Rating
:type average_rating: int
:param rating_count: Count of total ratings
:type rating_count: long
    :param rating_split: Split of count across rating
:type rating_split: list of :class:`RatingCountPerRating <gallery.v4_0.models.RatingCountPerRating>`
"""
_attribute_map = {
'average_rating': {'key': 'averageRating', 'type': 'int'},
'rating_count': {'key': 'ratingCount', 'type': 'long'},
'rating_split': {'key': 'ratingSplit', 'type': '[RatingCountPerRating]'}
}
def __init__(self, average_rating=None, rating_count=None, rating_split=None):
super(ReviewSummary, self).__init__()
self.average_rating = average_rating
self.rating_count = rating_count
self.rating_split = rating_split
| 43.029412
| 104
| 0.570745
|
de05bd5286c801fd6bc56dd9cce961d8da2f363e
| 3,980
|
py
|
Python
|
bottleneck_dist/calculate_wasserstein_distance.py
|
duxiaodan/topological-autoencoders
|
e3c71719942bb50f8c646f7df65bb0f4ba38cb44
|
[
"BSD-3-Clause"
] | 69
|
2020-07-14T15:07:19.000Z
|
2022-03-27T09:33:54.000Z
|
bottleneck_dist/calculate_wasserstein_distance.py
|
duxiaodan/topological-autoencoders
|
e3c71719942bb50f8c646f7df65bb0f4ba38cb44
|
[
"BSD-3-Clause"
] | 6
|
2021-02-02T16:07:49.000Z
|
2022-01-18T08:50:57.000Z
|
bottleneck_dist/calculate_wasserstein_distance.py
|
duxiaodan/topological-autoencoders
|
e3c71719942bb50f8c646f7df65bb0f4ba38cb44
|
[
"BSD-3-Clause"
] | 17
|
2020-07-14T18:40:35.000Z
|
2022-03-17T00:19:48.000Z
|
#!/usr/bin/env python3
import collections
import glob
import os
import subprocess
import sys
import numpy as np
import pandas as pd
batch_size = int(sys.argv[1])
n_iterations = 3
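# Usage sketch (assumed invocation; the batch size is the only CLI argument):
#   ./calculate_wasserstein_distance.py 256
# The script expects per-dataset subdirectories under ./ containing data.csv and *_latents.csv
# files, and the external `vietoris_rips` and `topological_distance` binaries on the PATH.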
if __name__ == '__main__':
for root, dirs, files in os.walk('./'):
data_sets = dirs
for data_set in data_sets:
original_data = pd.read_csv(
os.path.join(root, data_set, 'data.csv'),
header=None
)
original_data = original_data.values
files = sorted(glob.glob(
os.path.join(root, data_set, '*_latents.csv')
)
)
bottlenecks = collections.defaultdict(list)
for i in range(n_iterations):
# Ensures that we never take more than the number of
# samples, regardless of the batch size parameter.
if original_data.shape[0] < batch_size:
batch_size = original_data.shape[0]
random_indices = np.random.choice(
original_data.shape[0],
batch_size,
replace=False
)
X_sample = original_data[random_indices]
np.savetxt('/tmp/Xw.csv', X_sample, delimiter=' ')
diagram = subprocess.run(
['vietoris_rips',
'-n',
'/tmp/Xw.csv',
'1e8',
'1'],
capture_output=True,
)
diagram = diagram.stdout
diagram = diagram.decode('utf-8')
with open('/tmp/D1w.txt', 'w') as f:
f.write(diagram)
D1 = np.genfromtxt('/tmp/D1w.txt')
for filename in files:
name = os.path.basename(filename)
name = name[:name.find('_')]
latent_space = pd.read_csv(
filename,
header=0
)
latent_space = latent_space[['0', '1']]
latent_space = latent_space.values
Y_sample = latent_space[random_indices]
np.savetxt('/tmp/Yw.csv', Y_sample, delimiter=' ')
diagram = subprocess.run(
['vietoris_rips',
'-n',
'/tmp/Yw.csv',
'1e8',
'1'],
capture_output=True,
)
diagram = diagram.stdout
diagram = diagram.decode('utf-8')
with open('/tmp/D2w.txt', 'w') as f:
f.write(diagram)
D2 = np.genfromtxt('/tmp/D2w.txt')
bottleneck = subprocess.run(
['topological_distance',
'-w',
'-p',
'1',
'/tmp/D1w.txt',
'/tmp/D2w.txt'
],
capture_output=True,
)
bottleneck = bottleneck.stdout
bottleneck = bottleneck.decode('utf-8')
bottleneck = bottleneck.split('\n')[0]
bottleneck = bottleneck.split(' ')
bottleneck = float(bottleneck[1])
bottlenecks[name].append(bottleneck)
#l2 = np.linalg.norm(D1 - D2)
#print(data_set, name, l2)
for name in sorted(bottlenecks.keys()):
print(batch_size,
data_set,
name,
np.mean(bottlenecks[name]),
np.std(bottlenecks[name])
)
sys.stdout.flush()
print('')
| 29.264706
| 70
| 0.41608
|
fadaa8662e4aca8df4a45ed8d280cc08136b732e
| 4,390
|
py
|
Python
|
parser/peass-parser.py
|
ParikhKadam/privilege-escalation-awesome-scripts-suite
|
50cc5fc3d2250d55318ef74157acf4006bb86553
|
[
"MIT"
] | null | null | null |
parser/peass-parser.py
|
ParikhKadam/privilege-escalation-awesome-scripts-suite
|
50cc5fc3d2250d55318ef74157acf4006bb86553
|
[
"MIT"
] | null | null | null |
parser/peass-parser.py
|
ParikhKadam/privilege-escalation-awesome-scripts-suite
|
50cc5fc3d2250d55318ef74157acf4006bb86553
|
[
"MIT"
] | 1
|
2021-10-03T16:37:54.000Z
|
2021-10-03T16:37:54.000Z
|
#!/usr/bin/env python3
import sys
import re
import json
# Pattern to identify main section titles
TITLE1_PATTERN = r"════════════════════════════════════╣"
TITLE2_PATTERN = r"╔══════════╣"
TITLE3_PATTERN = r"══╣"
INFO_PATTERN = r"╚ "
TITLE_CHARS = ['═', '╔', '╣', '╚']
# Patterns for colors
## The order is important, the first string colored with a color will be the one selected (the same string cannot be colored with different colors)
COLORS = {
"REDYELLOW": [r"\x1b\[1;31;103m"],
"RED": [r"\x1b\[1;31m"],
"GREEN": [r"\x1b\[1;32m"],
"YELLOW": [r"\x1b\[1;33m"],
"BLUE": [r"\x1b\[1;34m"],
"LIGHT_MAGENTA": [r"\x1b\[1;95m"],
"MAGENTA": [r"\x1b\[1;35m"],
"CYAN": [r"\x1b\[1;36m"],
"LIGHT_CYAN": [r"\x1b\[1;96m"],
"LIGHT_GREY": [r"\x1b\[1;37m"],
"DARKGREY": [r"\x1b\[1;90m"],
}
# Final JSON structure
FINAL_JSON = {}
#Constructing the structure
C_SECTION = FINAL_JSON
C_MAIN_SECTION = FINAL_JSON
C_2_SECTION = FINAL_JSON
C_3_SECTION = FINAL_JSON
def is_section(line: str, pattern: str) -> bool:
"""Returns a boolean
Checks if line matches the pattern and returns True or False
"""
return line.find(pattern) > -1
def get_colors(line: str) -> dict:
"""Given a line return the colored strings"""
colors = {}
for c,regexs in COLORS.items():
colors[c] = []
for reg in regexs:
for re_found in re.findall(reg+"(.+?)\x1b|$", line):
re_found = clean_colors(re_found.strip())
#Avoid having the same color for the same string
if re_found and not any(re_found in values for values in colors.values()):
colors[c].append(re_found)
if not colors[c]:
del colors[c]
return colors
def clean_title(line: str) -> str:
"""Given a title clean it"""
for c in TITLE_CHARS:
line = line.replace(c,"")
line = line.encode("ascii", "ignore").decode() #Remove non ascii chars
line = line.strip()
return line
def clean_colors(line: str) -> str:
"""Given a line clean the colors inside of it"""
for reg in re.findall(r'\x1b[^ ]+\dm', line):
line = line.replace(reg,"")
line = line.replace('\x1b',"") #Sometimes that byte stays
line = line.strip()
return line
def parse_title(line: str) -> str:
""" Given a title, clean it"""
return clean_colors(clean_title(line))
def parse_line(line: str):
"""Parse the given line adding it to the FINAL_JSON structure"""
global FINAL_JSON, C_SECTION, C_MAIN_SECTION, C_2_SECTION, C_3_SECTION
if is_section(line, TITLE1_PATTERN):
title = parse_title(line)
FINAL_JSON[title] = { "sections": {}, "lines": [], "infos": [] }
C_MAIN_SECTION = FINAL_JSON[title]
C_SECTION = C_MAIN_SECTION
elif is_section(line, TITLE2_PATTERN):
title = parse_title(line)
C_MAIN_SECTION["sections"][title] = { "sections": {}, "lines": [], "infos": [] }
C_2_SECTION = C_MAIN_SECTION["sections"][title]
C_SECTION = C_2_SECTION
elif is_section(line, TITLE3_PATTERN):
title = parse_title(line)
C_2_SECTION["sections"][title] = { "sections": {}, "lines": [], "infos": [] }
C_3_SECTION = C_2_SECTION["sections"][title]
C_SECTION = C_3_SECTION
elif is_section(line, INFO_PATTERN):
title = parse_title(line)
C_SECTION["infos"].append(title)
#If here, then it's text
else:
#If no main section parsed yet, pass
if C_SECTION == {}:
return
C_SECTION["lines"].append({
"raw_text": line,
"clean_text": clean_colors(line),
"colors": get_colors(line)
})
def main():
for line in open(OUTPUT_PATH, 'r').readlines():
line = line.strip()
if not line or not clean_colors(line): #Remove empty lines or lines just with colors hex
continue
parse_line(line)
with open(JSON_PATH, "w") as f:
json.dump(FINAL_JSON, f)
# Start execution
if __name__ == "__main__":
try:
OUTPUT_PATH = sys.argv[1]
JSON_PATH = sys.argv[2]
except IndexError as err:
print("Error: Please pass the peas.out file and the path to save the json\n./peas-parser.py <output_file> <json_file.json>")
sys.exit(1)
main()
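# Example invocation (hypothetical file names):
#   ./peass-parser.py linpeas.out linpeas.json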
| 28.141026
| 147
| 0.592255
|
b4ce35e7a355a54a699e441bc348c3df53fe8305
| 574
|
py
|
Python
|
model/group.py
|
lukasz-nieweglowski86/py_pol_23
|
0831def794a5c8f849a6538799bb6c3cfd961bd8
|
[
"Apache-2.0"
] | null | null | null |
model/group.py
|
lukasz-nieweglowski86/py_pol_23
|
0831def794a5c8f849a6538799bb6c3cfd961bd8
|
[
"Apache-2.0"
] | null | null | null |
model/group.py
|
lukasz-nieweglowski86/py_pol_23
|
0831def794a5c8f849a6538799bb6c3cfd961bd8
|
[
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s: %s: %s: %s " % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| 24.956522
| 103
| 0.578397
|
b3feb04796b39c43eb95bbe4a450c1d97d1c4dfa
| 11,538
|
py
|
Python
|
grr/server/grr_response_server/sequential_collection_test.py
|
nkrios/grr
|
399e078ed522bf0555a2666fb086aa7809d54971
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/sequential_collection_test.py
|
nkrios/grr
|
399e078ed522bf0555a2666fb086aa7809d54971
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/sequential_collection_test.py
|
nkrios/grr
|
399e078ed522bf0555a2666fb086aa7809d54971
|
[
"Apache-2.0"
] | 1
|
2020-07-09T01:08:48.000Z
|
2020-07-09T01:08:48.000Z
|
#!/usr/bin/env python
"""Tests for SequentialCollection and related subclasses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import threading
import time
from absl import app
from future.builtins import range
from future.utils import iterkeys
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_server import data_store
from grr_response_server import sequential_collection
from grr.test_lib import aff4_test_lib
from grr.test_lib import test_lib
class TestSequentialCollection(sequential_collection.SequentialCollection):
RDF_TYPE = rdfvalue.RDFInteger
class SequentialCollectionTest(aff4_test_lib.AFF4ObjectTest):
def _TestCollection(self, collection_id):
return TestSequentialCollection(rdfvalue.RDFURN(collection_id))
def testAddScan(self):
collection = self._TestCollection("aff4:/sequential_collection/testAddScan")
with data_store.DB.GetMutationPool() as pool:
for i in range(100):
collection.Add(rdfvalue.RDFInteger(i), mutation_pool=pool)
i = 0
last_ts = 0
for (ts, v) in collection.Scan():
last_ts = ts
self.assertEqual(i, v)
i += 1
with data_store.DB.GetMutationPool() as pool:
for j in range(100):
collection.Add(rdfvalue.RDFInteger(j + 100), mutation_pool=pool)
for (ts, v) in collection.Scan(after_timestamp=last_ts):
self.assertEqual(i, v)
i += 1
self.assertEqual(i, 200)
def testDuplicateTimestamps(self):
collection = self._TestCollection(
"aff4:/sequential_collection/testDuplicateTimestamps")
t = rdfvalue.RDFDatetime.Now()
with data_store.DB.GetMutationPool() as pool:
for i in range(10):
ts = collection.Add(
rdfvalue.RDFInteger(i), timestamp=t, mutation_pool=pool)
self.assertEqual(ts[0], t)
i = 0
for (ts, _) in collection.Scan():
self.assertEqual(ts, t)
i += 1
self.assertEqual(i, 10)
def testMultiResolve(self):
collection = self._TestCollection("aff4:/sequential_collection/testAddScan")
records = []
with data_store.DB.GetMutationPool() as pool:
for i in range(100):
ts, suffix = collection.Add(rdfvalue.RDFInteger(i), mutation_pool=pool)
records.append(
data_store.Record(
queue_id=collection.collection_id,
timestamp=ts,
suffix=suffix,
subpath="Results",
value=None))
even_results = sorted([r for r in collection.MultiResolve(records[::2])])
self.assertLen(even_results, 50)
self.assertEqual(even_results[0], 0)
self.assertEqual(even_results[49], 98)
def testDelete(self):
collection = self._TestCollection("aff4:/sequential_collection/testDelete")
with data_store.DB.GetMutationPool() as pool:
for i in range(100):
collection.Add(rdfvalue.RDFInteger(i), mutation_pool=pool)
collection.Delete()
collection = self._TestCollection("aff4:/sequential_collection/testDelete")
for _ in collection.Scan():
self.fail("Deleted and recreated SequentialCollection should be empty")
class TestIndexedSequentialCollection(
sequential_collection.IndexedSequentialCollection):
RDF_TYPE = rdfvalue.RDFInteger
class IndexedSequentialCollectionTest(aff4_test_lib.AFF4ObjectTest):
def _TestCollection(self, collection_id):
return TestIndexedSequentialCollection(rdfvalue.RDFURN(collection_id))
def setUp(self):
super(IndexedSequentialCollectionTest, self).setUp()
# Create a new background thread for each test. In the default
# configuration, this thread can sleep for quite a long time and
# might therefore be unavailable in further tests so we just
# create a new one for each test we run.
biu = sequential_collection.BackgroundIndexUpdater()
try:
sequential_collection.BACKGROUND_INDEX_UPDATER.ExitNow()
except AttributeError:
pass
sequential_collection.BACKGROUND_INDEX_UPDATER = biu
self.worker_thread = threading.Thread(target=biu.UpdateLoop)
self.worker_thread.daemon = True
self.worker_thread.start()
def tearDown(self):
super(IndexedSequentialCollectionTest, self).tearDown()
sequential_collection.BACKGROUND_INDEX_UPDATER.ExitNow()
self.worker_thread.join()
def testAddGet(self):
collection = self._TestCollection("aff4:/sequential_collection/testAddGet")
self.assertEqual(collection.CalculateLength(), 0)
with data_store.DB.GetMutationPool() as pool:
for i in range(100):
collection.Add(rdfvalue.RDFInteger(i), mutation_pool=pool)
for i in range(100):
self.assertEqual(collection[i], i)
self.assertEqual(collection.CalculateLength(), 100)
self.assertLen(collection, 100)
def testStaticAddGet(self):
aff4_path = "aff4:/sequential_collection/testStaticAddGet"
collection = self._TestCollection(aff4_path)
self.assertEqual(collection.CalculateLength(), 0)
with data_store.DB.GetMutationPool() as pool:
for i in range(100):
TestIndexedSequentialCollection.StaticAdd(
rdfvalue.RDFURN(aff4_path),
rdfvalue.RDFInteger(i),
mutation_pool=pool)
for i in range(100):
self.assertEqual(collection[i], i)
self.assertEqual(collection.CalculateLength(), 100)
self.assertLen(collection, 100)
def testIndexCreate(self):
spacing = 10
with utils.Stubber(sequential_collection.IndexedSequentialCollection,
"INDEX_SPACING", spacing):
urn = "aff4:/sequential_collection/testIndexCreate"
collection = self._TestCollection(urn)
# TODO(amoser): Without using a mutation pool, this test is really
# slow on MySQL data store.
with data_store.DB.GetMutationPool() as pool:
for i in range(10 * spacing):
collection.StaticAdd(urn, rdfvalue.RDFInteger(i), mutation_pool=pool)
# It is too soon to build an index, check that we don't.
self.assertEqual(collection._index, None)
self.assertEqual(collection.CalculateLength(), 10 * spacing)
self.assertEqual(list(iterkeys(collection._index)), [0])
now = time.time() * 1e6
twenty_seconds_ago = (time.time() - 20) * 1e6
# Push the clock forward 10m, and we should build an index on access.
with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
rdfvalue.Duration("10m")):
# Read from start doesn't rebuild index (lazy rebuild)
_ = collection[0]
self.assertEqual(list(iterkeys(collection._index)), [0])
self.assertEqual(collection.CalculateLength(), 10 * spacing)
self.assertEqual(
sorted(iterkeys(collection._index)),
[i * spacing for i in range(10)])
for index in collection._index:
if not index:
continue
timestamp, suffix = collection._index[index]
self.assertLessEqual(twenty_seconds_ago, timestamp)
self.assertLessEqual(timestamp, now)
self.assertBetween(suffix, 0, 0xFFFFFF)
# Now check that the index was persisted to aff4 by re-opening
# and checking that a read from head does load full index
# (optimistic load):
collection = self._TestCollection(
"aff4:/sequential_collection/testIndexCreate")
self.assertEqual(collection._index, None)
_ = collection[0]
self.assertEqual(
sorted(iterkeys(collection._index)), [i * spacing for i in range(10)])
for index in collection._index:
if not index:
continue
timestamp, suffix = collection._index[index]
self.assertLessEqual(twenty_seconds_ago, timestamp)
self.assertLessEqual(timestamp, now)
self.assertBetween(suffix, 0, 0xFFFFFF)
def testIndexedReads(self):
spacing = 10
with utils.Stubber(sequential_collection.IndexedSequentialCollection,
"INDEX_SPACING", spacing):
urn = "aff4:/sequential_collection/testIndexedReads"
collection = self._TestCollection(urn)
data_size = 4 * spacing
# TODO(amoser): Without using a mutation pool, this test is really
# slow on MySQL data store.
with data_store.DB.GetMutationPool() as pool:
for i in range(data_size):
collection.StaticAdd(
rdfvalue.RDFURN(urn), rdfvalue.RDFInteger(i), mutation_pool=pool)
with test_lib.FakeTime(rdfvalue.RDFDatetime.Now() +
rdfvalue.Duration("10m")):
for i in range(data_size - 1, data_size - 20, -1):
self.assertEqual(collection[i], i)
for i in [spacing - 1, spacing, spacing + 1]:
self.assertEqual(collection[i], i)
for i in range(data_size - spacing + 5, data_size - spacing - 5, -1):
self.assertEqual(collection[i], i)
def testListing(self):
test_urn = "aff4:/sequential_collection/testIndexedListing"
collection = self._TestCollection(test_urn)
timestamps = []
with data_store.DB.GetMutationPool() as pool:
for i in range(100):
timestamps.append(
collection.Add(rdfvalue.RDFInteger(i), mutation_pool=pool))
with test_lib.Instrument(sequential_collection.SequentialCollection,
"Scan") as scan:
self.assertLen(list(collection), 100)
# Listing should be done using a single scan but there is another one
# for calculating the length.
self.assertEqual(scan.call_count, 2)
def testAutoIndexing(self):
indexing_done = threading.Event()
def UpdateIndex(_):
indexing_done.set()
# To reduce the time for the test to run, reduce the delays, so that
# indexing should happen instantaneously.
isq = sequential_collection.IndexedSequentialCollection
biu = sequential_collection.BACKGROUND_INDEX_UPDATER
with utils.MultiStubber((biu, "INDEX_DELAY", 0),
(isq, "INDEX_WRITE_DELAY", rdfvalue.Duration("0s")),
(isq, "INDEX_SPACING", 8),
(isq, "UpdateIndex", UpdateIndex)):
urn = "aff4:/sequential_collection/testAutoIndexing"
collection = self._TestCollection(urn)
# TODO(amoser): Without using a mutation pool, this test is really
# slow on MySQL data store.
with data_store.DB.GetMutationPool() as pool:
for i in range(2048):
collection.StaticAdd(
rdfvalue.RDFURN(urn), rdfvalue.RDFInteger(i), mutation_pool=pool)
# Wait for the updater thread to finish the indexing.
if not indexing_done.wait(timeout=10):
self.fail("Indexing did not finish in time.")
class GeneralIndexedCollectionTest(aff4_test_lib.AFF4ObjectTest):
def testAddGet(self):
collection = sequential_collection.GeneralIndexedCollection(
rdfvalue.RDFURN("aff4:/sequential_collection/testAddGetIndexed"))
with data_store.DB.GetMutationPool() as pool:
collection.Add(rdfvalue.RDFInteger(42), mutation_pool=pool)
collection.Add(
rdfvalue.RDFString("the meaning of life"), mutation_pool=pool)
self.assertEqual(collection[0].__class__, rdfvalue.RDFInteger)
self.assertEqual(collection[0], 42)
self.assertEqual(collection[1].__class__, rdfvalue.RDFString)
self.assertEqual(collection[1], "the meaning of life")
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| 37.219355
| 80
| 0.690414
|
5e316d35aa4cc9c98548b2d0808738baef58daa0
| 66,074
|
py
|
Python
|
tensorflow/python/tpu/tpu_embedding_v2.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 1
|
2020-08-06T03:25:47.000Z
|
2020-08-06T03:25:47.000Z
|
tensorflow/python/tpu/tpu_embedding_v2.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 5
|
2020-07-17T17:36:44.000Z
|
2020-08-05T20:18:02.000Z
|
tensorflow/python/tpu/tpu_embedding_v2.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 3
|
2017-05-17T08:44:52.000Z
|
2021-08-18T05:37:12.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mid level API for TPU Embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from absl import logging
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training.saving import saveable_hook
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_HOOK_KEY = "TPUEmbedding_saveable"
_NAME_KEY = "_tpu_embedding_layer"
# TODO(bfontain): Cleanup and remove this once there is an implementation of
# sharded variables that can be used in the PSStrategy with optimizers.
# We implement just enough of a tf.Variable so that this could be passed
# to an optimizer.
class TPUShardedVariable(sharded_variable.ShardedVariable):
"""A ShardedVariable class for TPU."""
@property
def _in_graph_mode(self):
return self.variables[0]._in_graph_mode # pylint: disable=protected-access
@property
def _unique_id(self):
return self.variables[0]._unique_id # pylint: disable=protected-access
@property
def _distribute_strategy(self):
return self.variables[0]._distribute_strategy # pylint: disable=protected-access
@property
def _shared_name(self):
return self._name
def _add_key_attr(op, name):
op._set_attr(_NAME_KEY, attr_value_pb2.AttrValue(s=compat.as_bytes(name))) # pylint: disable=protected-access
@tf_export("tpu.experimental.embedding.TPUEmbedding")
class TPUEmbedding(tracking.AutoTrackable):
"""The TPUEmbedding mid level API.
NOTE: When instantiated under a TPUStrategy, this class can only be created
once per call to `tf.tpu.experimental.initialize_tpu_system`. If you wish to
re-initialize the embedding engine you must re-initialize the tpu as well.
Doing this will clear any variables from TPU, so ensure you have checkpointed
  before you do this. If further instances of the class are needed,
set the `initialize_tpu_embedding` argument to `False`.
This class can be used to support training large embeddings on TPU. When
creating an instance of this class, you must specify the complete set of
tables and features you expect to lookup in those tables. See the
documentation of `tf.tpu.experimental.embedding.TableConfig` and
`tf.tpu.experimental.embedding.FeatureConfig` for more details on the complete
set of options. We will cover the basic usage here.
NOTE: multiple `FeatureConfig` objects can use the same `TableConfig` object,
allowing different features to share the same table:
```python
table_config_one = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...)
table_config_two = tf.tpu.experimental.embedding.TableConfig(
vocabulary_size=...,
dim=...)
feature_config = {
'feature_one': tf.tpu.experimental.embedding.FeatureConfig(
table=table_config_one),
'feature_two': tf.tpu.experimental.embedding.FeatureConfig(
table=table_config_one),
'feature_three': tf.tpu.experimental.embedding.FeatureConfig(
table=table_config_two)}
```
  There are two modes under which the `TPUEmbedding` class can be used. This
depends on if the class was created under a `TPUStrategy` scope or not.
  Under `TPUStrategy`, we allow access to the methods `enqueue`, `dequeue` and
  `apply_gradients`. We will show examples below of how to use these to train
  and evaluate your model. Under CPU, we only have access to the
  `embedding_tables` property, which allows access to the embedding tables so
  that you can use them to run model evaluation/prediction on CPU.
  First let's look at the `TPUStrategy` mode. Initial setup looks like:
```python
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
feature_config=feature_config,
optimizer=tf.tpu.experimental.embedding.SGD(0.1))
```
When creating a distributed dataset that is to be passed to the enqueue
  operation, a special input option must be specified:
```python
distributed_dataset = (
strategy.experimental_distribute_datasets_from_function(
dataset_fn=...,
options=tf.distribute.InputOptions(
              experimental_prefetch_to_device=False)))
dataset_iterator = iter(distributed_dataset)
```
NOTE: All batches passed to the layer must have the same batch size for each
  input; moreover, once you have called the layer with one batch size, all
  subsequent calls must use the same batch size. In the event that the batch
size cannot be automatically determined by the enqueue method, you must call
the build method with the batch size to initialize the layer.
To use this API on TPU you should use a custom training loop. Below is an
example of a training and evaluation step:
```python
@tf.function
def training_step(dataset_iterator, num_steps):
def tpu_step(tpu_features):
with tf.GradientTape() as tape:
activations = embedding.dequeue()
tape.watch(activations)
model_output = model(activations)
loss = ... # some function of labels and model_output
embedding_gradients = tape.gradient(loss, activations)
embedding.apply_gradients(embedding_gradients)
# Insert your model gradient and optimizer application here
for _ in tf.range(num_steps):
embedding_features, tpu_features = next(dataset_iterator)
embedding.enqueue(embedding_features, training=True)
      strategy.run(tpu_step, args=(tpu_features, ))
@tf.function
  def evaluation_step(dataset_iterator, num_steps):
def tpu_step(tpu_features):
activations = embedding.dequeue()
model_output = model(activations)
# Insert your evaluation code here.
for _ in tf.range(num_steps):
embedding_features, tpu_features = next(dataset_iterator)
embedding.enqueue(embedding_features, training=False)
      strategy.run(tpu_step, args=(tpu_features, ))
```
NOTE: The calls to `enqueue` have `training` set to `True` when
`embedding.apply_gradients` is used and set to `False` when
`embedding.apply_gradients` is not present in the function. If you don't
follow this pattern you may cause an error to be raised or the tpu may
deadlock.
In the above examples, we assume that the user has a dataset which returns
a tuple where the first element of the tuple matches the structure of what
was passed as the `feature_config` argument to the object initializer. Also we
utilize `tf.range` to get a `tf.while_loop` in order to increase performance.
When checkpointing your model, you should include your
`tf.tpu.experimental.embedding.TPUEmbedding` object in the checkpoint. It is a
trackable object and saving it will save the embedding tables and their
optimizer slot variables:
```python
checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)
checkpoint.save(...)
```
  On CPU, only the `embedding_tables` property is usable. This will allow you to
restore a checkpoint to the object and have access to the table variables:
```python
model = model_fn(...)
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
feature_config=feature_config,
batch_size=1024,
optimizer=tf.tpu.experimental.embedding.SGD(0.1))
checkpoint = tf.train.Checkpoint(model=model, embedding=embedding)
checkpoint.restore(...)
tables = embedding.embedding_tables
```
  You can now use these tables in functions like `tf.nn.embedding_lookup` to
  perform your embedding lookups and pass the results to your model.
"""
def __init__(self, feature_config, optimizer,
pipeline_execution_with_tensor_core=False):
"""Creates the TPUEmbedding mid level API object.
```python
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
embedding = tf.tpu.experimental.embedding.TPUEmbedding(
feature_config=tf.tpu.experimental.embedding.FeatureConfig(
table=tf.tpu.experimental.embedding.TableConfig(
dim=...,
vocabulary_size=...)))
```
Args:
feature_config: A nested structure of
`tf.tpu.experimental.embedding.FeatureConfig` configs.
optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`,
`tf.tpu.experimental.embedding.Adagrad` or
        `tf.tpu.experimental.embedding.Adam`. When not created under a
        TPUStrategy, this may be set to None to avoid creating the optimizer
        slot variables, which is useful for reducing memory consumption when
        exporting the model for serving, where slot variables aren't needed.
pipeline_execution_with_tensor_core: If True, the TPU embedding
computations will overlap with the TensorCore computations (and hence
will be one step old). Set to True for improved performance.
Raises:
ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD,
Adam or Adagrad) or None when created under a TPUStrategy.
"""
self._strategy = distribution_strategy_context.get_strategy()
self._using_tpu = isinstance(self._strategy, (tpu_strategy.TPUStrategy,
tpu_strategy.TPUStrategyV2))
self._pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
self._feature_config = feature_config
# The TPU embedding ops are slightly inconsistent with how they refer to
# tables:
# * The enqueue op takes a parallel list of tensors for input, one of those
# is the table id for the feature which matches the integer index of the
# table in the proto created by _create_config_proto().
# * The recv_tpu_embedding_activations op emits lookups per table in the
# order from the config proto.
# * The send_tpu_embedding_gradients expects input tensors to be per table
# in the same order as the config proto.
# * Per optimizer load and retrieve ops are specified per table and take the
# table name rather than the table id.
# Thus we must fix a common order to tables and ensure they have unique
# names.
# Set table order here
self._table_config = list(
{feature.table for feature in nest.flatten(feature_config)})
# Ensure tables have unique names. Also error check the optimizer as we
# specifically don't do that in the TableConfig class to allow high level
# APIs that are built on this to use strings/other classes to represent
# optimizers (before they are passed to this class).
table_names = []
for i, table in enumerate(self._table_config):
if table.optimizer is None:
# TODO(bfontain) Should we allow some sort of optimizer merging here?
table.optimizer = optimizer
if ((table.optimizer is not None or self._using_tpu) and
not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)): # pylint: disable=protected-access
raise ValueError("{} is an unsupported optimizer class. Please pass an "
"instance of one of the optimizer classes under "
"tf.tpu.experimental.embedding.".format(
type(table.optimizer)))
if table.name is None:
table.name = "table_{}".format(i)
if table.name in table_names:
raise ValueError("Multiple tables with name {} found.".format(
table.name))
table_names.append(table.name)
if self._using_tpu:
# Extract a list of callable learning rates also in fixed order. Each
      # table in the config proto will get an index into this list and we will
# pass this list in the same order after evaluation to the
# send_tpu_embedding_gradients op.
self._dynamic_learning_rates = list({
table.optimizer.learning_rate for table in self._table_config if
callable(table.optimizer.learning_rate)})
      # We need the list of host devices for the load/retrieve operations.
self._hosts = get_list_of_hosts(self._strategy)
self._built = False
def build(self, per_replica_batch_size=None):
"""Create the underlying variables and initializes the TPU for embeddings.
This method creates the underlying variables (including slot variables). If
created under a TPUStrategy, this will also initialize the TPU for
embeddings.
This function will automatically get called by enqueue, which will try to
determine your batch size automatically. If this fails, you must manually
call this method before you call enqueue.
Args:
per_replica_batch_size: The per replica batch size that you intend to use.
        Note that this is fixed and the same batch size must be used for both
        training and evaluation. If you want to calculate this from the global
        batch size, you can use the `num_replicas_in_sync` property of your strategy
object. May be set to None if not created under a TPUStrategy.
Raises:
ValueError: If per_replica_batch_size is None and object was created in a
TPUStrategy scope.
"""
if self._built:
return
if self._using_tpu:
if per_replica_batch_size is None:
raise ValueError("You must specify a per_replica_batch_size when "
"calling build if object is created under a "
"TPUStrategy.")
self._batch_size = per_replica_batch_size
self._config_proto = self._create_config_proto()
logging.info("Initializing TPU Embedding engine with config: %s",
self._config_proto)
@def_function.function
def load_config():
tpu.initialize_system_for_tpu_embedding(self._config_proto)
load_config()
logging.info("Done initializing TPU Embedding engine.")
# Create and load variables and slot variables into the TPU.
# Note that this is a dict of dicts. Keys to the first dict are table names.
# We would prefer to use TableConfigs, but then these variables won't be
# properly tracked by the tracking API.
self._variables = self._create_variables_and_slots()
if self._using_tpu:
self._load_variables()
self._built = True
def _maybe_build(self, batch_size):
if not self._built:
# This can be called while tracing a function, so we wrap the
      # initialization code with init_scope so it runs eagerly. This means that
      # it will not be included in the function graph generated by tracing, so
      # we can be sure that we only initialize the TPU for embeddings exactly
# once.
with ops.init_scope():
self.build(batch_size)
@property
def embedding_tables(self):
"""Returns a dict of embedding tables, keyed by `TableConfig`.
This property only works when the `TPUEmbedding` object is created under a
    non-TPU strategy. This is intended to be used for CPU based lookup when
creating a serving checkpoint.
Returns:
A dict of embedding tables, keyed by `TableConfig`.
Raises:
RuntimeError: If object was created under a `TPUStrategy`.
"""
# We don't support returning tables on TPU due to their sharded nature and
# the fact that when using a TPUStrategy:
# 1. Variables are stale and are only updated when a checkpoint is made.
# 2. Updating the variables won't affect the actual tables on the TPU.
if self._using_tpu:
raise RuntimeError("Unable to retrieve embedding tables when using a TPU "
"strategy. If you need access, save your model, "
"create this object under a CPU strategy and restore.")
self._maybe_build(None)
    # Only return the tables and not the slot variables. On CPU these are
    # honest tf.Variables.
return {table: self._variables[table.name]["parameters"]
for table in self._table_config}
def _create_config_proto(self):
"""Creates the TPUEmbeddingConfiguration proto.
This proto is used to initialize the TPU embedding engine.
Returns:
A TPUEmbeddingConfiguration proto.
"""
config_proto = tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration()
# There are several things that need to be computed here:
# 1. Each table has a num_features, which corresponds to the number of
# output rows per example for this table. Sequence features count for
# their maximum sequence length.
# 2. Learning rate index: the index of the dynamic learning rate for this
# table (if it exists) in the list we created at initialization.
# We don't simply create one learning rate index per table as this has
# extremely bad performance characteristics. The more separate
# optimization configurations we have, the worse the performance will be.
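    # Worked example (comment added for clarity): a table used by one plain
    # feature and one sequence feature with max_sequence_length=3 gets
    # num_features 1 + 3 = 4 output rows per example.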
num_features = {table: 0 for table in self._table_config}
for feature in nest.flatten(self._feature_config):
num_features[feature.table] += (1 if feature.max_sequence_length == 0
else feature.max_sequence_length)
    # Map each callable dynamic learning rate to its index in the list.
learning_rate_index = {r: i for i, r in enumerate(
self._dynamic_learning_rates)}
for table in self._table_config:
table_descriptor = config_proto.table_descriptor.add()
table_descriptor.name = table.name
# For small tables, we pad to the number of hosts so that at least one
# id will be assigned to each host.
table_descriptor.vocabulary_size = max(table.vocabulary_size,
self._strategy.extended.num_hosts)
table_descriptor.dimension = table.dim
table_descriptor.num_features = num_features[table]
parameters = table_descriptor.optimization_parameters
# We handle the learning rate separately here and don't allow the
# optimization class to handle this, as it doesn't know about dynamic
# rates.
if callable(table.optimizer.learning_rate):
parameters.learning_rate.dynamic.tag = (
learning_rate_index[table.optimizer.learning_rate])
else:
parameters.learning_rate.constant = table.optimizer.learning_rate
# Use optimizer to handle the rest of the parameters.
table.optimizer._set_optimization_parameters(parameters) # pylint: disable=protected-access
# Always set mode to training, we override the mode during enqueue.
config_proto.mode = (
tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.TRAINING)
config_proto.batch_size_per_tensor_core = self._batch_size
config_proto.num_hosts = self._strategy.extended.num_hosts
config_proto.num_tensor_cores = self._strategy.num_replicas_in_sync
# TODO(bfontain): Allow users to pick MOD for the host sharding.
config_proto.sharding_strategy = (
tpu_embedding_configuration_pb2.TPUEmbeddingConfiguration.DIV_DEFAULT)
config_proto.pipeline_execution_with_tensor_core = (
self._pipeline_execution_with_tensor_core)
return config_proto
def _compute_per_table_gradients(self, gradients):
"""Computes a dict of lists of gradients, keyed by table name.
Args:
gradients: A nested structure of Tensors (and Nones) with the same
structure as the feature config.
Returns:
      A dict of lists of tensors, keyed by the table configs, containing the
      gradients in the correct order with None gradients replaced by zeros.
"""
nest.assert_same_structure(self._feature_config, gradients)
per_table_gradients = {table: [] for table in self._table_config}
for (path, gradient), feature in zip(
nest.flatten_with_joined_string_paths(gradients),
nest.flatten(self._feature_config)):
if gradient is not None and not isinstance(gradient, ops.Tensor):
raise ValueError(
"Found {} at path {} in gradients. Expected Tensor.".format(
type(gradient), path))
# Expected tensor shape differs for sequence and non-sequence features.
if feature.max_sequence_length > 0:
shape = [self._batch_size, feature.max_sequence_length,
feature.table.dim]
else:
shape = [self._batch_size, feature.table.dim]
if gradient is not None:
if gradient.shape != shape:
raise ValueError("Found gradient of shape {} at path {}. Expected "
"shape {}.".format(gradient.shape, path, shape))
# We expand dims on non-sequence features so that all features are
# of rank 3 and we can concat on axis=1.
if len(shape) == 2:
gradient = array_ops.expand_dims(gradient, axis=1)
else:
# No gradient for this feature, since we must give a gradient for all
# features, pass in a zero tensor here. Note that this is not correct
# for all optimizers.
logging.warn("No gradient passed for feature %s, sending zero "
"gradient. This may not be correct behavior for certain "
"optimizers like Adam.", path)
# Create a shape to mimic the expand_dims above for non-sequence
# features.
if len(shape) == 2:
shape = [shape[0], 1, shape[1]]
gradient = array_ops.zeros(shape, dtype=dtypes.float32)
per_table_gradients[feature.table].append(gradient)
return per_table_gradients
def apply_gradients(self, gradients, name=None):
"""Applies the gradient update to the embedding tables.
If a gradient of `None` is passed in any position of the nested structure,
    then a gradient update with a zero gradient is applied for that feature.
For optimizers like SGD or Adagrad, this is the same as applying no update
at all. For lazy Adam and other sparsely applied optimizers with decay,
ensure you understand the effect of applying a zero gradient.
```python
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)
distributed_dataset = (
strategy.experimental_distribute_datasets_from_function(
dataset_fn=...,
options=tf.distribute.InputOptions(
                experimental_prefetch_to_device=False)))
dataset_iterator = iter(distributed_dataset)
@tf.function
def training_step():
def tpu_step(tpu_features):
with tf.GradientTape() as tape:
activations = embedding.dequeue()
tape.watch(activations)
loss = ... # some computation involving activations
embedding_gradients = tape.gradient(loss, activations)
embedding.apply_gradients(embedding_gradients)
embedding_features, tpu_features = next(dataset_iterator)
embedding.enqueue(embedding_features, training=True)
    strategy.run(tpu_step, args=(tpu_features, ))
training_step()
```
Args:
gradients: A nested structure of gradients, with structure matching the
`feature_config` passed to this object.
name: A name for the underlying op.
Raises:
RuntimeError: If called when object wasn't created under a `TPUStrategy`
or if not built (either by manually calling build or calling enqueue).
ValueError: If a non-`tf.Tensor` non-`None` gradient is passed in, or a
`tf.Tensor` of the incorrect shape is passed in. Also if
the size of any sequence in `gradients` does not match corresponding
sequence in `feature_config`.
TypeError: If the type of any sequence in `gradients` does not match
corresponding sequence in `feature_config`.
"""
if not self._using_tpu:
raise RuntimeError("apply_gradients is not valid when TPUEmbedding "
"object is not created under a TPUStrategy.")
if not self._built:
raise RuntimeError("apply_gradients called on unbuilt TPUEmbedding "
"object. Please either call enqueue first or manually "
"call the build method.")
    # send_tpu_embedding_gradients requires per table gradients; if we only
    # have one feature per table this isn't an issue. When multiple features
    # share the same table, the order of the features in the per table tensor
    # returned by recv_tpu_embedding_activations matches the order in which
    # they were passed to enqueue.
# In all three places, we use the fixed order given by nest.flatten to have
# a consistent feature order.
# First construct a dict of tensors one for each table.
per_table_gradients = self._compute_per_table_gradients(gradients)
    # Now that we have the gradients grouped per table, we can build a list of
    # tensors in the fixed order of self._table_config, interleaving the
    # gradients of the individual features. We concat on axis 1 and then
    # reshape into a 2d tensor. The send gradients op expects a tensor of
    # shape [num_features*batch_size, dim] for each table.
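    # Worked example (comment added for clarity): with two features sharing
    # one table, per replica batch size 3 and dim 4, each per-feature gradient
    # arrives as [3, 1, 4]; the concat below gives [3, 2, 4] and the reshape
    # gives [6, 4], i.e. [num_features*batch_size, dim].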
interleaved_gradients = []
for table in self._table_config:
interleaved_gradients.append(array_ops.reshape(
array_ops.concat(per_table_gradients[table], axis=1),
[-1, table.dim]))
op = tpu_ops.send_tpu_embedding_gradients(
inputs=interleaved_gradients,
learning_rates=[math_ops.cast(fn(), dtype=dtypes.float32)
for fn in self._dynamic_learning_rates],
config=self._config_proto.SerializeToString())
# Apply the name tag to the op.
if name is not None:
_add_key_attr(op, name)
def dequeue(self, name=None):
"""Get the embedding results.
Returns a nested structure of `tf.Tensor` objects, matching the structure of
the `feature_config` argument to the `TPUEmbedding` class. The output shape
of the tensors is `(batch_size, dim)`, where `batch_size` is the per core
batch size, `dim` is the dimension of the corresponding `TableConfig`. If
the feature's corresponding `FeatureConfig` has `max_sequence_length`
greater than 0, the output will be a sequence of shape
`(batch_size, max_sequence_length, dim)` instead.
```python
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)
distributed_dataset = (
strategy.experimental_distribute_datasets_from_function(
dataset_fn=...,
options=tf.distribute.InputOptions(
                experimental_prefetch_to_device=False)))
dataset_iterator = iter(distributed_dataset)
@tf.function
def training_step():
def tpu_step(tpu_features):
with tf.GradientTape() as tape:
activations = embedding.dequeue()
tape.watch(activations)
loss = ... # some computation involving activations
embedding_gradients = tape.gradient(loss, activations)
embedding.apply_gradients(embedding_gradients)
embedding_features, tpu_features = next(dataset_iterator)
embedding.enqueue(embedding_features, training=True)
    strategy.run(tpu_step, args=(tpu_features, ))
training_step()
```
Args:
name: A name for the underlying op.
Returns:
A nested structure of tensors, with the same structure as `feature_config`
passed to this instance of the `TPUEmbedding` object.
Raises:
RuntimeError: If called when object wasn't created under a `TPUStrategy`
or if not built (either by manually calling build or calling enqueue).
"""
if not self._using_tpu:
raise RuntimeError("dequeue is not valid when TPUEmbedding object is not "
"created under a TPUStrategy.")
if not self._built:
raise RuntimeError("dequeue called on unbuilt TPUEmbedding object. "
"Please either call enqueue first or manually call "
"the build method.")
# The activations returned by this op are per table. So we must separate
# them out into per feature activations. The activations are interleaved:
# for each table, we expect a [num_features*batch_size, dim] tensor.
# E.g. we expect the slice [:num_features, :] to contain the lookups for the
# first example of all features using this table.
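    # Worked example (comment added for clarity): with two features sharing
    # one table, per core batch size 3 and dim 4, the received tensor has
    # shape [6, 4]; the reshape further below turns it into [3, 2, 4],
    # indexed by [example, feature, dim].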
activations = tpu_ops.recv_tpu_embedding_activations(
num_outputs=len(self._table_config),
config=self._config_proto.SerializeToString())
# Apply the name tag to the op.
if name is not None:
_add_key_attr(activations[0].op, name)
# Compute the number of features for this table.
num_features = {table: 0 for table in self._table_config}
for feature in nest.flatten(self._feature_config):
num_features[feature.table] += (1 if feature.max_sequence_length == 0
else feature.max_sequence_length)
# Activations are reshaped so that they are indexed by batch size and then
# by the 'feature' index within the batch. The final dimension should equal
# the dimension of the table.
table_to_activation = {
table: array_ops.reshape(activation,
[self._batch_size, num_features[table], -1])
for table, activation in zip(self._table_config, activations)}
# We process the features in the same order we enqueued them.
    # For each feature we take the next slice of the activations, so we need
    # to track the activations and the current position we are in.
table_to_position = {table: 0 for table in self._table_config}
per_feature_activations = []
for feature in nest.flatten(self._feature_config):
activation = table_to_activation[feature.table]
feature_index = table_to_position[feature.table]
# We treat non-sequence and sequence features differently here as sequence
# features have rank 3 while non-sequence features have rank 2.
if feature.max_sequence_length == 0:
per_feature_activations.append(
activation[:, feature_index, :])
table_to_position[feature.table] += 1
else:
per_feature_activations.append(
activation[:, feature_index:(
feature_index+feature.max_sequence_length), :])
table_to_position[feature.table] += feature.max_sequence_length
# Pack the list back into the same nested structure as the features.
return nest.pack_sequence_as(self._feature_config, per_feature_activations)
def _create_variables_and_slots(self):
"""Create variables for TPU embeddings.
Note under TPUStrategy this will ensure that all creations happen within a
variable creation scope of the sharded variable creator.
Returns:
A dict of dicts. The outer dict is keyed by the table names and the inner
dicts are keyed by 'parameters' and the slot variable names.
"""
def create_variables(table):
"""Create all variables."""
shape = (table.vocabulary_size, table.dim)
def getter(name, shape, dtype, initializer, trainable):
# TODO(bfontain): make CheckpointInitialValue a callable rather than
# something that inherits from tensor.
if not isinstance(initializer, base.CheckpointInitialValue):
initial_value = functools.partial(initializer, shape, dtype=dtype)
else:
initial_value = initializer
return tf_variables.Variable(
name=name,
initial_value=initial_value,
trainable=trainable)
def variable_creator(name, initializer, trainable=True):
# use add_variable_with_custom_getter here so that we take advantage of
# the checkpoint loading to allow restore before the variables get
# created which avoids double initialization.
return self._add_variable_with_custom_getter(
name=name,
initializer=initializer,
shape=shape,
dtype=dtypes.float32,
getter=getter,
trainable=trainable)
parameters = variable_creator(table.name, table.initializer,
trainable=not self._using_tpu)
def slot_creator(name, initializer):
return variable_creator(table.name + "/" + name,
initializer,
False)
if table.optimizer is not None:
slot_vars = table.optimizer._create_slots(parameters, slot_creator) # pylint: disable=protected-access
else:
slot_vars = {}
slot_vars["parameters"] = parameters
return slot_vars
# Store tables based on name rather than TableConfig as we can't track
# through dicts with non-string keys, i.e. we won't be able to save.
variables = {}
for table in self._table_config:
if not self._using_tpu:
variables[table.name] = create_variables(table)
else:
with variable_scope.variable_creator_scope(
make_sharded_variable_creator(self._hosts)):
variables[table.name] = create_variables(table)
return variables
@def_function.function
def _load_variables(self):
"""Load embedding tables to onto TPU for each table and host."""
def select_fn(host_id):
return lambda x: x.variables[host_id]
num_hosts = self._strategy.extended.num_hosts
config = self._config_proto.SerializeToString()
for host_id, host in enumerate(self._hosts):
variables = nest.map_structure(select_fn(host_id), self._variables)
with ops.device(host):
for table in self._table_config:
table.optimizer._load()( # pylint: disable=protected-access
table_name=table.name,
num_shards=num_hosts,
shard_id=host_id,
config=config,
**variables[table.name])
# Ensure that only the first table/first host gets a config so that we
      # don't bloat the graph by attaching this large string to each op.
# We have num tables * num hosts of these so for models with a large
# number of tables training on a large slice, this can be an issue.
config = None
@def_function.function
def _retrieve_variables(self):
"""Retrieve embedding tables from TPU to host memory."""
num_hosts = self._strategy.extended.num_hosts
config = self._config_proto.SerializeToString()
for host_id, host in enumerate(self._hosts):
with ops.device(host):
for table in self._table_config:
retrieved = table.optimizer._retrieve()( # pylint: disable=protected-access
table_name=table.name,
num_shards=num_hosts,
shard_id=host_id,
config=config)
          # When there are no slot variables (e.g. with SGD) this returns a
# single tensor rather than a tuple. In this case we put the tensor in
# a list to make the following code easier to write.
if not isinstance(retrieved, tuple):
retrieved = (retrieved,)
for i, slot in enumerate(["parameters"] +
table.optimizer._slot_names()): # pylint: disable=protected-access
# We must assign the CPU variables the values of tensors that were
# returned from the TPU.
self._variables[table.name][slot].variables[host_id].assign(
retrieved[i])
# Ensure that only the first table/first host gets a config so that we
      # don't bloat the graph by attaching this large string to each op.
# We have num tables * num hosts of these so for models with a large
# number of tables training on a large slice, this can be an issue.
config = None
def _gather_saveables_for_checkpoint(self):
"""Overrides default Trackable implementation to add load/retrieve hook."""
# This saveable should be here in both TPU and CPU checkpoints, so when on
# CPU, we add the hook with no functions.
# TODO(bfontain): Update restore logic in saver so that these hooks are
# always executed. Once that is done, we can output an empty list when on
# CPU.
def _load_variables():
if self._using_tpu and self._built:
self._load_variables()
def _retrieve_variables():
if self._using_tpu and self._built:
self._retrieve_variables()
def factory(name=_HOOK_KEY):
return TPUEmbeddingSaveable(name, _load_variables, _retrieve_variables)
return {_HOOK_KEY: factory}
# Some helper functions for the below enqueue function.
def _add_data_for_tensor(self, tensor, weight, indices, values, weights,
int_zeros, float_zeros, path):
if weight is not None:
raise ValueError(
"Weight specified for dense input {}, which is not allowed. "
"Weight will always be 1 in this case.".format(path))
# For tensors, there are no indices and no weights.
indices.append(int_zeros)
values.append(math_ops.cast(tensor, dtypes.int32))
weights.append(float_zeros)
def _add_data_for_sparse_tensor(self, tensor, weight, indices, values,
weights, int_zeros, float_zeros, path):
indices.append(math_ops.cast(tensor.indices, dtypes.int32))
values.append(math_ops.cast(tensor.values, dtypes.int32))
# If we have weights they must be a SparseTensor.
if weight is not None:
if not isinstance(weight, sparse_tensor.SparseTensor):
raise ValueError("Weight for {} is type {} which does not match "
"type input which is SparseTensor.".format(
path, type(weight)))
weights.append(math_ops.cast(weight.values, dtypes.float32))
else:
weights.append(float_zeros)
def _add_data_for_ragged_tensor(self, tensor, weight, indices, values,
weights, int_zeros, float_zeros, path):
indices.append(math_ops.cast(tensor.row_splits, dtypes.int32))
values.append(math_ops.cast(tensor.values, dtypes.int32))
# If we have weights they must be a RaggedTensor.
if weight is not None:
if not isinstance(weight, ragged_tensor.RaggedTensor):
raise ValueError("Weight for {} is type {} which does not match "
"type input which is RaggedTensor.".format(
path, type(weight)))
weights.append(math_ops.cast(weight.values, dtypes.float32))
else:
weights.append(float_zeros)
def _generate_enqueue_op(self, flat_inputs, flat_weights, flat_features,
device_ordinal, mode_override):
"""Outputs a the enqueue op given the inputs and weights.
Args:
flat_inputs: A list of input tensors.
flat_weights: A list of input weights (or None) of the same length as
flat_inputs.
flat_features: A list of FeatureConfigs of the same length as flat_inputs.
device_ordinal: The device to create the enqueue op for.
mode_override: A tensor containing the string "train" or "inference".
Returns:
The enqueue op.
"""
    # First we need to understand which op to use. This depends on whether
    # sparse or ragged tensors are in the flat_inputs.
sparse = False
ragged = False
for inp in flat_inputs:
if isinstance(inp, sparse_tensor.SparseTensor):
sparse = True
elif isinstance(inp, ragged_tensor.RaggedTensor):
ragged = True
if sparse and ragged:
raise ValueError(
"Found both SparseTensors and RaggedTensors in the input to the "
"enqueue operation. Please ensure that your data does not include "
"both SparseTensors and RaggedTensors. It is ok to have Tensors in "
"combination with one of the previous types.")
    # Combiners are per table, listed in the same order as the tables.
combiners = [table.combiner for table in self._table_config]
# Reverse mapping of self._table_config, so that we can lookup the table
# index.
table_to_id = {table: i for i, table in enumerate(self._table_config)}
# These parallel arrays will be the inputs to the enqueue op.
indices = [] # sample_indices for sparse, sample_splits for ragged.
values = []
weights = []
table_ids = []
max_sequence_lengths = []
    # We have to supply an empty/zero tensor in a list position where we don't
# have data (e.g. indices for standard Tensor input, weight when no weight
# is specified). We create one op here per call, so that we reduce the
# graph size.
int_zeros = array_ops.zeros((0,), dtype=dtypes.int32)
float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)
# In the following loop we insert casts so that everything is either int32
# or float32. This is because op inputs which are lists of tensors must be
    # of the same type within the list. Moreover the CPU implementations of
    # these ops cast to these types anyway, so we don't lose any data by
    # casting early.
for inp, weight, (path, feature) in zip(
flat_inputs, flat_weights, flat_features):
table_ids.append(table_to_id[feature.table])
max_sequence_lengths.append(feature.max_sequence_length)
if isinstance(inp, ops.Tensor):
self._add_data_for_tensor(inp, weight, indices, values, weights,
int_zeros, float_zeros, path)
elif isinstance(inp, sparse_tensor.SparseTensor):
self._add_data_for_sparse_tensor(inp, weight, indices, values, weights,
int_zeros, float_zeros, path)
elif isinstance(inp, ragged_tensor.RaggedTensor):
self._add_data_for_ragged_tensor(inp, weight, indices, values, weights,
int_zeros, float_zeros, path)
else:
raise ValueError("Input {} is of unknown type {}. Please only pass "
"Tensor, SparseTensor or RaggedTensor as input to "
"enqueue.".format(path, type(inp)))
if ragged:
return tpu_ops.enqueue_tpu_embedding_ragged_tensor_batch(
sample_splits=indices,
embedding_indices=values,
aggregation_weights=weights,
mode_override=mode_override,
device_ordinal=device_ordinal,
combiners=combiners,
table_ids=table_ids,
max_sequence_lengths=max_sequence_lengths)
return tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices=indices,
embedding_indices=values,
aggregation_weights=weights,
mode_override=mode_override,
device_ordinal=device_ordinal,
combiners=combiners,
table_ids=table_ids,
max_sequence_lengths=max_sequence_lengths)
def _raise_error_for_incorrect_control_flow_context(self):
"""Raises an error if we are not in the TPUReplicateContext."""
# Do not allow any XLA control flow (i.e. control flow in between a
# TPUStrategy's run call and the call to this function), as we can't
# extract the enqueue from the head when in XLA control flow.
graph = ops.get_default_graph()
in_tpu_ctx = False
while graph is not None:
ctx = graph._get_control_flow_context() # pylint: disable=protected-access
while ctx is not None:
if isinstance(ctx, tpu.TPUReplicateContext):
in_tpu_ctx = True
break
ctx = ctx.outer_context
if in_tpu_ctx:
break
graph = getattr(graph, "outer_graph", None)
if graph != ops.get_default_graph() and in_tpu_ctx:
raise RuntimeError(
"Current graph {} does not match graph which contains "
"TPUReplicateContext {}. This is most likely due to the fact that "
"enqueueing embedding data is called inside control flow or a "
"nested function inside `strategy.run`. This is not supported "
"because outside compilation fails to extract the enqueue ops as "
"head of computation.".format(ops.get_default_graph(), graph))
return in_tpu_ctx
def _raise_error_for_non_direct_inputs(self, features):
"""Checks all tensors in features to see if they are a direct input."""
# expand_composites here is important: as composite tensors pass through
# tpu.replicate, they get 'flattened' into their component tensors and then
    # repacked before being passed to the tpu function. This means that it is
    # the component tensors which are produced by an op with the
    # "_tpu_input_identity" attribute.
for path, input_tensor in nest.flatten_with_joined_string_paths(
features, expand_composites=True):
if input_tensor.op.type == "Placeholder":
continue
try:
is_input = input_tensor.op.get_attr("_tpu_input_identity")
except ValueError:
is_input = False
if not is_input:
raise ValueError(
"Received input tensor {} which is the output of op {} (type {}) "
"which does not have the `_tpu_input_identity` attr. Please "
"ensure that the inputs to this layer are taken directly from "
"the arguments of the function called by "
"strategy.run. Two possible causes are: dynamic batch size "
"support or you are using a keras layer and are not passing "
"tensors which match the dtype of the `tf.keras.Input`s."
"If you are triggering dynamic batch size support, you can "
"disable it by passing tf.distribute.RunOptions("
"experimental_enable_dynamic_batch_size=False) to the options "
"argument of strategy.run().".format(path,
input_tensor.op.name,
input_tensor.op.type))
def _raise_error_for_inputs_not_on_cpu(self, features):
"""Checks all tensors in features to see are placed on the CPU."""
def check_device(path, device_string):
spec = tf_device.DeviceSpec.from_string(device_string)
if spec.device_type == "TPU":
raise ValueError(
"Received input tensor {} which is on a TPU input device {}. Input "
"tensors for TPU embeddings must be placed on the CPU. Please "
"ensure that your dataset is prefetching tensors to the host by "
"setting the 'experimental_prefetch_to_device' option of the "
"dataset distribution function. See the documentation of the "
"enqueue method for an example.".format(
path, device_string))
# expand_composites here is important, we need to check the device of each
# underlying tensor.
for path, input_tensor in nest.flatten_with_joined_string_paths(
features, expand_composites=True):
if (input_tensor.op.type == "Identity" and
input_tensor.op.inputs[0].op.type == "TPUReplicatedInput"):
for tensor in input_tensor.op.inputs[0].op.inputs:
check_device(path, tensor.device)
else:
check_device(path, input_tensor.device)
def enqueue(self, features, weights=None, training=True, name=None):
"""Enqueues id tensors for embedding lookup.
This function enqueues a structure of features to be looked up in the
embedding tables. We expect that the batch size of each of the tensors in
features matches the per core batch size. This will automatically happen if
your input dataset is batched to the global batch size and you use
`tf.distribute.TPUStrategy`'s `experimental_distribute_dataset`
or if you use `experimental_distribute_datasets_from_function` and batch
to the per core batch size computed by the context passed to your input
function.
```python
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)
distributed_dataset = (
strategy.experimental_distribute_datasets_from_function(
dataset_fn=...,
options=tf.distribute.InputOptions(
                experimental_prefetch_to_device=False)))
dataset_iterator = iter(distributed_dataset)
@tf.function
def training_step():
def tpu_step(tpu_features):
with tf.GradientTape() as tape:
activations = embedding.dequeue()
tape.watch(activations)
loss = ... # some computation involving activations
embedding_gradients = tape.gradient(loss, activations)
embedding.apply_gradients(embedding_gradients)
embedding_features, tpu_features = next(dataset_iterator)
embedding.enqueue(embedding_features, training=True)
    strategy.run(tpu_step, args=(tpu_features,))
training_step()
```
NOTE: You should specify `training=True` when using
`embedding.apply_gradients` as above and `training=False` when not using
`embedding.apply_gradients` (e.g. for frozen embeddings or when doing
evaluation).
Args:
features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or
`tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs
will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`
or `tf.RaggedTensor` is supported per call.
weights: If not `None`, a nested structure of `tf.Tensor`s,
`tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except
that the tensors should be of float type (and they will be downcast to
`tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the
same for the parallel entries from `features` and similarly for
`tf.RaggedTensor`s we assume the row_splits are the same.
training: Defaults to `True`. If `False`, enqueue the batch as inference
batch (forward pass only). Do not call `apply_gradients` when this is
`False` as this may lead to a deadlock.
name: A name for the underlying op.
Raises:
ValueError: When called inside a strategy.run call and input is not
directly taken from the args of the `strategy.run` call. Also if
the size of any sequence in `features` does not match corresponding
sequence in `feature_config`. Similarly for `weights`, if not `None`.
If batch size of features is unequal or different from a previous call.
RuntimeError: When called inside a strategy.run call and inside XLA
control flow. If batch_size is not able to be determined and build was
not called.
TypeError: If the type of any sequence in `features` does not match
corresponding sequence in `feature_config`. Similarly for `weights`, if
not `None`.
"""
if not self._using_tpu:
raise RuntimeError("enqueue is not valid when TPUEmbedding object is not "
"created under a TPUStrategy.")
in_tpu_context = self._raise_error_for_incorrect_control_flow_context()
# Should we also get batch_size from weights if they exist?
# Since features is assumed to be batched at the per replica batch size
    # the returned batch size here is per replica and not global.
batch_size = self._get_batch_size(features, in_tpu_context)
if batch_size is None and not self._built:
raise RuntimeError("Unable to determine batch size from input features."
"Please call build() with global batch size to "
"initialize the TPU for embeddings.")
if batch_size is not None:
self._maybe_build(batch_size)
if self._batch_size != batch_size:
raise ValueError("Multiple calls to enqueue with different batch sizes "
"{} and {}.".format(self._batch_size,
batch_size))
nest.assert_same_structure(self._feature_config, features)
flat_inputs = nest.flatten(features)
flat_weights = [None] * len(flat_inputs)
if weights is not None:
nest.assert_same_structure(self._feature_config, weights)
flat_weights = nest.flatten(weights)
flat_features = nest.flatten_with_joined_string_paths(self._feature_config)
self._raise_error_for_inputs_not_on_cpu(features)
# If we are in a tpu_context, automatically apply outside compilation.
if in_tpu_context:
self._raise_error_for_non_direct_inputs(features)
def generate_enqueue_ops():
"""Generate enqueue ops for outside compilation."""
        # Note that we use array_ops.where_v2 rather than a python if so that
        # the op is explicitly created and the constant ops are both in the
        # graph even though we don't expect training to be a tensor (and thus
        # generate control flow automatically). This is needed to make it
        # easier to re-write the graph later if we need to fix which mode
        # needs to be used.
mode_override = array_ops.where_v2(training,
constant_op.constant("train"),
constant_op.constant("inference"))
# Device ordinal is -1 here, a later rewrite will fix this once the op
# is expanded by outside compilation.
enqueue_op = self._generate_enqueue_op(
flat_inputs, flat_weights, flat_features, device_ordinal=-1,
mode_override=mode_override)
# Apply the name tag to the op.
if name is not None:
_add_key_attr(enqueue_op, name)
# Ensure that this op has outbound control flow, otherwise it won't be
# executed.
ops.get_default_graph().control_outputs.append(enqueue_op)
tpu.outside_compilation(generate_enqueue_ops)
else:
mode_override = "train" if training else "inference"
      # We generate enqueue ops per device, so we need to gather all the
      # features for a single device together.
# We rely here on the fact that the devices in the PerReplica value occur
# in the same (standard) order as self._strategy.extended.worker_devices.
enqueue_ops = []
for replica_id in range(self._strategy.num_replicas_in_sync):
replica_inputs = distribute_utils.select_replica(replica_id,
flat_inputs)
replica_weights = distribute_utils.select_replica(replica_id,
flat_weights)
tpu_device = self._strategy.extended.worker_devices[replica_id]
        # TPU device strings look like
        # /job:worker/replica:0/task:0/device:TPU:0; the device ordinal is the
        # last number.
device_ordinal = int(tpu_device.rsplit(":", 1)[1])
with ops.device(device_util.get_host_for_device(tpu_device)):
enqueue_op = self._generate_enqueue_op(
replica_inputs, replica_weights, flat_features,
device_ordinal=device_ordinal, mode_override=mode_override)
# Apply the name tag to the op.
if name is not None:
_add_key_attr(enqueue_op, name)
enqueue_ops.append(enqueue_op)
ops.get_default_graph().control_outputs.extend(enqueue_ops)
def _get_batch_size(self, tensors, in_tpu_context):
"""Gets the batch size from a nested structure of features."""
batch_size = None
for path, maybe_tensor in nest.flatten_with_joined_string_paths(tensors):
tensor_list = []
if not in_tpu_context:
        # If we are not in a TPU context, then this is a PerReplica value and
        # we need to check each replica's batch size.
for replica_id in range(self._strategy.num_replicas_in_sync):
tensor_list.append(distribute_utils.select_replica(replica_id,
maybe_tensor))
else:
tensor_list = [maybe_tensor]
for tensor in tensor_list:
if tensor.shape.rank < 1:
raise ValueError(
"Input {} has rank 0, rank must be at least 1.".format(path))
shape = tensor.shape.as_list()
if shape[0] is not None:
if batch_size is None:
batch_size = shape[0]
elif batch_size != shape[0]:
raise ValueError("Found multiple batch sizes {} and {}. All inputs "
"must have the same batch dimensions size.".format(
batch_size, shape[0]))
return batch_size
class TPUEmbeddingSaveable(saveable_hook.SaveableHook):
"""Save/Restore hook to Retrieve/Load TPUEmbedding variables."""
def __init__(self, name, load, retrieve):
self._load = load
self._retrieve = retrieve
super(TPUEmbeddingSaveable, self).__init__(name=name)
def before_save(self):
if self._retrieve is not None:
self._retrieve()
def after_restore(self):
if self._load is not None:
self._load()
def _ragged_embedding_lookup_with_reduce(table, ragged, weights, combiner):
"""Compute a ragged lookup followed by a reduce on axis 1.
Args:
table: The embedding table.
ragged: A RaggedTensor of ids to look up.
weights: A RaggedTensor of weights (or None).
combiner: One of "mean", "sum", "sqrtn".
Returns:
A Tensor.
"""
if weights is None:
weights = array_ops.ones_like(ragged, dtype=table.dtype)
weights = array_ops.expand_dims(weights, axis=2)
ragged_result = embedding_ops.embedding_lookup_ragged(table, ragged)
ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1)
if combiner == "mean":
ragged_result = ragged_result / math_ops.reduce_sum(weights, axis=1)
elif combiner == "sqrtn":
    ragged_result = ragged_result / math_ops.sqrt(math_ops.reduce_sum(
weights*weights, axis=1))
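  # In summary (comment added for clarity): with per-id embeddings e_j and
  # weights w_j, "sum" returns sum_j(w_j * e_j), "mean" divides that by
  # sum_j(w_j), and "sqrtn" divides it by sqrt(sum_j(w_j ** 2)).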
return ragged_result
def cpu_embedding_lookup(inputs, weights, tables, feature_config):
"""Uses CPU embedding lookup for embedding ids in features.
Args:
inputs: a nested structure of Tensors, SparseTensors or RaggedTensors.
weights: a nested structure of Tensors, SparseTensors or RaggedTensors or
None for no weights.
tables: a dict of mapping TableConfig objects to Variables.
feature_config: a nested structure of FeatureConfig objects with the same
structure as inputs.
Returns:
A nested structure of Tensors with the same structure as inputs.
"""
nest.assert_same_structure(inputs, feature_config)
flat_inputs = nest.flatten(inputs)
flat_weights = [None] * len(flat_inputs)
if weights is not None:
nest.assert_same_structure(inputs, weights)
flat_weights = nest.flatten(weights)
flat_features = nest.flatten_with_joined_string_paths(feature_config)
outputs = []
for inp, weight, (path, feature) in zip(
flat_inputs, flat_weights, flat_features):
table = tables[feature.table]
if feature.max_sequence_length > 0:
raise ValueError("Sequence features unsupported at this time.")
if weight is not None:
if isinstance(inp, ops.Tensor):
raise ValueError(
"Weight specified for {}, but input is dense.".format(path))
elif type(weight) is not type(inp):
raise ValueError(
"Weight for {} is of type {} but it does not match type of the "
"input which is {}.".format(path, type(weight), type(inp)))
if isinstance(inp, ops.Tensor):
outputs.append(embedding_ops.embedding_lookup_v2(table, inp))
elif isinstance(inp, sparse_tensor.SparseTensor):
outputs.append(embedding_ops.safe_embedding_lookup_sparse_v2(
table, inp, sparse_weights=weight, combiner=feature.table.combiner))
elif isinstance(inp, ragged_tensor.RaggedTensor):
outputs.append(_ragged_embedding_lookup_with_reduce(
table, inp, weight, feature.table.combiner))
else:
raise ValueError("Input {} is type {}. Tensor, SparseTensor or "
"RaggedTensor expected.".format(path, type(inp)))
return nest.pack_sequence_as(feature_config, outputs)
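# Editor's sketch (not part of the original module): a minimal example of
# calling cpu_embedding_lookup above with one table and a dense id tensor. The
# table/feature names and sizes are illustrative assumptions, not taken from
# any real model.
def _cpu_embedding_lookup_example():
  import tensorflow as tf  # local import so the sketch stays self-contained
  table = tf.tpu.experimental.embedding.TableConfig(
      vocabulary_size=8, dim=4, name="video")
  feature = tf.tpu.experimental.embedding.FeatureConfig(
      table=table, name="watched")
  # cpu_embedding_lookup expects a dict mapping TableConfig objects to the
  # actual embedding variables, as returned by the embedding_tables property.
  tables = {table: tf.Variable(tf.random.uniform((8, 4)))}
  ids = tf.constant([[0, 3], [5, 7]])  # dense ids, shape [batch, 2]
  # Dense inputs take the plain embedding_lookup path, so no weights here.
  return cpu_embedding_lookup(
      inputs={"watched": ids},
      weights=None,
      tables=tables,
      feature_config={"watched": feature})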
def get_list_of_hosts(strategy):
"""Returns a sorted list of CPU devices for the remote jobs.
Args:
strategy: A TPUStrategy object.
Returns:
    A sorted list of device strings.
"""
list_of_hosts = []
# Assume this is sorted by task
for tpu_device in strategy.extended.worker_devices:
host = device_util.get_host_for_device(tpu_device)
if host not in list_of_hosts:
list_of_hosts.append(host)
assert len(list_of_hosts) == strategy.extended.num_hosts
return list_of_hosts
def extract_variable_info(kwargs):
"""Extracts the variable creation attributes from the kwargs.
Args:
kwargs: a dict of keyword arguments that were passed to a variable creator
scope.
Returns:
    A tuple of variable name, shape, dtype and initialization function.
"""
if (isinstance(kwargs["initial_value"], functools.partial) and (
"shape" in kwargs["initial_value"].keywords or
kwargs["initial_value"].args)):
# Sometimes shape is passed positionally, sometimes it's passed as a kwarg.
if "shape" in kwargs["initial_value"].keywords:
shape = kwargs["initial_value"].keywords["shape"]
else:
shape = kwargs["initial_value"].args[0]
return (kwargs["name"], shape,
kwargs["initial_value"].keywords.get("dtype", kwargs["dtype"]),
kwargs["initial_value"].func)
elif isinstance(kwargs["initial_value"], base.CheckpointInitialValue):
return (kwargs["name"], kwargs["initial_value"].shape,
kwargs["initial_value"].dtype, kwargs["initial_value"])
elif "shape" not in kwargs or kwargs["shape"] is None:
raise ValueError(
"Unable to extract initializer function and shape from {}. Please "
"either pass a function that expects a shape and dtype as the "
"initial value for your variable or functools.partial object with "
"the shape and dtype kwargs set. This is needed so that we can "
"initialize the shards of the ShardedVariable locally.".format(
kwargs["initial_value"]))
else:
return (kwargs["name"], kwargs["shape"], kwargs["dtype"],
kwargs["initial_value"])
def make_sharded_variable_creator(hosts):
"""Makes a sharded variable creator given a list of hosts.
Args:
hosts: a list of tensorflow devices on which to shard the tensors.
Returns:
A variable creator function.
"""
def sharded_variable_creator(next_creator, *args, **kwargs):
"""The sharded variable creator."""
kwargs["skip_mirrored_creator"] = True
num_hosts = len(hosts)
name, shape, dtype, initial_value = extract_variable_info(kwargs)
rows = shape[0]
cols = shape[1]
missing = rows % num_hosts
# we partition as if we were using MOD sharding.
partitions = ([rows // num_hosts + 1] * missing + [rows // num_hosts] *
(num_hosts - missing))
variables = []
newkwargs = kwargs
newkwargs["dtype"] = dtype
# TODO(bfontain): Remove this check once we can pass position and shape of
# shards to CheckpointInitialValue.
if isinstance(initial_value, base.CheckpointInitialValue) and num_hosts > 1:
raise RuntimeError("Delayed restoration of variables not available when "
"there are multiple TPU hosts, please ensure that the "
"api object is build before you restore.")
for i, p in enumerate(partitions):
with ops.device(hosts[i]):
newkwargs["shape"] = (p, cols)
newkwargs["name"] = "{}_{}".format(name, i)
if isinstance(initial_value, base.CheckpointInitialValue):
# TODO(bfontain): Patch CheckpointInitialValue to take in account the
# position and shape of this shard.
newkwargs["initial_value"] = initial_value
else:
newkwargs["initial_value"] = (
lambda: initial_value(newkwargs["shape"], dtype=dtype))
variables.append(next_creator(*args, **kwargs))
return TPUShardedVariable(variables, name=name)
return sharded_variable_creator
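# Editor's sketch (not part of the original module): the row partitioning used
# by sharded_variable_creator above, on made-up numbers. For example, 10
# vocabulary rows over 4 hosts gives shard sizes [3, 3, 2, 2].
def _shard_partition_example(rows=10, num_hosts=4):
  missing = rows % num_hosts
  # The first `missing` hosts get one extra row so that every row is assigned.
  return ([rows // num_hosts + 1] * missing +
          [rows // num_hosts] * (num_hosts - missing))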
| 43.469737 | 114 | 0.689681 |
a1f44f3ffc0411e3f190ae31f11985fe9762ccc6 | 5,450 | py | Python | tests/test_processing.py | semccomas/string-method-gmxapi | fb68dce792d35df739225b1048e0816a4a61d45e | ["MIT"] | 6 | 2020-10-15T16:43:19.000Z | 2022-01-21T09:09:13.000Z | tests/test_processing.py | semccomas/string-method-gmxapi | fb68dce792d35df739225b1048e0816a4a61d45e | ["MIT"] | 9 | 2020-07-01T08:36:49.000Z | 2021-06-23T07:15:53.000Z | tests/test_processing.py | semccomas/string-method-gmxapi | fb68dce792d35df739225b1048e0816a4a61d45e | ["MIT"] | 5 | 2020-07-15T06:08:00.000Z | 2021-07-02T14:24:59.000Z |
import unittest
import numpy as np
from stringmethod.config import Config
from stringmethod.postprocessing import *
def create_constant_probability_distribution(n_grid_points, n_transitions=1):
prob = np.zeros((n_grid_points, n_grid_points))
prob += 1.0 / prob.size
minx, maxx = 0, n_grid_points - 1
miny, maxy = 1, n_grid_points
grid = np.array(
[
np.linspace(minx, maxx, n_grid_points),
np.linspace(miny, maxy, n_grid_points),
]
).T
    # Create transition points from every grid point to all other grid points.
delta = grid[1, 0] * 0.49 # Some noise smaller than the grid size
vals = []
for startx in grid[:, 0]:
for starty in grid[:, 1]:
for endx in grid[:, 0]:
for endy in grid[:, 1]:
transition = np.empty((2, 2))
transition[0, 0] = startx
transition[0, 1] = starty
transition[1, 0] = endx
transition[1, 1] = endy
for n in range(n_transitions):
if (
maxx > startx > minx
and maxy > starty > miny
and maxx > endx > minx
and maxy > endy > miny
):
# Displace atoms slightly as long as we don't mess up the grid boundaries
r = (np.random.rand() - 0.5) * delta
vals.append(transition + r)
else:
vals.append(transition)
vals = np.array(vals)
return prob, grid, vals
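# Shapes, for orientation (comment added): `prob` is a uniform
# (n_grid_points, n_grid_points) array, `grid` is (n_grid_points, 2) with one
# CV per column, and `vals` holds n_grid_points**4 * n_transitions transitions,
# each a (2, 2) array of (start, end) CV coordinates.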
class TestPostProcessing(unittest.TestCase):
def setUp(self):
self.config = Config()
def test_correct_transition_count(self):
n_grid_points = 10
n_transitions = 3
(
in_prob,
grid,
cv_coordinates,
) = create_constant_probability_distribution(
n_grid_points=n_grid_points, n_transitions=n_transitions
)
tc = TransitionCountCalculator(
config=self.config,
n_grid_points=n_grid_points,
cv_coordinates=cv_coordinates,
)
tc.run()
self.assertEqual(grid.shape, tc.grid.shape)
self.assertAlmostEqual(
abs(grid - tc.grid).max(), 0, places=4, msg="Grids differ"
)
print(tc.transition_count)
self.assertTrue(
np.all(tc.transition_count == n_transitions),
"Transition count is constant",
)
def test_correct_probability_distribution(self):
n_grid_points = 10
(
in_prob,
grid,
cv_coordinates,
) = create_constant_probability_distribution(
n_grid_points=n_grid_points
)
tc = TransitionCountCalculator(
config=self.config,
n_grid_points=n_grid_points,
cv_coordinates=cv_coordinates,
)
tc.run()
fc = FreeEnergyCalculator(
config=self.config,
grid=tc.grid,
transition_count=tc.transition_count,
)
fc.run()
out_prob = fc.probability_distribution
self.assertEqual(in_prob.shape, out_prob.shape)
for row_idx, p_row in enumerate(in_prob):
for col_idx, p_in in enumerate(p_row):
p_out = out_prob[row_idx, col_idx]
self.assertAlmostEqual(
p_out,
p_in,
places=4,
msg="Probability not equal at index %s, %s"
% (row_idx, col_idx),
)
self.assertAlmostEqual(
1.0,
out_prob.sum(),
places=4,
msg="Total probability should equal 1",
)
def test_two_state_1d_probability_distribution(self):
n_grid_points = 2
in_prob = np.array([[2 / 3], [1 / 3]])
cv_coordinates = np.array(
[[0, 0], [0, 0], [0, 1], [1, 1], [1, 0], [1, 0]]
)
in_transition_count = np.array([[2, 1], [2, 1]])
tc = TransitionCountCalculator(
config=self.config,
n_grid_points=n_grid_points,
cv_coordinates=cv_coordinates,
)
tc.run()
self.assertAlmostEqual(
abs(in_transition_count - tc.transition_count).max(),
0,
places=4,
msg="Transition count differs",
)
fc = FreeEnergyCalculator(
config=self.config,
grid=tc.grid,
transition_count=tc.transition_count,
)
fc.run()
out_prob = fc.probability_distribution
self.assertEqual(in_prob.shape, out_prob.shape)
for row_idx, p_row in enumerate(in_prob):
for col_idx, p_in in enumerate(p_row):
p_out = out_prob[row_idx, col_idx]
self.assertAlmostEqual(
p_out,
p_in,
places=4,
msg="Probability not equal at index %s, %s"
% (row_idx, col_idx),
)
self.assertAlmostEqual(
1.0,
out_prob.sum(),
places=4,
msg="Total probability should equal 1",
)
if __name__ == "__main__":
unittest.main()
| 32.831325 | 101 | 0.514862 |
dfba26428ee60107dc165dbf764202d286d4263e | 3,043 | py | Python | .conan/build.py | hmich/Catch | 2a1b332732cd1721601226ca4e9ddfff9129ebf2 | ["BSL-1.0"] | 322 | 2019-04-03T15:31:46.000Z | 2022-03-21T13:32:06.000Z | .conan/build.py | hmich/Catch | 2a1b332732cd1721601226ca4e9ddfff9129ebf2 | ["BSL-1.0"] | 11 | 2019-04-03T15:32:09.000Z | 2021-12-19T13:14:58.000Z | .conan/build.py | hmich/Catch | 2a1b332732cd1721601226ca4e9ddfff9129ebf2 | ["BSL-1.0"] | 110 | 2019-04-03T15:54:58.000Z | 2022-03-25T09:26:41.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from cpt.packager import ConanMultiPackager
from cpt.ci_manager import CIManager
from cpt.printer import Printer
class BuilderSettings(object):
@property
def username(self):
""" Set catchorg as package's owner
"""
return os.getenv("CONAN_USERNAME", "catchorg")
@property
def login_username(self):
""" Set Bintray login username
"""
return os.getenv("CONAN_LOGIN_USERNAME", "horenmar")
@property
def upload(self):
""" Set Catch2 repository to be used on upload.
The upload server address could be customized by env var
CONAN_UPLOAD. If not defined, the method will check the branch name.
Only master or CONAN_STABLE_BRANCH_PATTERN will be accepted.
The master branch will be pushed to testing channel, because it does
not match the stable pattern. Otherwise it will upload to stable
channel.
"""
return os.getenv("CONAN_UPLOAD", "https://api.bintray.com/conan/catchorg/Catch2")
@property
def upload_only_when_stable(self):
""" Force to upload when running over tag branch
"""
return os.getenv("CONAN_UPLOAD_ONLY_WHEN_STABLE", "True").lower() in ["true", "1", "yes"]
@property
def stable_branch_pattern(self):
""" Only upload the package the branch name is like a tag
"""
return os.getenv("CONAN_STABLE_BRANCH_PATTERN", r"v\d+\.\d+\.\d+")
@property
def reference(self):
""" Read project version from branch create Conan referece
"""
return os.getenv("CONAN_REFERENCE", "Catch2/{}".format(self._version))
@property
def channel(self):
""" Default Conan package channel when not stable
"""
return os.getenv("CONAN_CHANNEL", "testing")
@property
def _version(self):
""" Get version name from cmake file
"""
pattern = re.compile(r"project\(Catch2 LANGUAGES CXX VERSION (\d+\.\d+\.\d+)\)")
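        # For orientation (comment added): this pattern matches a line such as
        #   project(Catch2 LANGUAGES CXX VERSION 2.13.0)
        # (the version number here is only illustrative) and captures the
        # version string.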
version = "latest"
with open("CMakeLists.txt") as file:
for line in file:
result = pattern.search(line)
if result:
version = result.group(1)
return version
@property
def _branch(self):
""" Get branch name from CI manager
"""
printer = Printer(None)
ci_manager = CIManager(printer)
return ci_manager.get_branch()
if __name__ == "__main__":
settings = BuilderSettings()
builder = ConanMultiPackager(
reference=settings.reference,
channel=settings.channel,
upload=settings.upload,
upload_only_when_stable=settings.upload_only_when_stable,
stable_branch_pattern=settings.stable_branch_pattern,
login_username=settings.login_username,
username=settings.username,
test_folder=os.path.join(".conan", "test_package"))
builder.add()
builder.run()
| 32.031579 | 97 | 0.626684 |
086f924d9e98ae1eca06c1c8a03e06a0bc4a515a | 74,190 | py | Python | hl7apy/v2_4/messages.py | tmoat/hl7apy | e5ca5eef86c91e0e3f312b89e0a9a77651e21158 | ["MIT"] | null | null | null | hl7apy/v2_4/messages.py | tmoat/hl7apy | e5ca5eef86c91e0e3f312b89e0a9a77651e21158 | ["MIT"] | null | null | null | hl7apy/v2_4/messages.py | tmoat/hl7apy | e5ca5eef86c91e0e3f312b89e0a9a77651e21158 | ["MIT"] | null | null | null |
from .groups import GROUPS
from .segments import SEGMENTS
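# Structure note (comment added): each message definition below appears to be
# a ('sequence', (...)) tuple whose entries are
# (name, definition, (min, max), 'SEG' or 'GRP'), where max == -1 reads as
# "repeatable without an upper bound". This is inferred from the data itself
# rather than from upstream hl7apy documentation.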
MESSAGES = {
'ACK': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),)),
'ACK_N02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),)),
'ADR_A19': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('ADR_A19_QUERY_RESPONSE', GROUPS['ADR_A19_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ADT_A01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A01_PROCEDURE', GROUPS['ADT_A01_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A01_INSURANCE', GROUPS['ADT_A01_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('UB1', SEGMENTS['UB1'], (0, 1), 'SEG'),
('UB2', SEGMENTS['UB2'], (0, 1), 'SEG'),
('PDA', SEGMENTS['PDA'], (0, 1), 'SEG'),)),
'ADT_A02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('PDA', SEGMENTS['PDA'], (0, 1), 'SEG'),)),
'ADT_A03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A03_PROCEDURE', GROUPS['ADT_A03_PROCEDURE'], (0, -1), 'GRP'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('PDA', SEGMENTS['PDA'], (0, 1), 'SEG'),)),
'ADT_A05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A05_PROCEDURE', GROUPS['ADT_A05_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A05_INSURANCE', GROUPS['ADT_A05_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('UB1', SEGMENTS['UB1'], (0, 1), 'SEG'),
('UB2', SEGMENTS['UB2'], (0, 1), 'SEG'),)),
'ADT_A06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('MRG', SEGMENTS['MRG'], (0, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A06_PROCEDURE', GROUPS['ADT_A06_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A06_INSURANCE', GROUPS['ADT_A06_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('UB1', SEGMENTS['UB1'], (0, 1), 'SEG'),
('UB2', SEGMENTS['UB2'], (0, 1), 'SEG'),)),
'ADT_A08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG',),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('ADT_A08_PROCEDURE', GROUPS['ADT_A08_PROCEDURE'], (0, -1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('ADT_A08_INSURANCE', GROUPS['ADT_A08_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('UB1', SEGMENTS['UB1'], (0, 1), 'SEG'),
('UB2', SEGMENTS['UB2'], (0, 1), 'SEG'),
('PDA', SEGMENTS['PDA'], (0, 1), 'SEG'),)),
'ADT_A09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),)),
'ADT_A15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),)),
'ADT_A16': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),)),
'ADT_A17': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),)),
'ADT_A18': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('MRG', SEGMENTS['MRG'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),)),
'ADT_A20': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('NPU', SEGMENTS['NPU'], (1, 1), 'SEG'),)),
'ADT_A21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),)),
'ADT_A24': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),)),
'ADT_A30': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('MRG', SEGMENTS['MRG'], (1, 1), 'SEG'),)),
'ADT_A37': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),)),
'ADT_A38': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('OBX', SEGMENTS['OBX'], (0, -1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),)),
'ADT_A39': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('ADT_A39_PATIENT', GROUPS['ADT_A39_PATIENT'], (1, -1), 'GRP'),)),
'ADT_A43': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('ADT_A43_PATIENT', GROUPS['ADT_A43_PATIENT'], (1, -1), 'GRP'),)),
'ADT_A45': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ADT_A45_MERGE_INFO', GROUPS['ADT_A45_MERGE_INFO'], (1, -1), 'GRP'),)),
'ADT_A50': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('MRG', SEGMENTS['MRG'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),)),
'ADT_A52': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),)),
'ADT_A54': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),)),
'ADT_A60': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('IAM', SEGMENTS['IAM'], (0, -1), 'SEG'),)),
'ADT_A61': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),)),
'BAR_P01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('BAR_P01_VISIT', GROUPS['BAR_P01_VISIT'], (1, -1), 'GRP'),)),
'BAR_P02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('BAR_P02_PATIENT', GROUPS['BAR_P02_PATIENT'], (1, -1), 'GRP'),)),
'BAR_P05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('BAR_P05_VISIT', GROUPS['BAR_P05_VISIT'], (1, -1), 'GRP'),)),
'BAR_P06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('BAR_P06_PATIENT', GROUPS['BAR_P06_PATIENT'], (1, -1), 'GRP'),)),
'BAR_P10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('GP1', SEGMENTS['GP1'], (1, 1), 'SEG'),
('BAR_P10_PROCEDURE', GROUPS['BAR_P10_PROCEDURE'], (0, -1), 'GRP'),)),
'CRM_C01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('CRM_C01_PATIENT', GROUPS['CRM_C01_PATIENT'], (1, -1), 'GRP'),)),
'CSU_C09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('CSU_C09_PATIENT', GROUPS['CSU_C09_PATIENT'], (1, -1), 'GRP'),)),
'DFT_P03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('DFT_P03_COMMON_ORDER', GROUPS['DFT_P03_COMMON_ORDER'], (0, -1), 'GRP'),
('DFT_P03_FINANCIAL', GROUPS['DFT_P03_FINANCIAL'], (1, -1), 'GRP'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('DFT_P03_INSURANCE', GROUPS['DFT_P03_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),)),
'DFT_P11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('PV1', SEGMENTS['PV1'], (0, 1), 'SEG'),
('PV2', SEGMENTS['PV2'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, -1), 'SEG'),
('DB1', SEGMENTS['DB1'], (0, -1), 'SEG'),
('DFT_P11_COMMON_ORDER', GROUPS['DFT_P11_COMMON_ORDER'], (0, -1), 'GRP'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, 1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('DFT_P11_INSURANCE', GROUPS['DFT_P11_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DFT_P11_FINANCIAL', GROUPS['DFT_P11_FINANCIAL'], (1, -1), 'GRP'),)),
'DOC_T12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('DOC_T12_RESULT', GROUPS['DOC_T12_RESULT'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'DSR_Q01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'DSR_Q03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (0, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'EAC_U07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('ECD', SEGMENTS['ECD'], (1, -1), 'SEG'),
('SAC', SEGMENTS['SAC'], (0, 1), 'SEG'),
('CNS', SEGMENTS['CNS'], (0, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'EAN_U09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('EAN_U09_NOTIFICATION', GROUPS['EAN_U09_NOTIFICATION'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'EAR_U08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('EAR_U08_COMMAND_RESPONSE', GROUPS['EAR_U08_COMMAND_RESPONSE'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'EDR_R07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'EQQ_Q04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQL', SEGMENTS['EQL'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ERP_R09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('ERQ', SEGMENTS['ERQ'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ESR_U02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'ESU_U01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('ISD', SEGMENTS['ISD'], (0, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'INR_U06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('INV', SEGMENTS['INV'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'INU_U05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('INV', SEGMENTS['INV'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'LSU_U12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('EQP', SEGMENTS['EQP'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'MDM_T01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('TXA', SEGMENTS['TXA'], (1, 1), 'SEG'),)),
'MDM_T02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PV1', SEGMENTS['PV1'], (1, 1), 'SEG'),
('TXA', SEGMENTS['TXA'], (1, 1), 'SEG'),
('OBX', SEGMENTS['OBX'], (1, -1), 'SEG'),)),
'MFK_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFA', SEGMENTS['MFA'], (0, -1), 'SEG'),)),
'MFN_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M01_MF', GROUPS['MFN_M01_MF'], (1, -1), 'GRP'),)),
'MFN_M02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M02_MF_STAFF', GROUPS['MFN_M02_MF_STAFF'], (1, -1), 'GRP'),)),
'MFN_M03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M03_MF_TEST', GROUPS['MFN_M03_MF_TEST'], (1, -1), 'GRP'),)),
'MFN_M04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M04_MF_CDM', GROUPS['MFN_M04_MF_CDM'], (1, -1), 'GRP'),)),
'MFN_M05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M05_MF_LOCATION', GROUPS['MFN_M05_MF_LOCATION'], (1, -1), 'GRP'),)),
'MFN_M06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M06_MF_CLIN_STUDY', GROUPS['MFN_M06_MF_CLIN_STUDY'], (1, -1), 'GRP'),)),
'MFN_M07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M07_MF_CLIN_STUDY_SCHED', GROUPS['MFN_M07_MF_CLIN_STUDY_SCHED'], (1, -1), 'GRP'),)),
'MFN_M08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M08_MF_TEST_NUMERIC', GROUPS['MFN_M08_MF_TEST_NUMERIC'], (1, -1), 'GRP'),)),
'MFN_M09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M09_MF_TEST_CATEGORICAL', GROUPS['MFN_M09_MF_TEST_CATEGORICAL'], (1, -1), 'GRP'),)),
'MFN_M10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M10_MF_TEST_BATTERIES', GROUPS['MFN_M10_MF_TEST_BATTERIES'], (1, -1), 'GRP'),)),
'MFN_M11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M11_MF_TEST_CALCULATED', GROUPS['MFN_M11_MF_TEST_CALCULATED'], (1, -1), 'GRP'),)),
'MFN_M12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFN_M12_MF_OBS_ATTRIBUTES', GROUPS['MFN_M12_MF_OBS_ATTRIBUTES'], (1, -1), 'GRP'),)),
'MFQ_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'MFR_M01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('MFI', SEGMENTS['MFI'], (1, 1), 'SEG'),
('MFR_M01_MF_QUERY', GROUPS['MFR_M01_MF_QUERY'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'NMD_N02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NMD_N02_CLOCK_AND_STATS_WITH_NOTES', GROUPS['NMD_N02_CLOCK_AND_STATS_WITH_NOTES'], (1, -1), 'GRP'),)),
'NMQ_N01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NMQ_N01_QRY_WITH_DETAIL', GROUPS['NMQ_N01_QRY_WITH_DETAIL'], (0, 1), 'GRP'),
('NMQ_N01_CLOCK_AND_STATISTICS', GROUPS['NMQ_N01_CLOCK_AND_STATISTICS'], (1, -1), 'GRP'),)),
'NMR_N01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (0, 1), 'SEG'),
('NMR_N01_CLOCK_AND_STATS_WITH_NOTES_ALT', GROUPS['NMR_N01_CLOCK_AND_STATS_WITH_NOTES_ALT'], (1, -1), 'GRP'),)),
'OMD_O03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMD_O03_PATIENT', GROUPS['OMD_O03_PATIENT'], (0, 1), 'GRP'),
('OMD_O03_ORDER_DIET', GROUPS['OMD_O03_ORDER_DIET'], (1, -1), 'GRP'),
('OMD_O03_ORDER_TRAY', GROUPS['OMD_O03_ORDER_TRAY'], (0, -1), 'GRP'),)),
'OMG_O19': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMG_O19_PATIENT', GROUPS['OMG_O19_PATIENT'], (0, 1), 'GRP'),
('OMG_O19_ORDER', GROUPS['OMG_O19_ORDER'], (1, -1), 'GRP'),)),
'OML_O21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OML_O21_PATIENT', GROUPS['OML_O21_PATIENT'], (0, 1), 'GRP'),
('OML_O21_ORDER_GENERAL', GROUPS['OML_O21_ORDER_GENERAL'], (1, -1), 'GRP'),)),
'OMN_O07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMN_O07_PATIENT', GROUPS['OMN_O07_PATIENT'], (0, 1), 'GRP'),
('OMN_O07_ORDER', GROUPS['OMN_O07_ORDER'], (1, -1), 'GRP'),)),
'OMP_O09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMP_O09_PATIENT', GROUPS['OMP_O09_PATIENT'], (0, 1), 'GRP'),
('OMP_O09_ORDER', GROUPS['OMP_O09_ORDER'], (1, -1), 'GRP'),)),
'OMS_O05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('OMS_O05_PATIENT', GROUPS['OMS_O05_PATIENT'], (0, 1), 'GRP'),
('OMS_O05_ORDER', GROUPS['OMS_O05_ORDER'], (1, -1), 'GRP'),)),
'ORD_O04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORD_O04_RESPONSE', GROUPS['ORD_O04_RESPONSE'], (0, 1), 'GRP'),)),
'ORF_R04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('ORF_R04_RESPONSE', GROUPS['ORF_R04_RESPONSE'], (1, -1), 'GRP'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'ORG_O20': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORG_O20_RESPONSE', GROUPS['ORG_O20_RESPONSE'], (0, 1), 'GRP'),)),
'ORL_O22': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORL_O22_RESPONSE', GROUPS['ORL_O22_RESPONSE'], (0, 1), 'GRP'),)),
'ORM_O01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORM_O01_PATIENT', GROUPS['ORM_O01_PATIENT'], (0, 1), 'GRP'),
('ORM_O01_ORDER', GROUPS['ORM_O01_ORDER'], (1, -1), 'GRP'),)),
'ORN_O08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORN_O08_RESPONSE', GROUPS['ORN_O08_RESPONSE'], (0, 1), 'GRP'),)),
'ORP_O10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORP_O10_RESPONSE', GROUPS['ORP_O10_RESPONSE'], (0, 1), 'GRP'),)),
'ORR_O02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORR_O02_RESPONSE', GROUPS['ORR_O02_RESPONSE'], (0, 1), 'GRP'),)),
'ORS_O06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('ORS_O06_RSPONSE', GROUPS['ORS_O06_RSPONSE'], (0, 1), 'GRP'),)),
'ORU_R01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('ORU_R01_PATIENT_RESULT', GROUPS['ORU_R01_PATIENT_RESULT'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OSQ_Q06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OSR_Q06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('OSR_Q06_RESPONSE', GROUPS['OSR_Q06_RESPONSE'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'OUL_R21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, 1), 'SEG'),
('OUL_R21_PATIENT', GROUPS['OUL_R21_PATIENT'], (0, 1), 'GRP'),
('OUL_R21_VISIT', GROUPS['OUL_R21_VISIT'], (0, 1), 'GRP'),
('OUL_R21_ORDER_OBSERVATION', GROUPS['OUL_R21_ORDER_OBSERVATION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'PEX_P07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('PEX_P07_VISIT', GROUPS['PEX_P07_VISIT'], (0, 1), 'GRP'),
('PEX_P07_EXPERIENCE', GROUPS['PEX_P07_EXPERIENCE'], (1, -1), 'GRP'),)),
'PGL_PC6': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PGL_PC6_PATIENT_VISIT', GROUPS['PGL_PC6_PATIENT_VISIT'], (0, 1), 'GRP'),
('PGL_PC6_GOAL', GROUPS['PGL_PC6_GOAL'], (1, -1), 'GRP'),)),
'PMU_B01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),
('PRA', SEGMENTS['PRA'], (0, -1), 'SEG'),
('ORG', SEGMENTS['ORG'], (0, -1), 'SEG'),
('AFF', SEGMENTS['AFF'], (0, -1), 'SEG'),
('LAN', SEGMENTS['LAN'], (0, -1), 'SEG'),
('EDU', SEGMENTS['EDU'], (0, -1), 'SEG'),)),
'PMU_B03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),)),
'PMU_B04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EVN', SEGMENTS['EVN'], (1, 1), 'SEG'),
('STF', SEGMENTS['STF'], (1, 1), 'SEG'),
('PRA', SEGMENTS['PRA'], (0, -1), 'SEG'),
('ORG', SEGMENTS['ORG'], (0, 1), 'SEG'),)),
'PPG_PCG': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PPG_PCG_PATIENT_VISIT', GROUPS['PPG_PCG_PATIENT_VISIT'], (0, 1), 'GRP'),
('PPG_PCG_PATHWAY', GROUPS['PPG_PCG_PATHWAY'], (1, -1), 'GRP'),)),
'PPP_PCB': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PPP_PCB_PATIENT_VISIT', GROUPS['PPP_PCB_PATIENT_VISIT'], (0, 1), 'GRP'),
('PPP_PCB_PATHWAY', GROUPS['PPP_PCB_PATHWAY'], (1, -1), 'GRP'),)),
'PPR_PC1': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PPR_PC1_PATIENT_VISIT', GROUPS['PPR_PC1_PATIENT_VISIT'], (0, 1), 'GRP'),
('PPR_PC1_PROBLEM', GROUPS['PPR_PC1_PROBLEM'], (1, -1), 'GRP'),)),
'PPT_PCL': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PPT_PCL_PATIENT', GROUPS['PPT_PCL_PATIENT'], (1, -1), 'GRP'),)),
'PPV_PCA': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PPV_PCA_PATIENT', GROUPS['PPV_PCA_PATIENT'], (1, -1), 'GRP'),)),
'PRR_PC5': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PRR_PC5_PATIENT', GROUPS['PRR_PC5_PATIENT'], (1, -1), 'GRP'),)),
'PTR_PCF': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('PTR_PCF_PATIENT', GROUPS['PTR_PCF_PATIENT'], (1, -1), 'GRP'),)),
'QBP_K13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('QBP_K13_ROW_DEFINITION', GROUPS['QBP_K13_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('QBP_Q13_QBP', GROUPS['QBP_Q13_QBP'], (0, 1), 'GRP'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Q21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Qnn': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QBP_Z73': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),)),
'QCK_Q02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (0, 1), 'SEG'),)),
'QCN_J01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QID', SEGMENTS['QID'], (1, 1), 'SEG'),)),
'QRY_A19': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'QRY_PC4': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'QRY_Q01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QRY_Q02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QRY_R02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (1, 1), 'SEG'),)),
'QRY_T12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'QSB_Q16': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'QVR_Q17': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RAR_RAR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('RAR_RAR_DEFINITION', GROUPS['RAR_RAR_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RAS_O17': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RAS_O17_PATIENT', GROUPS['RAS_O17_PATIENT'], (0, 1), 'GRP'),
('RAS_O17_ORDER', GROUPS['RAS_O17_ORDER'], (1, -1), 'GRP'),)),
'RCI_I05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('RCI_I05_PROVIDER', GROUPS['RCI_I05_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RCI_I05_OBSERVATION', GROUPS['RCI_I05_OBSERVATION'], (0, -1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RCL_I06': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('RCL_I06_PROVIDER', GROUPS['RCL_I06_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('DSP', SEGMENTS['DSP'], (0, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RDE_O11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RDE_O11_PATIENT', GROUPS['RDE_O11_PATIENT'], (0, 1), 'GRP'),
('RDE_O11_ORDER', GROUPS['RDE_O11_ORDER'], (1, -1), 'GRP'),)),
'RDR_RDR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('RDR_RDR_DEFINITION', GROUPS['RDR_RDR_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RDS_O13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RDS_O13_PATIENT', GROUPS['RDS_O13_PATIENT'], (0, 1), 'GRP'),
('RDS_O13_ORDER', GROUPS['RDS_O13_ORDER'], (1, -1), 'GRP'),)),
'RDY_K15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (0, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'REF_I12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('REF_I12_AUTHORIZATION_CONTACT', GROUPS['REF_I12_AUTHORIZATION_CONTACT'], (0, 1), 'GRP'),
('REF_I12_PROVIDER_CONTACT', GROUPS['REF_I12_PROVIDER_CONTACT'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('REF_I12_INSURANCE', GROUPS['REF_I12_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('REF_I12_PROCEDURE', GROUPS['REF_I12_PROCEDURE'], (0, -1), 'GRP'),
('REF_I12_OBSERVATION', GROUPS['REF_I12_OBSERVATION'], (0, -1), 'GRP'),
('REF_I12_PATIENT_VISIT', GROUPS['REF_I12_PATIENT_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RER_RER': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('RER_RER_DEFINITION', GROUPS['RER_RER_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RGR_RGR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('RGR_RGR_DEFINTION', GROUPS['RGR_RGR_DEFINTION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RGV_O15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RGV_O15_PATIENT', GROUPS['RGV_O15_PATIENT'], (0, 1), 'GRP'),
('RGV_O15_ORDER', GROUPS['RGV_O15_ORDER'], (1, -1), 'GRP'),)),
'ROR_ROR': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('ROR_ROR_DEFINITION', GROUPS['ROR_ROR_DEFINITION'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RPA_I08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('RPA_I08_AUTHORIZATION', GROUPS['RPA_I08_AUTHORIZATION'], (0, 1), 'GRP'),
('RPA_I08_PROVIDER', GROUPS['RPA_I08_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('RPA_I08_INSURANCE', GROUPS['RPA_I08_INSURANCE'], (0, -1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RPA_I08_PROCEDURE', GROUPS['RPA_I08_PROCEDURE'], (1, -1), 'GRP'),
('RPA_I08_OBSERVATION', GROUPS['RPA_I08_OBSERVATION'], (0, -1), 'GRP'),
('RPA_I08_VISIT', GROUPS['RPA_I08_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RPI_I01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPI_I01_PROVIDER', GROUPS['RPI_I01_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RPI_I01_GUARANTOR_INSURANCE', GROUPS['RPI_I01_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RPI_I04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPI_I04_PROVIDER', GROUPS['RPI_I04_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RPI_I04_GUARANTOR_INSURANCE', GROUPS['RPI_I04_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RPL_I02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPL_I02_PROVIDER', GROUPS['RPL_I02_PROVIDER'], (1, -1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('DSP', SEGMENTS['DSP'], (0, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RPR_I03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('RPR_I03_PROVIDER', GROUPS['RPR_I03_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQA_I08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('RQA_I08_AUTHORIZATION', GROUPS['RQA_I08_AUTHORIZATION'], (0, 1), 'GRP'),
('RQA_I08_PROVIDER', GROUPS['RQA_I08_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RQA_I08_GUARANTOR_INSURANCE', GROUPS['RQA_I08_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RQA_I08_PROCEDURE', GROUPS['RQA_I08_PROCEDURE'], (0, -1), 'GRP'),
('RQA_I08_OBSERVATION', GROUPS['RQA_I08_OBSERVATION'], (0, -1), 'GRP'),
('RQA_I08_VISIT', GROUPS['RQA_I08_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQC_I05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('RQC_I05_PROVIDER', GROUPS['RQC_I05_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQI_I01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('RQI_I01_PROVIDER', GROUPS['RQI_I01_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('RQI_I01_GUARANTOR_INSURANCE', GROUPS['RQI_I01_GUARANTOR_INSURANCE'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQP_I04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('RQP_I04_PROVIDER', GROUPS['RQP_I04_PROVIDER'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RQQ_Q09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('ERQ', SEGMENTS['ERQ'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RRA_O18': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRA_O18_RESPONSE', GROUPS['RRA_O18_RESPONSE'], (0, 1), 'GRP'),)),
'RRD_O14': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRD_O14_RESPONSE', GROUPS['RRD_O14_RESPONSE'], (0, 1), 'GRP'),)),
'RRE_O12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRE_O12_RESPONSE', GROUPS['RRE_O12_RESPONSE'], (0, 1), 'GRP'),)),
'RRG_O16': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('RRG_O16_RESPONSE', GROUPS['RRG_O16_RESPONSE'], (0, 1), 'GRP'),)),
'RRI_I12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (0, 1), 'SEG'),
('RF1', SEGMENTS['RF1'], (0, 1), 'SEG'),
('RRI_I12_AUTHORIZATION_CONTACT', GROUPS['RRI_I12_AUTHORIZATION_CONTACT'], (0, 1), 'GRP'),
('RRI_I12_PROVIDER_CONTACT', GROUPS['RRI_I12_PROVIDER_CONTACT'], (1, -1), 'GRP'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('ACC', SEGMENTS['ACC'], (0, 1), 'SEG'),
('DG1', SEGMENTS['DG1'], (0, -1), 'SEG'),
('DRG', SEGMENTS['DRG'], (0, -1), 'SEG'),
('AL1', SEGMENTS['AL1'], (0, -1), 'SEG'),
('RRI_I12_PROCEDURE', GROUPS['RRI_I12_PROCEDURE'], (0, -1), 'GRP'),
('RRI_I12_OBSERVATION', GROUPS['RRI_I12_OBSERVATION'], (0, -1), 'GRP'),
('RRI_I12_PATIENT_VISIT', GROUPS['RRI_I12_PATIENT_VISIT'], (0, 1), 'GRP'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),)),
'RSP_K11': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_K13_ROW_DEFINITION', GROUPS['RSP_K13_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K15': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (0, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K21': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_K21_QUERY_RESPONSE', GROUPS['RSP_K21_QUERY_RESPONSE'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K22': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_K22_QUERY_RESPONSE', GROUPS['RSP_K22_QUERY_RESPONSE'], (0, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K23': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K24': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_K25': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_K25_STAFF', GROUPS['RSP_K25_STAFF'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_Z82': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_Z82_QUERY_RESPONSE', GROUPS['RSP_Z82_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_Z86': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RSP_Z86_QUERY_RESPONSE', GROUPS['RSP_Z86_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RSP_Z88': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_Z88_QUERY_RESPONSE', GROUPS['RSP_Z88_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (1, 1), 'SEG'),)),
'RSP_Z90': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RCP', SEGMENTS['RCP'], (1, 1), 'SEG'),
('RSP_Z90_QUERY_RESPONSE', GROUPS['RSP_Z90_QUERY_RESPONSE'], (1, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (1, 1), 'SEG'),)),
'RTB_K13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RTB_K13_ROW_DEFINITION', GROUPS['RTB_K13_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RTB_Knn': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RTB_Q13': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RTB_Q13_ROW_DEFINITION', GROUPS['RTB_Q13_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'RTB_Z74': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('QPD', SEGMENTS['QPD'], (1, 1), 'SEG'),
('RTB_Z74_ROW_DEFINITION', GROUPS['RTB_Z74_ROW_DEFINITION'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SIU_S12': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SCH', SEGMENTS['SCH'], (1, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('SIU_S12_PATIENT', GROUPS['SIU_S12_PATIENT'], (0, -1), 'GRP'),
('SIU_S12_RESOURCES', GROUPS['SIU_S12_RESOURCES'], (1, -1), 'GRP'),)),
'SPQ_Q08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SPR', SEGMENTS['SPR'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SQM_S25': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('SQM_S25_REQUEST', GROUPS['SQM_S25_REQUEST'], (0, 1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SQR_S25': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('SQR_S25_SCHEDULE', GROUPS['SQR_S25_SCHEDULE'], (0, -1), 'GRP'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'SRM_S01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('ARQ', SEGMENTS['ARQ'], (1, 1), 'SEG'),
('APR', SEGMENTS['APR'], (0, 1), 'SEG'),
('NTE', SEGMENTS['NTE'], (0, -1), 'SEG'),
('SRM_S01_PATIENT', GROUPS['SRM_S01_PATIENT'], (0, -1), 'GRP'),
('SRM_S01_RESOURCES', GROUPS['SRM_S01_RESOURCES'], (1, -1), 'GRP'),)),
'SRR_S01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('SRR_S01_SCHEDULE', GROUPS['SRR_S01_SCHEDULE'], (0, 1), 'GRP'),)),
'SSR_U04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('SAC', SEGMENTS['SAC'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'SSU_U03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('SSU_U03_SPECIMEN_CONTAINER', GROUPS['SSU_U03_SPECIMEN_CONTAINER'], (1, -1), 'GRP'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'SUR_P09': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('SUR_P09_FACILITY', GROUPS['SUR_P09_FACILITY'], (1, -1), 'GRP'),)),
'TBR_R08': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('ERR', SEGMENTS['ERR'], (0, 1), 'SEG'),
('QAK', SEGMENTS['QAK'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (1, 1), 'SEG'),
('RDT', SEGMENTS['RDT'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'TCU_U10': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('EQU', SEGMENTS['EQU'], (1, 1), 'SEG'),
('TCC', SEGMENTS['TCC'], (1, -1), 'SEG'),
('ROL', SEGMENTS['ROL'], (0, 1), 'SEG'),)),
'UDM_Q05': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('URD', SEGMENTS['URD'], (1, 1), 'SEG'),
('URS', SEGMENTS['URS'], (0, 1), 'SEG'),
('DSP', SEGMENTS['DSP'], (1, -1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'VQQ_Q07': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('VTQ', SEGMENTS['VTQ'], (1, 1), 'SEG'),
('RDF', SEGMENTS['RDF'], (0, 1), 'SEG'),
('DSC', SEGMENTS['DSC'], (0, 1), 'SEG'),)),
'VXQ_V01': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),)),
'VXR_V03': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('VXR_V03_PATIENT_VISIT', GROUPS['VXR_V03_PATIENT_VISIT'], (0, 1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('VXR_V03_INSURANCE', GROUPS['VXR_V03_INSURANCE'], (0, -1), 'GRP'),
('VXR_V03_ORDER', GROUPS['VXR_V03_ORDER'], (0, -1), 'GRP'),)),
'VXU_V04': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('PID', SEGMENTS['PID'], (1, 1), 'SEG'),
('PD1', SEGMENTS['PD1'], (0, 1), 'SEG'),
('NK1', SEGMENTS['NK1'], (0, -1), 'SEG'),
('VXU_V04_PATIENT', GROUPS['VXU_V04_PATIENT'], (0, 1), 'GRP'),
('GT1', SEGMENTS['GT1'], (0, -1), 'SEG'),
('VXU_V04_INSURANCE', GROUPS['VXU_V04_INSURANCE'], (0, -1), 'GRP'),
('VXU_V04_ORDER', GROUPS['VXU_V04_ORDER'], (0, -1), 'GRP'),)),
'VXX_V02': ('sequence',
(('MSH', SEGMENTS['MSH'], (1, 1), 'SEG'),
('MSA', SEGMENTS['MSA'], (1, 1), 'SEG'),
('QRD', SEGMENTS['QRD'], (1, 1), 'SEG'),
('QRF', SEGMENTS['QRF'], (0, 1), 'SEG'),
('VXX_V02_PATIENT', GROUPS['VXX_V02_PATIENT'], (1, -1), 'GRP'),)),
}
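# Illustrative sketch (an assumption inferred from the table above, not part
# of the original module): each value appears to be a ('sequence', children)
# pair where every child is a tuple of (name, definition, (min_repetitions,
# max_repetitions), kind), and a max of -1 means the element may repeat
# without bound.  A consumer could walk one structure roughly like this:
#
#     def iter_children(structure):
#         kind, children = structure
#         if kind != 'sequence':
#             raise ValueError('unexpected structure kind: %s' % kind)
#         for name, _definition, (min_rep, max_rep), child_kind in children:
#             yield name, min_rep, max_rep, child_kind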
| avg_line_length: 57.825409 | max_line_length: 129 | alphanum_fraction: 0.381103 |
| hexsha: 1135f4583fb01a2da58d40c2eef033b615b26b04 | size: 19853 | ext: py | lang: Python |
| repo_path: testplan/common/utils/sockets/fix/server.py | repo_name: ymn1k/testplan | repo_head_hexsha: b1bde8495c449d75a74a7fe4e7c6501b0476f833 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks field groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: 1 | forks_event: 2019-09-11T09:13:18.000Z to 2019-09-11T09:13:18.000Z |
"""Fix TCP server module."""
import errno
import socket
import select
import threading
from six.moves import queue as Queue
from testplan.common.utils.timing import (TimeoutException,
TimeoutExceptionInfo,
wait)
from testplan.common.utils.sockets.fix.utils import utc_timestamp
class ConnectionDetails(object):
"""
Contains all information required for each connection to the server
"""
def __init__(self, connection, name=None, queue=None,
in_seqno=1, out_seqno=1):
"""
Create a new ConnectionDetails. Only the connection is required
initially, as the rest of the details are set later.
:param connection: The connection
:type connection: ``socket._socketobject``
:param name: Name of connection (tuple of sender and target)
:type name: ``tuple`` of ``str`` and ``str``
        :param queue: Queue of received messages
:type queue: ``queue``
:param in_seqno: Input messages sequence number
:type in_seqno: ``int``
:param out_seqno: Output messages sequence number
:type out_seqno: ``int``
"""
self.connection = connection
self.name = name
self.queue = queue
self.in_seqno = in_seqno
self.out_seqno = out_seqno
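# FIX tag 35 (MsgType) identifies the message type: 'A' is Logon, '5' is
# Logout and '0' is Heartbeat.  The module-level helpers below classify
# incoming messages by checking that tag.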
def _has_logon_tag(msg):
"""
Check if it is a logon message.
:param msg: Fix message
:type msg: ``FixMessage``
:return: ``True`` if it is a logon message
:rtype: ``bool``
"""
return msg.tag_exact(35, b'A')
def _is_session_control_msg(msg):
"""
Check if message is logout or heartbeat.
:param msg: Fix message.
:type msg: ``FixMessage``
:return: ``True`` if it is a message with non-business code
:rtype: ``bool``
"""
return (_has_logout_tag(msg) or
_has_heartbeat_tag(msg))
def _has_logout_tag(msg):
"""
Check if logout message.
:param msg: Fix message.
:type msg: ``FixMessage``
:return: True if it is a logout message
:rtype: ``bool``
"""
return msg.tag_exact(35, b'5')
def _has_heartbeat_tag(msg):
"""
Check if heartbeat message.
:param msg: Fix message.
:type msg: ``FixMessage``
:return: True if it is a heartbeat message
:rtype: ``bool``
"""
return msg.tag_exact(35, b'0')
class Server(object):
"""
A server that can send and receive FIX messages over the session protocol.
Supports multiple connections.
The server stamps every outgoing message with the senderCompID and
targetCompID for the corresponding connection.
"""
def __init__(self, msgclass, codec, host='localhost', port=0,
version='FIX.4.2', logger=None):
"""
Create a new FIX server.
This constructor takes parameters that specify the address (host, port)
to connect to. The server stamps every outgoing message with the
senderCompID and targetCompID for the corresponding connection.
:param msgclass: Type used to send and receive FIX messages.
:type msgclass: ``type``
:param codec: A Codec to use to encode and decode FIX messages.
:type codec: a ``Codec`` instance
:param host: hostname or IP address to bind to.
:type host: ``str``
:param port: port number
:type port: ``str`` or ``int``
:param version: FIX version, defaults to "FIX.4.2". This string is used
as the contents of tag 8 (BeginString).
:type version: ``str``
:param logger: Logger instance to be used.
:type logger: ``logging.Logger``
"""
self._input_host = host
self._input_port = port
self._ip = None
self._port = None
self.version = version
self.msgclass = msgclass
self.codec = codec
self.log_callback = logger.debug if logger else lambda msg: None
self._listening = False
self._conndetails_by_fd = {}
self._conndetails_by_name = {}
self._first_sender = None
self._first_target = None
self._socket = None
self._recv_thread = None
self._lock = threading.Lock()
self._pobj = select.poll()
@property
def host(self):
"""Input host provided."""
return self._input_host
@property
def ip(self):
"""IP retrieved from socket."""
return self._ip
@property
def port(self):
"""Port retrieved after binding."""
return self._port
def start(self, timeout=30):
"""
Start the FIX server.
"""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self._input_host, self._input_port))
self._ip, self._port = self._socket.getsockname()
self.log_callback('Started server on {}:{}'.format(
self.host, self.port))
self._recv_thread = threading.Thread(target=self._listen)
self._recv_thread.daemon = True
self._recv_thread.start()
timeout_info = TimeoutExceptionInfo()
wait(lambda: self._listening, timeout=timeout, interval=0.1)
if not self._listening:
raise TimeoutException(
'Could not start server: timed out on listening. {}'.format(
timeout_info.msg()))
self.log_callback('Listening for socket events.')
def _listen(self):
"""
Listen for new inbound connections and messages from existing
connections.
"""
self._socket.listen(1)
self._listening = True
self._pobj.register(self._socket.fileno(),
select.POLLIN | select.POLLNVAL | select.POLLHUP)
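        # Both the listening socket and (later) each accepted connection are
        # registered with the same poll object: POLLIN signals readable data
        # (or a pending accept on the listening fd), while POLLNVAL/POLLHUP
        # mean the descriptor should be torn down.  Note that poll() takes
        # its timeout in milliseconds, so the loop below wakes up frequently
        # enough to notice when ``self._listening`` is cleared.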
closed = False
while (not closed) and self._listening:
events = self._pobj.poll(1.0)
for fdesc, event in events:
if fdesc == self._socket.fileno():
# Socket event received
if event in [select.POLLNVAL, select.POLLHUP]:
self.log_callback('"Close socket" event received.')
closed = True
break # out of 'for'
elif event == select.POLLIN:
self.log_callback('"New connection" event received.')
self._add_connection()
else:
raise Exception(
'Unexpected event {0} on fdesc {1}.'.format(
event, fdesc))
else:
# Connection event received
self._process_connection_event(fdesc, event)
self._remove_all_connections()
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
def _add_connection(self):
"""
Accept new inbound connection from socket.
"""
connection, _ = self._socket.accept()
conn_details = ConnectionDetails(connection)
self._conndetails_by_fd[connection.fileno()] = conn_details
self._pobj.register(connection.fileno(),
select.POLLIN | select.POLLNVAL | select.POLLHUP)
def _remove_connection(self, fdesc):
"""
Unregister, close and remove inbound connection with given fd.
:param fdesc: File descriptor of connection to be removed.
:type fdesc: ``int``
"""
self._pobj.unregister(fdesc)
try:
self._conndetails_by_fd[fdesc].connection.shutdown(socket.SHUT_RDWR)
except socket.error as serr:
if serr.errno != errno.ENOTCONN:
raise
# Else, client already closed the connection.
self._conndetails_by_fd[fdesc].connection.close()
name = self._conndetails_by_fd[fdesc].name
del self._conndetails_by_fd[fdesc]
if name in self._conndetails_by_name:
del self._conndetails_by_name[name]
def _remove_all_connections(self):
"""
Unregister, close and remove all existing inbound connections.
"""
for fdesc in self._conndetails_by_fd:
self._pobj.unregister(fdesc)
self._conndetails_by_fd[fdesc].connection.shutdown(socket.SHUT_RDWR)
self._conndetails_by_fd[fdesc].connection.close()
            name = self._conndetails_by_fd[fdesc].name
            if name in self._conndetails_by_name:
                del self._conndetails_by_name[name]
self._conndetails_by_fd = {}
def _process_connection_event(self, fdesc, event):
"""
Process an event received from a connection.
:param fdesc: File descriptor of the connection the message was
received from.
:type fdesc: ``int``
:param event: Event received from connection.
        :type event: ``int``
"""
connection = self._conndetails_by_fd[fdesc].connection
if event == select.POLLIN:
with self._lock:
data = connection.recv(4096)
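                # recv() returning an empty byte string means the peer closed
                # its end of the connection.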
if not data:
self.log_callback(
'Closing connection {} since no data available'.format(
self._conndetails_by_fd[fdesc].name))
self._remove_connection(fdesc)
else:
msg = self.msgclass.from_buffer(data, self.codec)
self._process_message(fdesc, msg)
elif event in [select.POLLNVAL, select.POLLHUP]:
            self.log_callback(
                'Closing connection {}: close event received'.format(
                    self._conndetails_by_fd[fdesc].name))
self._remove_connection(fdesc)
else:
raise Exception(
'unexpected event {0} on fdesc {1}'.format(event, fdesc))
def _process_message(self, fdesc, msg):
"""
Process given message received from connection with given fd.
:param fdesc: File descriptor of connection message was received from.
:type fdesc: ``int``
:param msg: Fix message received.
:type msg: ``FixMessage``
"""
conn_name = (msg[56], msg[49])
if _has_logout_tag(msg):
self._no_lock_send(msg, conn_name, fdesc)
self._remove_connection(fdesc)
elif self._conn_loggedon(conn_name):
if _is_session_control_msg(msg):
self.log_callback(
'Session control msg from {}'.format(conn_name))
self._no_lock_send(msg, conn_name)
else:
self.log_callback('Incoming data msg from {}'.format(conn_name))
self._conndetails_by_name[conn_name].in_seqno += 1
self._conndetails_by_name[conn_name].queue.put(msg, True, 1)
elif _has_logon_tag(msg):
self._logon_connection(fdesc, conn_name)
self._no_lock_send(msg, conn_name)
else:
raise Exception(
'Connection {} sent msg before logon'.format(conn_name))
def _conn_loggedon(self, conn_name):
"""
Check if given connection is logged on.
:param conn_name: Connection name.
:type conn_name: ``tuple`` of ``str`` and ``str``
        :return: ``True`` if the connection has already logged on
:rtype: ``bool``
"""
return conn_name in self._conndetails_by_name
def _logon_connection(self, fdesc, conn_name):
"""
Logon given connection for given file descriptor.
:param fdesc: File descriptor of connection.
:type fdesc: ``int``
:param conn_name: Connection name.
:type conn_name: ``tuple`` of ``str`` and ``str``
"""
conndetails = self._conndetails_by_fd[fdesc]
conndetails.name = conn_name
conndetails.queue = Queue.Queue()
conndetails.in_seqno = 1
conndetails.out_seqno = 1
self._conndetails_by_name[conn_name] = conndetails
if self._first_sender is None:
(self._first_sender, self._first_target) = conn_name
self.log_callback('Logged on connection {}.'.format(conn_name))
def active_connections(self):
"""
Returns a list of currently active connections
:return: List of active connection names (each a tuple of sender and
target)
:rtype: ``list`` of ``tuple`` of ``str`` and ``str``
"""
return [detail.name
for detail in self._conndetails_by_fd.itervalues()
if detail.name is not None]
def is_connection_active(self, conn_name):
"""
Checks whether the given connection is currently active.
:param conn_name: Connection name to be checked if active
:type conn_name: ``tuple`` of ``str`` and ``str``
:return: ``True`` if the given connection is active. ``False`` otherwise
:rtype: ``bool``
"""
return conn_name in self._conndetails_by_name
def stop(self):
"""
Close the connection.
"""
self._listening = False
if self._recv_thread:
self._recv_thread.join()
self.log_callback('Stopped server.')
def _validate_connection_name(self, conn_name):
"""
Check if given connection name is valid.
If this is ``(None, None)``, then the connection defaults to the one
and only existing active connection. If there are more active
connections or the initial connection is no longer valid this will fail.
The tuple of ``(sender, target)`` represents the connection name.
        :param conn_name: Connection name, a tuple of (sender id, target id).
        :type conn_name: ``tuple`` of ``str`` and ``str``
:return: Connection name to send message to.
:rtype: ``tuple`` of ``str`` and ``str``
"""
sender, target = conn_name
if (sender, target) == (None, None):
if len(self._conndetails_by_name) != 1:
raise Exception('Cannot use default connection '
'since more connections active')
(sender, target) = (self._first_sender, self._first_target)
if not self.is_connection_active((sender, target)):
raise Exception('Connection {} not active'.format((sender, target)))
return sender, target
def _add_msg_tags(self, msg, conn_name, fdesc=None):
"""
Add session tags and senderCompID and targetCompID tags to the given
FIX message.
:param msg: Message to be sent.
:type msg: ``FixMessage``
        :param conn_name: Connection name, a tuple of (sender id, target id).
        :type conn_name: ``tuple`` of ``str`` and ``str``
        :param fdesc: Optional file descriptor used to look up the connection.
        :type fdesc: ``int``
:return: The FIX msg with the tags set.
:rtype: ``FixMessage``
"""
sender, target = conn_name
msg[8] = self.version
if fdesc:
conndetails = self._conndetails_by_fd[fdesc]
else:
conndetails = self._conndetails_by_name[(sender, target)]
msg[34] = conndetails.out_seqno
conndetails.out_seqno += 1
msg[49] = sender
msg[56] = target
msg[52] = getattr(self.codec, 'utc_timestamp', utc_timestamp)()
return msg
def _no_lock_send(self, msg, conn_name, fdesc=None):
"""
Send the given Fix message through the given connection, expecting
the lock is already acquired.
The message will be enriched with session tags and sequence numbers.
:param msg: message to be sent
:type msg: ``FixMessage``
        :param conn_name: Connection name, a tuple of (sender id, target id).
        :type conn_name: ``tuple`` of ``str`` and ``str``
        :param fdesc: Optional file descriptor of the connection to send on.
        :type fdesc: ``int``
"""
sender, target = conn_name
msg = self._add_msg_tags(msg, (sender, target), fdesc)
self.log_callback('Sending on connection {} message {}'.format(
(sender, target), msg))
if fdesc:
self._conndetails_by_fd[fdesc].connection.send(
msg.to_wire(self.codec))
else:
self._conndetails_by_name[(sender, target)].connection.send(
msg.to_wire(self.codec))
def send(self, msg, conn_name=(None, None)):
"""
Send the given Fix message through the given connection.
The message will be enriched with session tags and sequence numbers.
The connection name - (sender, target) - defaults to (None, None).
In this case, the server will try to find the one and only available
connection. This will fail if there are more connections available or
if the initial connection is no longer active.
:param msg: Message to be sent.
:type msg: ``FixMessage``
:param conn_name: Connection name to send message to. This is the tuple
(sender id, target id)
:type conn_name: ``tuple`` of ``str`` and ``str``
:return: Fix message sent
:rtype: ``FixMessage``
"""
conn_name = self._validate_connection_name(
self._encode_conn_name(conn_name))
with self._lock:
conn_name = self._validate_connection_name(conn_name)
msg = self._add_msg_tags(msg, conn_name)
self.log_callback('Sending on connection {} message {}'.format(
conn_name, msg))
self._conndetails_by_name[conn_name].connection.send(
msg.to_wire(self.codec))
return msg
def receive(self, conn_name=(None, None), timeout=30):
"""
Receive a FIX message from the given connection.
The connection name defaults to ``(None, None)``. In this case,
the server will try to find the one and only available connection.
This will fail if there are more connections available or if the initial
connection is no longer active.
:param conn_name: Connection name to receive message from
:type conn_name: ``tuple`` of ``str`` and ``str``
:param timeout: timeout in seconds
:type timeout: ``int``
:return: Fix message received
:rtype: ``FixMessage``
"""
conn_name = self._validate_connection_name(
self._encode_conn_name(conn_name))
return self._conndetails_by_name[conn_name].queue.get(True, timeout)
def _encode_conn_name(self, conn_name):
return (conn_name[0].encode('utf-8') if conn_name[0] else conn_name[0],
conn_name[1].encode('utf-8') if conn_name[1] else conn_name[1])
def flush(self):
"""
Flush the receive queues.
"""
for conn in self._conndetails_by_name:
self._flush_queue(self._conndetails_by_name[conn].queue)
if self.log_callback:
self.log_callback('Flushed received message queues')
def _flush_queue(self, queue):
"""
Flush the given receive queue.
:param queue: Queue to flush.
:type queue: ``queue``
"""
try:
while True:
queue.get(False)
except Queue.Empty:
return
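# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving the server defined above. It assumes the
# caller has already constructed and started an instance of the server class
# (defined earlier in this module) and has a FixMessage-like `msg` to send.
def _example_exchange(server, msg):
    """Send a message on the only active connection and wait for a reply."""
    server.flush()                      # drop any stale queued messages
    sent = server.send(msg)             # conn_name defaults to (None, None)
    reply = server.receive(timeout=10)  # block up to 10s for an inbound msg
    return sent, reply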
| 35.771171
| 81
| 0.57825
|
93271d07d72566f84da160de166d6ea6417c4a91
| 7,443
|
py
|
Python
|
nova/context.py
|
bopopescu/nova-28
|
add7f5625ba49c0575328294a796428c443cd988
|
[
"Apache-2.0"
] | null | null | null |
nova/context.py
|
bopopescu/nova-28
|
add7f5625ba49c0575328294a796428c443cd988
|
[
"Apache-2.0"
] | null | null | null |
nova/context.py
|
bopopescu/nova-28
|
add7f5625ba49c0575328294a796428c443cd988
|
[
"Apache-2.0"
] | 1
|
2020-07-24T06:47:54.000Z
|
2020-07-24T06:47:54.000Z
|
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import copy
import uuid
import six
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
str(kwargs))
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('volume',)]
else:
# if list is empty or none
self.service_catalog = []
self.instance_lock_checked = instance_lock_checked
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def update_store(self):
local.store.context = self
def to_dict(self):
return {'user_id': self.user_id,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
'instance_lock_checked': self.instance_lock_checked,
'tenant': self.tenant,
'user': self.user}
@classmethod
def from_dict(cls, values):
values.pop('user', None)
values.pop('tenant', None)
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
# NOTE(sirp): the openstack/common version of RequestContext uses
# tenant/user whereas the Nova version uses project_id/user_id. We need
# this shim in order to use context-aware code from openstack/common, like
# logging, until we make the switch to using openstack/common's version of
# RequestContext.
@property
def tenant(self):
return self.project_id
@property
def user(self):
return self.user_id
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_admin_context(ctxt):
"""Raise exception.AdminRequired() if context is an admin context."""
if not ctxt.is_admin:
raise exception.AdminRequired()
def require_context(ctxt):
"""Raise exception.Forbidden() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.Forbidden()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.Forbidden()
elif context.project_id != project_id:
raise exception.Forbidden()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.Forbidden()
elif context.user_id != user_id:
raise exception.Forbidden()
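# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of how the helpers above fit together;
# the ids are made-up placeholders.
def _example_context_flow():
    ctxt = RequestContext(user_id='fake-user', project_id='fake-project',
                          is_admin=False, overwrite=False)
    assert is_user_context(ctxt)
    # Serialization round-trip, e.g. for passing the context over RPC.
    ctxt2 = RequestContext.from_dict(ctxt.to_dict())
    # Temporarily elevate to admin for a privileged call.
    admin_ctxt = ctxt.elevated()
    require_admin_context(admin_ctxt)
    return ctxt2, admin_ctxt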
| 34.299539
| 78
| 0.641811
|
9b56ea53ea19099a0cabda141e878f9d9bbc6503
| 30,751
|
py
|
Python
|
pywinauto/controls/uiawrapper.py
|
badari412/pywinauto
|
9380de8ded10956573fb496af4722ccf526fe743
|
[
"BSD-3-Clause"
] | 1
|
2018-12-02T07:17:21.000Z
|
2018-12-02T07:17:21.000Z
|
pywinauto/controls/uiawrapper.py
|
rla006/pywinauto
|
dc855275cb27be796be55b51f37e5a4082a2cbcc
|
[
"BSD-3-Clause"
] | null | null | null |
pywinauto/controls/uiawrapper.py
|
rla006/pywinauto
|
dc855275cb27be796be55b51f37e5a4082a2cbcc
|
[
"BSD-3-Clause"
] | null | null | null |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Basic wrapping of UI Automation elements"""
from __future__ import unicode_literals
from __future__ import print_function
import six
import time
import warnings
import comtypes
from .. import backend
from ..timings import Timings
from ..base_wrapper import BaseWrapper
from ..base_wrapper import BaseMeta
from ..uia_defines import IUIA
from .. import uia_defines as uia_defs
from ..uia_element_info import UIAElementInfo, elements_from_uia_array
# region PATTERNS
AutomationElement = IUIA().ui_automation_client.IUIAutomationElement
DockPattern = IUIA().ui_automation_client.IUIAutomationDockPattern
ExpandCollapsePattern = IUIA().ui_automation_client.IUIAutomationExpandCollapsePattern
GridItemPattern = IUIA().ui_automation_client.IUIAutomationGridItemPattern
GridPattern = IUIA().ui_automation_client.IUIAutomationGridPattern
InvokePattern = IUIA().ui_automation_client.IUIAutomationInvokePattern
ItemContainerPattern = IUIA().ui_automation_client.IUIAutomationItemContainerPattern
LegacyIAccessiblePattern = IUIA().ui_automation_client.IUIAutomationLegacyIAccessiblePattern
MultipleViewPattern = IUIA().ui_automation_client.IUIAutomationMultipleViewPattern
RangeValuePattern = IUIA().ui_automation_client.IUIAutomationRangeValuePattern
ScrollItemPattern = IUIA().ui_automation_client.IUIAutomationScrollItemPattern
ScrollPattern = IUIA().ui_automation_client.IUIAutomationScrollPattern
SelectionItemPattern = IUIA().ui_automation_client.IUIAutomationSelectionItemPattern
SelectionPattern = IUIA().ui_automation_client.IUIAutomationSelectionPattern
SynchronizedInputPattern = IUIA().ui_automation_client.IUIAutomationSynchronizedInputPattern
TableItemPattern = IUIA().ui_automation_client.IUIAutomationTableItemPattern
TablePattern = IUIA().ui_automation_client.IUIAutomationTablePattern
TextPattern = IUIA().ui_automation_client.IUIAutomationTextPattern
TogglePattern = IUIA().ui_automation_client.IUIAutomationTogglePattern
TransformPattern = IUIA().ui_automation_client.IUIAutomationTransformPattern
ValuePattern = IUIA().ui_automation_client.IUIAutomationValuePattern
VirtualizedItemPattern = IUIA().ui_automation_client.IUIAutomationVirtualizedItemPattern
WindowPattern = IUIA().ui_automation_client.IUIAutomationWindowPattern
# endregion
# =========================================================================
_friendly_classes = {
'Custom': None,
'DataGrid': 'ListView',
'DataItem': 'DataItem',
'Document': None, # TODO: this is RichTextBox
'Group': 'GroupBox',
'Header': None,
'HeaderItem': None,
'Hyperlink': None,
'Image': None,
'List': 'ListBox',
'ListItem': 'ListItem',
'MenuBar': 'Menu',
'Menu': 'Menu',
'MenuItem': 'MenuItem',
'Pane': None,
'ProgressBar': 'Progress',
'ScrollBar': None,
'Separator': None,
'Slider': None,
'Spinner': 'UpDown',
'SplitButton': None,
'Tab': 'TabControl',
'Table': None,
'Text': 'Static',
'Thumb': None,
'TitleBar': None,
'ToolBar': 'Toolbar',
'ToolTip': 'ToolTips',
'Tree': 'TreeView',
'TreeItem': 'TreeItem',
'Window': 'Dialog',
}
# =========================================================================
class LazyProperty(object):
"""
A lazy evaluation of an object attribute.
The property should represent immutable data, as it replaces itself.
Provided by: http://stackoverflow.com/a/6849299/1260742
"""
def __init__(self, fget):
"""Init the property name and method to calculate the property"""
self.fget = fget
self.func_name = fget.__name__
def __get__(self, obj, cls):
"""Replace the property itself on a first access"""
if obj is None:
return None
value = self.fget(obj)
setattr(obj, self.func_name, value)
return value
lazy_property = LazyProperty
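# Illustrative sketch (not part of the original module) of how lazy_property
# behaves: the first access runs the decorated method, and the computed value
# then shadows the descriptor on the instance, so later accesses are plain
# attribute reads.
#
#     class _Demo(object):
#         @lazy_property
#         def answer(self):
#             return 6 * 7   # computed once, then cached on the instance
#
#     _Demo().answer  # -> 42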
# =========================================================================
class UiaMeta(BaseMeta):
"""Metaclass for UiaWrapper objects"""
control_type_to_cls = {}
def __init__(cls, name, bases, attrs):
"""Register the control types"""
BaseMeta.__init__(cls, name, bases, attrs)
for t in cls._control_types:
UiaMeta.control_type_to_cls[t] = cls
@staticmethod
def find_wrapper(element):
"""Find the correct wrapper for this UIA element"""
# Set a general wrapper by default
wrapper_match = UIAWrapper
# Check for a more specific wrapper in the registry
if element.control_type in UiaMeta.control_type_to_cls:
wrapper_match = UiaMeta.control_type_to_cls[element.control_type]
return wrapper_match
# =========================================================================
@six.add_metaclass(UiaMeta)
class UIAWrapper(BaseWrapper):
"""
Default wrapper for User Interface Automation (UIA) controls.
All other UIA wrappers are derived from this.
This class wraps a lot of functionality of underlying UIA features
for working with windows.
Most of the methods apply to every single element type. For example
you can click() on any element.
"""
_control_types = []
# ------------------------------------------------------------
def __new__(cls, element_info):
"""Construct the control wrapper"""
return super(UIAWrapper, cls)._create_wrapper(cls, element_info, UIAWrapper)
# -----------------------------------------------------------
def __init__(self, element_info):
"""
Initialize the control
* **element_info** is either a valid UIAElementInfo or it can be an
instance or subclass of UIAWrapper.
If the handle is not valid then an InvalidWindowHandle error
is raised.
"""
BaseWrapper.__init__(self, element_info, backend.registry.backends['uia'])
# ------------------------------------------------------------
def __hash__(self):
"""Return a unique hash value based on the element's Runtime ID"""
return hash(self.element_info.runtime_id)
# ------------------------------------------------------------
@lazy_property
def iface_expand_collapse(self):
"""Get the element's ExpandCollapse interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "ExpandCollapse")
# ------------------------------------------------------------
@lazy_property
def iface_selection(self):
"""Get the element's Selection interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Selection")
# ------------------------------------------------------------
@lazy_property
def iface_selection_item(self):
"""Get the element's SelectionItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "SelectionItem")
# ------------------------------------------------------------
@lazy_property
def iface_invoke(self):
"""Get the element's Invoke interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Invoke")
# ------------------------------------------------------------
@lazy_property
def iface_toggle(self):
"""Get the element's Toggle interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Toggle")
# ------------------------------------------------------------
@lazy_property
def iface_text(self):
"""Get the element's Text interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Text")
# ------------------------------------------------------------
@lazy_property
def iface_value(self):
"""Get the element's Value interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Value")
# ------------------------------------------------------------
@lazy_property
def iface_range_value(self):
"""Get the element's RangeValue interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "RangeValue")
# ------------------------------------------------------------
@lazy_property
def iface_grid(self):
"""Get the element's Grid interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Grid")
# ------------------------------------------------------------
@lazy_property
def iface_grid_item(self):
"""Get the element's GridItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "GridItem")
# ------------------------------------------------------------
@lazy_property
def iface_table(self):
"""Get the element's Table interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Table")
# ------------------------------------------------------------
@lazy_property
def iface_table_item(self):
"""Get the element's TableItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "TableItem")
# ------------------------------------------------------------
@lazy_property
def iface_scroll_item(self):
"""Get the element's ScrollItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "ScrollItem")
# ------------------------------------------------------------
@lazy_property
def iface_scroll(self):
"""Get the element's Scroll interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Scroll")
# ------------------------------------------------------------
@lazy_property
def iface_transform(self):
"""Get the element's Transform interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Transform")
# ------------------------------------------------------------
@lazy_property
def iface_transformV2(self):
"""Get the element's TransformV2 interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "TransformV2")
# ------------------------------------------------------------
@lazy_property
def iface_window(self):
"""Get the element's Window interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "Window")
# ------------------------------------------------------------
@lazy_property
def iface_item_container(self):
"""Get the element's ItemContainer interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "ItemContainer")
# ------------------------------------------------------------
@lazy_property
def iface_virtualized_item(self):
"""Get the element's VirtualizedItem interface pattern"""
elem = self.element_info.element
return uia_defs.get_elem_interface(elem, "VirtualizedItem")
# ------------------------------------------------------------
@property
def writable_props(self):
"""Extend default properties list."""
props = super(UIAWrapper, self).writable_props
props.extend(['is_keyboard_focusable',
'has_keyboard_focus',
'automation_id',
])
return props
# ------------------------------------------------------------
def legacy_properties(self):
"""Get the element's LegacyIAccessible control pattern interface properties"""
elem = self.element_info.element
impl = uia_defs.get_elem_interface(elem, "LegacyIAccessible")
property_name_identifier = 'Current'
interface_properties = [prop for prop in dir(LegacyIAccessiblePattern)
if (isinstance(getattr(LegacyIAccessiblePattern, prop), property)
and property_name_identifier in prop)]
return {prop.replace(property_name_identifier, '') : getattr(impl, prop) for prop in interface_properties}
# ------------------------------------------------------------
def friendly_class_name(self):
"""
Return the friendly class name for the control
This differs from the class of the control in some cases.
class_name() is the actual 'Registered' window class of the control
while friendly_class_name() is hopefully something that will make
more sense to the user.
For example Checkboxes are implemented as Buttons - so the class
of a CheckBox is "Button" - but the friendly class is "CheckBox"
"""
if self.friendlyclassname is None:
if self.element_info.control_type not in IUIA().known_control_types.keys():
self.friendlyclassname = self.element_info.control_type
else:
ctrl_type = self.element_info.control_type
if (ctrl_type not in _friendly_classes) or (_friendly_classes[ctrl_type] is None):
self.friendlyclassname = ctrl_type
else:
self.friendlyclassname = _friendly_classes[ctrl_type]
return self.friendlyclassname
#------------------------------------------------------------
def automation_id(self):
"""Return the Automation ID of the control"""
return self.element_info.automation_id
# -----------------------------------------------------------
def is_keyboard_focusable(self):
"""Return True if the element can be focused with keyboard"""
return self.element_info.element.CurrentIsKeyboardFocusable == 1
# -----------------------------------------------------------
def has_keyboard_focus(self):
"""Return True if the element is focused with keyboard"""
return self.element_info.element.CurrentHasKeyboardFocus == 1
# -----------------------------------------------------------
def set_focus(self):
"""Set the focus to this element"""
try:
if self.is_minimized():
if self.was_maximized():
self.maximize()
else:
self.restore()
except uia_defs.NoPatternInterfaceError:
pass
try:
self.element_info.element.SetFocus()
except comtypes.COMError as exc:
warnings.warn('The window has not been focused due to ' \
'COMError: {}'.format(exc), RuntimeWarning)
return self
# TODO: figure out how to implement .has_focus() method (if no handle available)
# -----------------------------------------------------------
def close(self):
"""
Close the window
Only a control supporting Window pattern should answer.
If it doesn't (menu shadows, tooltips,...), try to send "Esc" key
"""
try:
name = self.element_info.name
control_type = self.element_info.control_type
iface = self.iface_window
iface.Close()
if name and control_type:
self.actions.log("Closed " + control_type.lower() + ' "' + name + '"')
        except uia_defs.NoPatternInterfaceError:
self.type_keys("{ESC}")
# -----------------------------------------------------------
def minimize(self):
"""
Minimize the window
Only controls supporting Window pattern should answer
"""
iface = self.iface_window
if iface.CurrentCanMinimize:
iface.SetWindowVisualState(uia_defs.window_visual_state_minimized)
return self
# -----------------------------------------------------------
def maximize(self):
"""
Maximize the window
Only controls supporting Window pattern should answer
"""
iface = self.iface_window
if iface.CurrentCanMaximize:
iface.SetWindowVisualState(uia_defs.window_visual_state_maximized)
return self
# -----------------------------------------------------------
def restore(self):
"""
Restore the window to normal size
Only controls supporting Window pattern should answer
"""
iface = self.iface_window
iface.SetWindowVisualState(uia_defs.window_visual_state_normal)
return self
# -----------------------------------------------------------
def get_show_state(self):
"""Get the show state and Maximized/minimzed/restored state
Returns values as following
window_visual_state_normal = 0
window_visual_state_maximized = 1
window_visual_state_minimized = 2
"""
iface = self.iface_window
ret = iface.CurrentWindowVisualState
return ret
# -----------------------------------------------------------
def is_minimized(self):
"""Indicate whether the window is minimized or not"""
return self.get_show_state() == uia_defs.window_visual_state_minimized
# -----------------------------------------------------------
def is_maximized(self):
"""Indicate whether the window is maximized or not"""
return self.get_show_state() == uia_defs.window_visual_state_maximized
# -----------------------------------------------------------
def is_normal(self):
"""Indicate whether the window is normal (i.e. not minimized and not maximized)"""
return self.get_show_state() == uia_defs.window_visual_state_normal
# -----------------------------------------------------------
def invoke(self):
"""An interface to the Invoke method of the Invoke control pattern"""
name = self.element_info.name
control_type = self.element_info.control_type
self.iface_invoke.Invoke()
if name and control_type:
self.actions.log("Invoked " + control_type.lower() + ' "' + name + '"')
# Return itself to allow action chaining
return self
# -----------------------------------------------------------
def expand(self):
"""
Displays all child nodes, controls, or content of the control
An interface to Expand method of the ExpandCollapse control pattern.
"""
self.iface_expand_collapse.Expand()
# Return itself to allow action chaining
return self
# -----------------------------------------------------------
def collapse(self):
"""
        Hides all child nodes, controls, or content of the control
An interface to Collapse method of the ExpandCollapse control pattern.
"""
self.iface_expand_collapse.Collapse()
# Return itself to allow action chaining
return self
# -----------------------------------------------------------
def get_expand_state(self):
"""
Indicates the state of the control: expanded or collapsed.
An interface to CurrentExpandCollapseState property of the ExpandCollapse control pattern.
Values for enumeration as defined in uia_defines module:
expand_state_collapsed = 0
expand_state_expanded = 1
expand_state_partially = 2
expand_state_leaf_node = 3
"""
return self.iface_expand_collapse.CurrentExpandCollapseState
# -----------------------------------------------------------
def is_expanded(self):
"""Test if the control is expanded"""
state = self.get_expand_state()
return state == uia_defs.expand_state_expanded
# -----------------------------------------------------------
def is_collapsed(self):
"""Test if the control is collapsed"""
state = self.get_expand_state()
return state == uia_defs.expand_state_collapsed
# -----------------------------------------------------------
def get_selection(self):
"""
An interface to GetSelection of the SelectionProvider pattern
Retrieves a UI Automation provider for each child element
that is selected. Builds a list of UIAElementInfo elements
from all retrieved providers.
"""
ptrs_array = self.iface_selection.GetCurrentSelection()
return elements_from_uia_array(ptrs_array)
# -----------------------------------------------------------
def selected_item_index(self):
"""Return the index of a selected item"""
# Go through all children and look for an index
# of an item with the same text.
# Maybe there is another and more efficient way to do it
selection = self.get_selection()
if selection:
for i, c in enumerate(self.children()):
if c.window_text() == selection[0].name:
return i
return None
# -----------------------------------------------------------
def select(self):
"""Select the item
Only items supporting SelectionItem pattern should answer.
Raise NoPatternInterfaceError if the pattern is not supported
Usually applied for controls like: a radio button, a tree view item
or a list item.
"""
self.iface_selection_item.Select()
name = self.element_info.name
control_type = self.element_info.control_type
if name and control_type:
self.actions.log("Selected " + control_type.lower() + ' "' + name + '"')
# Return itself so that action can be chained
return self
# -----------------------------------------------------------
def is_selected(self):
"""Indicate that the item is selected or not.
Only items supporting SelectionItem pattern should answer.
Raise NoPatternInterfaceError if the pattern is not supported
Usually applied for controls like: a radio button, a tree view item,
a list item.
"""
return self.iface_selection_item.CurrentIsSelected
# -----------------------------------------------------------
def children_texts(self):
"""Get texts of the control's children"""
return [c.window_text() for c in self.children()]
# -----------------------------------------------------------
def can_select_multiple(self):
"""
An interface to CanSelectMultiple of the SelectionProvider pattern
Indicates whether the UI Automation provider allows more than one
child element to be selected concurrently.
"""
return self.iface_selection.CurrentCanSelectMultiple
# -----------------------------------------------------------
def is_selection_required(self):
"""
An interface to IsSelectionRequired property of the SelectionProvider pattern.
This property can be dynamic. For example, the initial state of
a control might not have any items selected by default,
meaning that IsSelectionRequired is FALSE. However,
after an item is selected the control must always have
at least one item selected.
"""
return self.iface_selection.CurrentIsSelectionRequired
# -----------------------------------------------------------
def _select(self, item=None):
"""
Find a child item by the name or index and select
        The action can be applied to different controls with items:
ComboBox, TreeView, Tab control
"""
if isinstance(item, six.integer_types):
item_index = item
title = None
elif isinstance(item, six.string_types):
item_index = 0
title = item
else:
err_msg = u"unsupported {0} for item {1}".format(type(item), item)
raise ValueError(err_msg)
list_ = self.children(title=title)
if item_index < len(list_):
wrp = list_[item_index]
wrp.iface_selection_item.Select()
else:
raise IndexError("item not found")
# -----------------------------------------------------------
def is_active(self):
"""Whether the window is active or not"""
ae = IUIA().get_focused_element()
focused_wrap = UIAWrapper(UIAElementInfo(ae))
return (focused_wrap.top_level_parent() == self.top_level_parent())
# -----------------------------------------------------------
def is_dialog(self):
"""Return true if the control is a dialog window (WindowPattern interface is available)"""
try:
return self.iface_window is not None
except uia_defs.NoPatternInterfaceError:
return False
# -----------------------------------------------------------
    def menu_select(self, path, exact=False):
"""Select a menu item specified in the path
The full path syntax is specified in:
:py:meth:`pywinauto.menuwrapper.Menu.get_menu_path`
There are usually at least two menu bars: "System" and "Application"
System menu bar is a standard window menu with items like:
'Restore', 'Move', 'Size', 'Minimize', e.t.c.
This menu bar usually has a "Title Bar" control as a parent.
Application menu bar is often what we look for. In most cases,
its parent is the dialog itself so it should be found among the direct
children of the dialog. Notice that we don't use "Application"
        string as a title criterion because it wouldn't work on applications
        with a non-English localization.
        If no menu bar is found, we fall back to looking for a Menu control
        among all descendants of the dialog.
"""
self.verify_actionable()
cc = self.children(control_type="MenuBar")
if not cc:
cc = self.descendants(control_type="Menu")
if not cc:
raise AttributeError
menu = cc[0]
menu.item_by_path(path, exact).select()
# -----------------------------------------------------------
_scroll_types = {
"left": {
"line": (uia_defs.scroll_small_decrement, uia_defs.scroll_no_amount),
"page": (uia_defs.scroll_large_decrement, uia_defs.scroll_no_amount),
},
"right": {
"line": (uia_defs.scroll_small_increment, uia_defs.scroll_no_amount),
"page": (uia_defs.scroll_large_increment, uia_defs.scroll_no_amount),
},
"up": {
"line": (uia_defs.scroll_no_amount, uia_defs.scroll_small_decrement),
"page": (uia_defs.scroll_no_amount, uia_defs.scroll_large_decrement),
},
"down": {
"line": (uia_defs.scroll_no_amount, uia_defs.scroll_small_increment),
"page": (uia_defs.scroll_no_amount, uia_defs.scroll_large_increment),
},
}
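    # The table above maps (direction, amount) to the (horizontal, vertical)
    # amount pair passed to the Scroll pattern's Scroll() call below, e.g.
    # _scroll_types["down"]["page"] -> (scroll_no_amount, scroll_large_increment).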
def scroll(self, direction, amount, count=1, retry_interval=Timings.scroll_step_wait):
"""Ask the control to scroll itself
**direction** can be any of "up", "down", "left", "right"
**amount** can be only "line" or "page"
**count** (optional) the number of times to scroll
**retry_interval** (optional) interval between scroll actions
"""
def _raise_attrib_err(details):
control_type = self.element_info.control_type
name = self.element_info.name
msg = "".join([control_type.lower(), ' "', name, '" ', details])
raise AttributeError(msg)
try:
scroll_if = self.iface_scroll
if direction.lower() in ("up", "down"):
if not scroll_if.CurrentVerticallyScrollable:
_raise_attrib_err('is not vertically scrollable')
elif direction.lower() in ("left", "right"):
if not scroll_if.CurrentHorizontallyScrollable:
_raise_attrib_err('is not horizontally scrollable')
h, v = self._scroll_types[direction.lower()][amount.lower()]
# Scroll as often as we have been asked to
for _ in range(count, 0, -1):
scroll_if.Scroll(h, v)
time.sleep(retry_interval)
except uia_defs.NoPatternInterfaceError:
_raise_attrib_err('is not scrollable')
except KeyError:
raise ValueError("""Wrong arguments:
direction can be any of "up", "down", "left", "right"
amount can be only "line" or "page"
""")
return self
backend.register('uia', UIAElementInfo, UIAWrapper)
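# --- Illustrative usage sketch (not part of the original module) ---
# Assumes pywinauto's Application class and a Notepad window as a hypothetical
# target; shown only to illustrate how UIAWrapper methods are typically reached.
def _example_uia_usage():
    from pywinauto.application import Application
    app = Application(backend="uia").start("notepad.exe")
    dlg = app.window(title_re=".*Notepad").wrapper_object()  # a UIAWrapper
    dlg.maximize()
    dlg.menu_select("File->Exit")  # menu path syntax, see menu_select() above
    return dlg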
| 39.073698
| 114
| 0.58242
|
a0125bed70a8a668a1659666b86e81b2a16e795b
| 10,126
|
py
|
Python
|
vendor/noiselabs/smarty-bundle/NoiseLabs/Bundle/SmartyBundle/Resources/doc/conf.py
|
rubberbullet/collab
|
1c55f265466c8dfc6af851e63de45376e2dcd7ef
|
[
"MIT"
] | null | null | null |
vendor/noiselabs/smarty-bundle/NoiseLabs/Bundle/SmartyBundle/Resources/doc/conf.py
|
rubberbullet/collab
|
1c55f265466c8dfc6af851e63de45376e2dcd7ef
|
[
"MIT"
] | null | null | null |
vendor/noiselabs/smarty-bundle/NoiseLabs/Bundle/SmartyBundle/Resources/doc/conf.py
|
rubberbullet/collab
|
1c55f265466c8dfc6af851e63de45376e2dcd7ef
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# SmartyBundle documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 5 02:30:06 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
for localdir in ['_exts', '_themes']:
sys.path.append(os.path.abspath(localdir))
# adding PhpLexer
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
# enable highlighting for PHP code not between ``<?php ... ?>`` by default
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
# use PHP as the primary domain
primary_domain = 'php'
# set url for API links
api_url = 'http://www.noiselabs.org/projects/smarty-bundle/api/%s'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0.7'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sensio.sphinx.refinclude', 'sensio.sphinx.configurationblock', 'sensio.sphinx.phpcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SmartyBundle'
copyright = u'2011-2014, Vítor Brandão'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.0-DEV'
# The full version, including alpha/beta/rc tags.
release = '1.1.0-DEV'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Options: 'sphinx-bootstrap', 'bootstrap', 'nature', 'pyramid'
html_theme = 'sphinx-bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'analytics_code': 'UA-18238226-7',
'github_user': 'noiselabs',
'github_repo': 'SmartyBundle',
'twitter_username': 'noiselabs',
'home_url': 'http://smartybundle.noiselabs.org',
'disqus_shortname': 'noiselabs',
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# Bootstrap:
# (Optional) Logo. Should be exactly 32x32 px to fit the nav. bar.
# Path should be relative to the html_static_path setting (e.g.,
# "_static") in source.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SmartyBundledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SmartyBundle.tex', u'SmartyBundle Documentation',
u'Vítor Brandão', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'smartybundle', u'SmartyBundle Documentation',
[u'Vítor Brandão'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SmartyBundle', u'SmartyBundle Documentation',
u'Vítor Brandão', 'SmartyBundle', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'SmartyBundle'
epub_author = u'Vítor Brandão'
epub_publisher = u'Vítor Brandão'
epub_copyright = u'2012, Vítor Brandão'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| 32.248408
| 144
| 0.716966
|
e8a31e3a9d0b4f6187148cc6762bce5a0398707a
| 3,409
|
py
|
Python
|
indextools/console/split.py
|
dnanexus/IndexTools
|
0392b3be92ff50b401290b59e9ca6c7767fa5a96
|
[
"MIT"
] | 15
|
2019-07-17T11:41:36.000Z
|
2021-03-02T09:36:34.000Z
|
indextools/console/split.py
|
dnanexus/IndexTools
|
0392b3be92ff50b401290b59e9ca6c7767fa5a96
|
[
"MIT"
] | 22
|
2019-05-15T20:08:12.000Z
|
2019-10-11T13:33:42.000Z
|
indextools/console/split.py
|
dnanexus/IndexTools
|
0392b3be92ff50b401290b59e9ca6c7767fa5a96
|
[
"MIT"
] | 3
|
2019-06-01T15:58:06.000Z
|
2022-01-21T21:10:01.000Z
|
import enum
from functools import partial
import os
from typing import Optional, Tuple
import autoclick as ac
import pysam
from indextools.bed import BedInterval, iter_bed_interval_groups
from indextools.utils import References, split_path
class FeatureInclusion(enum.Enum):
"""
Enumeration of options for determining which features to include for an
interval.
"""
    # Plain functions (including lambdas) assigned in an Enum body become
    # methods rather than members, so each predicate is wrapped in
    # functools.partial to make it a proper member whose .value is callable.
    OVERLAP = partial(lambda start, end, read: start < read.end and end > read.start)
    """Include any feature that overlaps at least one base of the interval."""
    CONTAIN = partial(lambda start, end, read: read.start >= start and read.end <= end)
    """Include any feature that is fully contained in the interval."""
    START = partial(lambda start, end, read: start <= read.start <= end)
    """Include any feature whose starting position is within the interval."""
    END = partial(lambda start, end, read: start <= read.end <= end)
    """Include any feature whose ending position is within the interval."""
def split(
primary: ac.ReadableFile,
partitions_bed: ac.ReadableFile,
slop: Optional[Tuple[int, ...]] = None,
features: FeatureInclusion = FeatureInclusion.OVERLAP,
name_format: str = "{prefix}.{rank}.{ext}",
output_dir: Optional[ac.WritableDir] = None,
contig_sizes: Optional[ac.ReadableFile] = None,
):
"""
Split a primary file based on partitions in a BED file.
Args:
primary: The primary file to split.
partitions_bed:
slop: Padding to add on each side of each interval. If a single value,
the same value is used for both the left and right side of the
interval; otherwise the left and right slop can be specified separately.
features:
name_format:
output_dir:
contig_sizes: A file with the sizes of all the contigs in the index;
only necessary if the primary file is not specified, or if it does not
have sequence information in its header. This is a two-column tab-delimited
file ({contig_name}\t{contig_size}).
"""
if slop and len(slop) == 1:
slop = slop[0], slop[0]
path, prefix, exts = split_path(primary)
ext = os.extsep.join(exts)
if output_dir is None:
output_dir = path
if contig_sizes:
references = References.from_file(contig_sizes)
else:
references = References.from_bam(primary)
with pysam.AlignmentFile(primary, "rb") as bam:
for rank, ivl_list in enumerate(iter_bed_interval_groups(partitions_bed)):
partition_bam_filename = output_dir / name_format.format(
rank=rank, prefix=prefix, ext=ext, **ivl_list[0].as_dict()
)
with pysam.AlignmentFile(partition_bam_filename, "wb", bam) as out:
def write_ivl_reads(ivl: BedInterval):
contig, start, end = ivl.as_bed3()
if slop:
start = max(start - slop[0], 0)
end = min(end + slop[1], references.get_size(contig))
reads = bam.fetch(contig, start, end)
if features is not FeatureInclusion.OVERLAP:
reads = filter(partial(features.value, start, end), reads)
for read in reads:
out.write(read)
for i in ivl_list:
write_ivl_reads(i)
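# --- Illustrative invocation sketch (not part of the original module) ---
# The file names below are hypothetical placeholders; in practice split() is
# normally invoked through the autoclick-generated CLI rather than called
# directly like this.
def _example_split_call():
    split(
        primary="sample.bam",             # hypothetical indexed BAM file
        partitions_bed="partitions.bed",  # hypothetical partition BED file
        slop=(150,),                      # 150 bp padding on both sides
        features=FeatureInclusion.START,  # keep reads starting in the interval
        output_dir=".",
    )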
| 36.655914
| 87
| 0.629803
|
caf43fab7e4a4b6cdaccf3e6a206f8fc5a4f3264
| 222
|
py
|
Python
|
mult.py
|
Carlos123b/X-Serv-Python-Multiplica
|
3119f0794abb7dcbdfc1f56011c23bc44ff966c9
|
[
"Apache-2.0"
] | null | null | null |
mult.py
|
Carlos123b/X-Serv-Python-Multiplica
|
3119f0794abb7dcbdfc1f56011c23bc44ff966c9
|
[
"Apache-2.0"
] | null | null | null |
mult.py
|
Carlos123b/X-Serv-Python-Multiplica
|
3119f0794abb7dcbdfc1f56011c23bc44ff966c9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
for i in range(1,11):
print("Tabla del " + str(i) + ":")
print("---------------------------")
for j in range(1,11):
print(i,"por",j,"es",i*j)
print("\n")
| 22.2
| 40
| 0.418919
|
95221dff0d7c1eeb5778a42a262a2a2e69138570
| 6,955
|
py
|
Python
|
codes/Model_Dataset.py
|
S-rz/THINE
|
cfd16b693adfcaf943e4566ab52b03197b65ba4d
|
[
"MIT"
] | 2
|
2021-09-20T11:48:43.000Z
|
2022-03-20T12:06:39.000Z
|
codes/Model_Dataset.py
|
S-rz/THINE
|
cfd16b693adfcaf943e4566ab52b03197b65ba4d
|
[
"MIT"
] | null | null | null |
codes/Model_Dataset.py
|
S-rz/THINE
|
cfd16b693adfcaf943e4566ab52b03197b65ba4d
|
[
"MIT"
] | 1
|
2021-11-21T14:03:36.000Z
|
2021-11-21T14:03:36.000Z
|
from torch.utils.data import Dataset
import functools
import numpy as np
import sys
import random
class mtne_metapath_dataset(Dataset):
def __init__(self, args, metapath_data, train_edge):
self.args = args
self.metapath_data = metapath_data
self.train_edge = train_edge # [[node, type, node, type, time], ...]
random.shuffle(self.train_edge)
self.data_size = len(self.train_edge)
self.closest_metapath = self.args.closest_metapath
self.neg_sample = self.args.negative_sample
self.neg_method = self.args.negative_sample_method
self.node_type = self.args.node_type
self.node_dim = self.args.node_dim # {node type: , ...}
self.metapath_type = self.args.metapath_type
self.max_time = -sys.maxsize
self.NEG_SAMPLING_POWER = 0.75
self.neg_table_size = int(1e8)
self.metapath_to_node_dict()
def __len__(self):
return self.data_size
def __getitem__(self, item):
s_node = self.train_edge[item][0]
s_type = self.train_edge[item][1]
t_node = self.train_edge[item][2]
t_type = self.train_edge[item][3]
s_t_time = self.train_edge[item][4]
metapath_s = self.choice_metapath(s_node, s_type, t_node, t_type, s_t_time, self.closest_metapath)
negative_s_node = self.fun_negative_sample([s_node, s_type], [t_node, t_type])
negative_s_metapath = {}
for negative_node_temp in negative_s_node:
negative_s_metapath[negative_node_temp[1] + negative_node_temp[0]] = self.choice_metapath(s_node, s_type, negative_node_temp[0],
negative_node_temp[1], s_t_time, self.closest_metapath)
# source_node: [id, type]
# target_node: [id, type]
# metapath_s: [{type:, edge:, node type:, time: }, ...]
# metapath_t: [{type:, edge:, node type:, time: }, ...]
# negative_s_node: [[id, type], ...]
# negative_s_metapath: { type + id: [{type:, edge:, node type:, time: }, ...], ...}
# negative_t_node: [[id, type], ...]
# negative_t_metapath: { id: [{type:, edge:, node type:, time: }, ...], ...}
sample = {
'source_node':[s_node, s_type],
'target_node':[t_node, t_type],
'train_time':s_t_time,
'metapath_s':metapath_s,
'negative_s_node':negative_s_node,
'negative_s_metapath':negative_s_metapath
}
# print(sample)
return sample
def choice_metapath(self, s_node, s_node_type, t_node, t_node_type, time, closest_metapath):
dict_key = s_node_type + s_node
node_metapath = []
output_metapath = []
for id_metapath_temp in self.node_metapath_data[dict_key]:
# print(id_metapath_temp)
if int(id_metapath_temp[2]) <= int(time) and id_metapath_temp[4] == t_node and id_metapath_temp[5] == t_node_type:
node_metapath.append(id_metapath_temp)
if len(node_metapath) > closest_metapath:
node_metapath.sort(key=functools.cmp_to_key(self.cmp))
node_metapath.pop(-1)
for node_metapath_temp in node_metapath:
type_temp = node_metapath_temp[0]
id_temp = node_metapath_temp[1]
for metapath_temp in self.metapath_data[type_temp]:
if metapath_temp['id'] == id_temp:
metapath_temp['type'] = type_temp
output_metapath.append(metapath_temp)
break
return output_metapath # [{type:, edge:, node type:, time:, id:}, ...]
def cmp(self, x, y):
# index 2 : max_time, index 3 : avg_time
if x[2] > y[2]:
return -1
elif x[2] == y[2] and x[3] > y[3]:
return -1
elif x[2] < y[2]:
return 1
elif x[2] == y[2] and x[3] < y[3]:
return 1
else:
return 0
def metapath_to_node_dict(self):
# {node_type + node: [[metapath type, metapath id, max time, avg time, next node, next node type], ...], ...}
self.node_metapath_data = {}
for metapath_type_temp in self.metapath_data.keys(): # {type: [metapath, ...], ...}, metapath = {id: , edge: [], node_type: [],time: []}
for metapath_temp in self.metapath_data[metapath_type_temp]:
max_time, avg_time = self.max_average_time(metapath_temp['time'])
for index in range(len(metapath_temp['edge']) - 1):
dict_key = metapath_temp['node_type'][index] + metapath_temp['edge'][index]
if dict_key not in self.node_metapath_data.keys():
self.node_metapath_data[dict_key] = []
self.node_metapath_data[dict_key].append([metapath_type_temp, metapath_temp['id'], max_time,
avg_time, metapath_temp['edge'][index + 1], metapath_temp['node_type'][index + 1]])
def max_average_time(self, time_list):
max_time = -sys.maxsize
total_time = 0
for time_temp in time_list:
total_time = total_time + int(time_temp)
max_time = max(max_time, int(time_temp))
return str(max_time), str(int(total_time / len(time_list)))
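    # Illustrative example (not from the original source): for
    # time_list = ['3', '5', '10'], max_average_time returns ('10', '6'),
    # i.e. the maximum timestamp and the integer-truncated average, both as strings.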
def fun_negative_sample(self, s_node, t_node):
negative_node = []
if self.neg_method:
# metapath
for i in range(self.neg_sample):
node_type_index = np.random.randint(0, len(self.node_type) - 1, 1)
node_type_index = node_type_index[0]
node_id = np.random.randint(1, self.node_dim[np.array(self.node_type)[node_type_index]], 1)
while (
(str(node_id[0]) == s_node[0] and np.array(self.node_type)[node_type_index] == s_node[1])
or(str(node_id[0]) == t_node[0] and np.array(self.node_type)[node_type_index] == t_node[1])
):
node_type_index = np.random.randint(0, len(self.node_type) - 1, 1)
node_type_index = node_type_index[0]
node_id = np.random.randint(1, self.node_dim[np.array(self.node_type)[node_type_index]], 1)
negative_node.append([str(node_id[0]), np.array(self.node_type)[node_type_index]])
# error error error error
else:
# metapath++
node_type = t_node[1]
node_id = np.random.randint(1, self.node_dim[node_type], self.neg_sample)
for node_id_temp in node_id:
negative_node.append([str(node_id_temp) ,node_type])
return negative_node
| 46.366667
| 146
| 0.56995
|
5490570a9bedbb9a0167165e1f9bacbda591f9aa
| 5,569
|
py
|
Python
|
mango/constants.py
|
liqprotocol/mango-explorer
|
c2eecdd80f3e57261e209109af39bc05d34cabe9
|
[
"MIT"
] | null | null | null |
mango/constants.py
|
liqprotocol/mango-explorer
|
c2eecdd80f3e57261e209109af39bc05d34cabe9
|
[
"MIT"
] | null | null | null |
mango/constants.py
|
liqprotocol/mango-explorer
|
c2eecdd80f3e57261e209109af39bc05d34cabe9
|
[
"MIT"
] | null | null | null |
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import decimal
import importlib.metadata
import json
import os.path
import typing
from solana.publickey import PublicKey
# # 🥭 Constants
#
# This file contains some hard-coded values, all kept in one place, as well as the mechanism
# for loading the Mango `ids.json` file.
# ## SYSTEM_PROGRAM_ADDRESS
#
# The Solana system program address is always 11111111111111111111111111111111.
#
SYSTEM_PROGRAM_ADDRESS = PublicKey("11111111111111111111111111111111")
# ## SOL_MINT_ADDRESS
#
# The fake mint address of the SOL token. **Note:** Wrapped SOL has a different mint address - it is So11111111111111111111111111111111111111112.
#
SOL_MINT_ADDRESS = PublicKey("So11111111111111111111111111111111111111111")
# ## SOL_DECIMALS
#
# The number of decimal places used to convert Lamports into SOLs.
#
SOL_DECIMALS = decimal.Decimal(9)
# ## SOL_DECIMAL_DIVISOR decimal
#
# The divisor to use to turn an integer value of SOLs from an account's `balance` into a value with the correct number of decimal places.
#
SOL_DECIMAL_DIVISOR = decimal.Decimal(10 ** SOL_DECIMALS)
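# Illustrative example (not part of the original module): a raw balance of
# 2_500_000_000 lamports divided by SOL_DECIMAL_DIVISOR (10 ** 9) yields
# decimal.Decimal('2.5') SOL.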
# ## NUM_TOKENS
#
# This is currently hard-coded to 3.
#
NUM_TOKENS = 3
# ## NUM_MARKETS
#
# There is one fewer market than tokens.
#
NUM_MARKETS = NUM_TOKENS - 1
# # WARNING_DISCLAIMER_TEXT
#
# This is the warning text that is output on each run of a command.
#
WARNING_DISCLAIMER_TEXT = """
⚠ WARNING ⚠
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
🥭 Mango Markets: https://mango.markets
📄 Documentation: https://docs.mango.markets/
💬 Discord: https://discord.gg/67jySBhxrg
🐦 Twitter: https://twitter.com/mangomarkets
🚧 Github: https://github.com/blockworks-foundation
📧 Email: mailto:hello@blockworks.foundation
"""
# # _build_data_path
#
# The code needs access to some data files, such as the ids.json file that's used in multiple Mango projects. In
# this project all these data files are kept in the /data directory, relative to the project root.
#
# Some situations can make it tricky accessing files in that known location though. (In particular, Nuitka
# compilation to a standalone executable seems to make accessing internal paths with '..' in them impossible.)
#
# This function provides a consistent way to determine the correct data path for use throughout `mango-explorer`.
#
def _build_data_path() -> str:
possibilities: typing.Sequence[str] = ["../data", "data", ".", "../../data", "../../../data"]
attempts: typing.List[str] = []
file_root: str = os.path.dirname(__file__)
for possibility in possibilities:
data_path: str = os.path.normpath(os.path.join(file_root, possibility))
attempts += [data_path]
try:
attempted_ids_path: str = os.path.normpath(os.path.join(data_path, "ids.json"))
with open(attempted_ids_path) as ids_file:
json.load(ids_file)
return data_path
except:
pass
raise Exception(f"Could not determine data path - ids.json not found in: {attempts}")
# # DATA_PATH
#
# This is the path to the data directory that contains (among other things) the ids.json.
#
DATA_PATH: str = _build_data_path()
# ## MangoConstants
#
# Load all Mango Market's constants from its own `ids.json` file (retrieved from [GitHub](https://raw.githubusercontent.com/blockworks-foundation/mango-client-ts/main/src/ids.json).
#
# Load all Mango Markets constants from its own `ids.json` file (retrieved from [GitHub](https://raw.githubusercontent.com/blockworks-foundation/mango-client-ts/main/src/ids.json)).
#
with open(os.path.join(DATA_PATH, "ids.json")) as json_file:
MangoConstants = json.load(json_file)
# # 🥭 PackageVersion class
#
# Runtime details of the current version of mango-explorer.
#
class PackageVersion(typing.NamedTuple):
version: str
last_commit: str
def __str__(self) -> str:
return f"« 𝙿𝚊𝚌𝚔𝚊𝚐𝚎𝚅𝚎𝚛𝚜𝚒𝚘𝚗 {self.version} - '{self.last_commit}' »"
def __repr__(self) -> str:
return f"{self}"
def version() -> PackageVersion:
package_version: str = "Unknown"
try:
package_version = importlib.metadata.version("mango-explorer")
except Exception:
pass
version_filename: str = os.path.join(DATA_PATH, ".version")
last_commit = f"Unknown (no version file found at '{version_filename}')."
if os.path.isfile(version_filename):
with open(version_filename) as version_file:
last_commit = version_file.read().strip()
return PackageVersion(version=package_version, last_commit=last_commit)
| 34.165644
| 460
| 0.726522
|
38488d77669fb7d858e0b56b2c71327f4a2d015f
| 2,277
|
py
|
Python
|
tests/test_scripts/output/genpython/testtypes.py
|
hrshdhgd/linkml
|
6a1138921b59b64ccd6cb80162ab7e074449e274
|
[
"CC0-1.0"
] | null | null | null |
tests/test_scripts/output/genpython/testtypes.py
|
hrshdhgd/linkml
|
6a1138921b59b64ccd6cb80162ab7e074449e274
|
[
"CC0-1.0"
] | null | null | null |
tests/test_scripts/output/genpython/testtypes.py
|
hrshdhgd/linkml
|
6a1138921b59b64ccd6cb80162ab7e074449e274
|
[
"CC0-1.0"
] | null | null | null |
# Auto generated from testtypes.yaml by pythongen.py version: 0.9.0
# Generation date: 2021-12-11T03:15:41
# Schema: types
#
# id: http://example.org/tests/types
# description:
# license:
import dataclasses
import sys
import re
from jsonasobj2 import JsonObj, as_dict
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from linkml_runtime.linkml_model.meta import EnumDefinition, PermissibleValue, PvFormulaOptions
from linkml_runtime.utils.slot import Slot
from linkml_runtime.utils.metamodelcore import empty_list, empty_dict, bnode
from linkml_runtime.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
from linkml_runtime.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from linkml_runtime.utils.formatutils import camelcase, underscore, sfx
from linkml_runtime.utils.enumerations import EnumDefinitionImpl
from rdflib import Namespace, URIRef
from linkml_runtime.utils.curienamespace import CurieNamespace
from linkml_runtime.utils.metamodelcore import Bool
metamodel_version = "1.7.0"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
METATYPE = CurieNamespace('metatype', 'https://w3id.org/linkml/types/')
XSD = CurieNamespace('xsd', 'http://www.w3.org/2001/XMLSchema#')
DEFAULT_ = CurieNamespace('', 'http://example.org/tests/types/')
# Types
class String(str):
type_class_uri = XSD.string
type_class_curie = "xsd:string"
type_name = "string"
type_model_uri = URIRef("http://example.org/tests/types/String")
class Boolean(Bool):
type_class_uri = XSD.boolean
type_class_curie = "xsd:boolean"
type_name = "boolean"
type_model_uri = URIRef("http://example.org/tests/types/Boolean")
class BooleanType(Boolean):
type_class_uri = XSD.boolean
type_class_curie = "xsd:boolean"
type_name = "boolean type"
type_model_uri = URIRef("http://example.org/tests/types/BooleanType")
class StringType(String):
type_class_uri = XSD.string
type_class_curie = "xsd:string"
type_name = "string type"
type_model_uri = URIRef("http://example.org/tests/types/StringType")
# Class references
# Enumerations
# Slots
class slots:
pass
| 28.4625
| 95
| 0.777339
|
75b006859572079aade2c855a67b2cbee5eed653
| 19,284
|
py
|
Python
|
myqtpy/Unregister.py
|
alchenerd/hvhnonc
|
8bd831cb1adadc3a92c274b5b38b6f26ed2ff253
|
[
"MIT"
] | null | null | null |
myqtpy/Unregister.py
|
alchenerd/hvhnonc
|
8bd831cb1adadc3a92c274b5b38b6f26ed2ff253
|
[
"MIT"
] | null | null | null |
myqtpy/Unregister.py
|
alchenerd/hvhnonc
|
8bd831cb1adadc3a92c274b5b38b6f26ed2ff253
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sqlite3
import sys
from PyQt5 import QtCore, QtWidgets
from _unregister_skeleton import Ui_Dialog as UnregisterDialog
from SearchBox import SearchBox
from Filter import Filter
from myconnect import connect
"""
@author: alchenerd (alchenerd@gmail.com)
"""
# These are mine
if __name__ == '__main__':
sys.path.append('../')
else:
sys.path.append('./myqtpy/')
class Unregister(QtWidgets.QDialog, UnregisterDialog):
def __init__(self, dialog):
super(self.__class__, self).__init__(dialog)
self.setupUi(dialog)
self.isEnabled = None
self.iid = None # in_ID
self.unregisterIdIndex = -1
self.unregisgerIdDict = self.get_id_dict('unregister')
self.getPreviousBtn.clicked.connect(self.onclick_prev)
self.getNextBtn.clicked.connect(self.onclick_next)
self.searchBtn.clicked.connect(self.on_searchBtn_clicked)
self.formBtn.clicked.connect(self.on_formBtn_clicked)
self.selectRecordBtn.clicked.connect(self.on_selectRecordBtn_clicked)
self.saveBtn.clicked.connect(self.on_saveBtn_clicked)
self.deleteBtn.clicked.connect(self.on_deleteBtn_clicked)
self.unregister_amount.textEdited.connect(self.amount_edit)
self.clear_all_fields()
self.disable_all_fields()
def on_saveBtn_clicked(self):
""" Callback function when saveBtn is clicked. """
print('on_saveBtn_clicked')
# check if valid field
if not self.check_user_input():
return
# check if valid unregisterIdIndex
if self.unregisterIdIndex in range(len(self.unregisgerIdDict)):
# valid id, ask user save as new or writeover
choice = self.ask_new_or_writeover()
if choice == 'new':
self.save_as_new()
elif choice == 'write_over':
self.write_over()
elif choice == 'cancel':
return
else:
# invalid id, check if new record(editable):
if self.isEnabled == True:
if self.ask_confirm():
self.save_as_new()
# update unregisgerIdDict
self.unregisgerIdDict = self.get_id_dict('unregister')
def ask_confirm(self) -> bool:
""" Asks user 'Are you sure?'. """
mb = QtWidgets.QMessageBox()
mb.setIcon(QtWidgets.QMessageBox.Question)
mb.setWindowTitle(u'確定?')
mb.setText(u'確定要存入一筆新資料嗎?')
mb.addButton(u'取消', QtWidgets.QMessageBox.RejectRole)
mb.addButton(u'確定', QtWidgets.QMessageBox.AcceptRole)
return mb.exec_()
def ask_new_or_writeover(self) -> str:
""" Asks user if save as new or writeover with a messagebox.
Returns
-------
str
            'cancel', 'new' or 'write_over', which indicates the user's choice.
"""
mb = QtWidgets.QMessageBox()
mb.setWindowTitle('存入資料')
mb.setText('請選擇存入本筆資料的方式:')
mb.addButton('取消', QtWidgets.QMessageBox.RejectRole)
mb.addButton('新增一筆', QtWidgets.QMessageBox.YesRole)
mb.addButton('覆蓋本筆', QtWidgets.QMessageBox.NoRole)
res = mb.exec_()
if res == 0:
return 'cancel'
elif res == 1:
return 'new'
elif res == 2:
return 'write_over'
def check_user_input(self) -> bool:
""" Checks if the user input of unregister form is valid.
Returns
-------
bool
The user input's validity.
In case of returning False, a messagebox will popup and
inform user where went wrong.
"""
print('check_user_input')
if not self.isEnabled:
return False
# We check amount > 0 only for now.
if int(self.unregister_amount.text()) <= 0:
QtWidgets.QMessageBox.warning(
self, u'錯誤', u'除帳數量不可小於等於0!')
return False
return True
def update_cache(self):
""" Update the user input into cache. """
con, cur = connect._get_connection()
sqlstr = ('insert or ignore into hvhnonc_cache ({columns}) '
'values ({questionmarks})')
d = {}
columns = ['this_ID', 'this_value', 'change_ID', 'change_value']
d['columns'] = (', '.join(columns))
d['questionmarks'] = ('?, ' * len(columns))[:-2]
widgetsToUpdate = ['reason', 'unregister_place']
for w in widgetsToUpdate:
params = ['0', '', connect.get_field_id(w),
getattr(self, w).currentText()]
cur.execute(sqlstr.format(**d), params)
con.commit()
con.close()
def save_as_new(self):
""" Save users input to database as a new row via sqlite."""
print('save_as_new')
con, cur = connect._get_connection()
sqlstr = ('insert into hvhnonc_out({fields}) values({questionmarks})')
d = {}
d['fields'] = ('in_ID, unregister_date, amount, reason, '
'unregister_place, unregister_remark')
params = [str(self.iid), self.unregister_date.date().toPyDate(),
self.unregister_amount.text(), self.reason.currentText(),
self.unregister_place.currentText(),
self.unregister_remark.text()]
d['questionmarks'] = ('?, ' * len(params))[:-2]
cur.execute(sqlstr.format(**d), params)
con.commit()
con.close()
self.update_cache()
QtWidgets.QMessageBox.information(self, '成功', '已存入一筆資料')
def write_over(self):
""" Write over old record with the user input via sqlite.
The old record is located with
        self.unregisgerIdDict[self.unregisterIdIndex]."""
print('write_over')
con, cur = connect._get_connection()
sqlstr = ('update hvhnonc_out set {settings} where ID=?')
d = {}
d['settings'] = ''
fields = ['in_ID', 'unregister_date', 'amount', 'reason',
'unregister_place', 'unregister_remark']
params = [str(self.iid), self.unregister_date.date().toPyDate(),
self.unregister_amount.text(), self.reason.currentText(),
self.unregister_place.currentText(),
self.unregister_remark.text()]
for f in fields:
d['settings'] += '{} = ?, '.format(f)
d['settings'] = d['settings'][:-2]
params += (str(self.unregisgerIdDict[self.unregisterIdIndex]),)
cur.execute(sqlstr.format(**d), params)
con.commit()
con.close()
self.update_cache()
QtWidgets.QMessageBox.information(self, '成功', '已覆蓋一筆資料')
def on_deleteBtn_clicked(self):
if not self.isEnabled:
return
if self.unregisterIdIndex < 0:
return
print('on_deleteBtn_clicked')
mb = QtWidgets.QMessageBox()
mb.setWindowTitle('確認刪除')
mb.setText('確定刪除這筆資料?')
mb.addButton('取消', QtWidgets.QMessageBox.RejectRole)
mb.addButton('確認', QtWidgets.QMessageBox.AcceptRole)
ret = mb.exec_()
if ret == 0:
return
con, cur = connect._get_connection()
sqlstr = ('delete from hvhnonc_out where ID = ?')
params = (str(self.unregisgerIdDict[self.unregisterIdIndex]),)
cur.execute(sqlstr, params)
con.commit()
con.close()
QtWidgets.QMessageBox.information(self, '已刪除', '已刪除一筆資料')
self.clear_all_fields()
def on_selectRecordBtn_clicked(self):
# open a search box
self.sb = QtWidgets.QDialog()
Filter(self.sb, mode='both')
returnID = self.sb.exec_()
if returnID == 0:
return
self.unregisterIdIndex = -1
self.update_field_by_id(returnID)
def on_formBtn_clicked(self):
# open a search box
self.sb = QtWidgets.QDialog()
Filter(self.sb, mode='out')
returnID = self.sb.exec_()
if returnID == 0:
return
self.unregisterIdIndex = -1
self.update_field_by_id(returnID)
def on_searchBtn_clicked(self):
# open a search box
self.sb = QtWidgets.QDialog()
SearchBox(self.sb, 'both')
# self.sb.exec_() returns a hvhnonc_in ID or a negative hvhnonc_out ID
returnID = self.sb.exec_()
if returnID == 0:
return
self.unregisterIdIndex = -1
self.update_field_by_id(returnID)
def update_field_by_id(self, returnID: int):
if returnID < 0:
oid = -returnID
for k, id in self.unregisgerIdDict.items():
if id == oid:
self.unregisterIdIndex = k
iid = self.get_inID(oid)
else:
# return id has no unregister record
oid = -1
iid = returnID
self.unregisterIdIndex = -1
self.iid = iid
self.load_inRecord(iid)
self.load_history_record(iid)
self.load_outRecord(oid)
self.enable_some_fields()
self.load_cache()
self.amount_initialize()
def load_cache(self):
# enabled comboboxes:
enabledComboboxes = ['reason', 'unregister_place']
con, cur = connect._get_connection()
sqlstr = ('select change_value '
'from hvhnonc_cache '
'where this_ID = 0 '
'and this_value = "" '
'and change_ID = ?;')
for cb in enabledComboboxes:
params = (connect.get_field_id(cb),)
cur.execute(sqlstr, params)
rows = cur.fetchall()
for row in rows:
getattr(self, cb).addItem(row[0])
getattr(self, cb).setCurrentText('')
con.close()
def amount_initialize(self):
if self.unregister_amount.text() in (None, ''):
return
# get total amount
totalAmount = int(self.amount.text())
# get unregistered amount
try:
unregisteredAmount = int(self.unregistered_amount.text())
except ValueError:
unregisteredAmount = 0
# calculate remain
remain = totalAmount - unregisteredAmount
self.remain_amount.setText(str(remain))
def amount_edit(self):
if self.unregister_amount.text() in (None, ''):
return
# get total amount
totalAmount = int(self.amount.text())
# get unregistered amount
if self.unregisterIdIndex == -1:
unregisteredAmount = 0
else:
unregisteredAmount = int(self.unregistered_amount.text())
# get editingAmount
try:
editingAmount = int(self.unregister_amount.text())
except ValueError:
QtWidgets.QMessageBox.warning(self, '錯誤', '數量需為正整數!')
self.unregister_amount.setText(
str(totalAmount - unregisteredAmount))
return
# calculate remain
remain = totalAmount - unregisteredAmount - editingAmount
if remain < 0:
QtWidgets.QMessageBox.warning(self, '錯誤', '剩餘數量不可小於0!')
self.unregister_amount.setText(
str(totalAmount - unregisteredAmount))
else:
self.remain_amount.setText(str(remain))
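    # Worked example of the arithmetic above (illustrative): with a total
    # amount of 10, 3 already unregistered and 4 entered in this form,
    # remain = 10 - 3 - 4 = 3, which is shown in remain_amount.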
# enable some fields for user editing
def enable_some_fields(self):
self.isEnabled = True
fieldsToEnable = ['unregister_date', 'unregister_amount', 'reason',
'unregister_place', 'unregister_remark']
fieldsToEnable = [getattr(self, x) for x in fieldsToEnable]
for field in fieldsToEnable:
field.setEnabled(True)
def get_id_dict(self, fromWhere: str):
where = ''
if fromWhere == 'register':
where = 'hvhnonc_in'
dateField = 'acquire_date'
if fromWhere == 'unregister':
where = 'hvhnonc_out'
dateField = 'unregister_date'
con, cursor = connect._get_connection()
sqlstr = ('select ID from {0} order by {1};'.format(where, dateField))
cursor.execute(sqlstr)
rows = cursor.fetchall()
con.close()
return {i: row[0] for i, row in enumerate(rows)}
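    # Illustrative example (not from the original source): with three rows in
    # hvhnonc_out ordered by unregister_date, this returns something like
    # {0: 17, 1: 5, 2: 23}, i.e. position-in-date-order -> database ID.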
def get_inID(self, oid: int) -> int:
con, cursor = connect._get_connection()
sqlstr = ('select in_ID from hvhnonc_out where ID = ?')
params = (oid,)
cursor.execute(sqlstr, params)
row = cursor.fetchone()
con.close()
if row:
return row[0]
else:
return -1
def onclick_next(self):
if not len(self.unregisgerIdDict):
return
if self.unregisterIdIndex == -1:
self.unregisterIdIndex = 0
elif self.unregisterIdIndex == len(self.unregisgerIdDict) - 1:
QtWidgets.QMessageBox.warning(self, u'到底了', u'已到達最末筆')
else:
self.unregisterIdIndex += 1
oid = self.unregisgerIdDict[self.unregisterIdIndex] # outID
self.update_field_by_id(-oid) # negative value for unregister record
def onclick_prev(self):
if not len(self.unregisgerIdDict):
return
if self.unregisterIdIndex == -1:
self.unregisterIdIndex = len(self.unregisgerIdDict) - 1
elif self.unregisterIdIndex == 0:
QtWidgets.QMessageBox.warning(self, u'到頂了', u'已到達第一筆')
else:
self.unregisterIdIndex -= 1
oid = self.unregisgerIdDict[self.unregisterIdIndex] # outID
self.update_field_by_id(-oid) # negative value for unregister record
def clear_history_record(self):
widgetsToClear = ('last_unregister_date', 'unregistered_amount',
'unregistered_count')
widgetsToClear = [self.__dict__.get(x) for x in widgetsToClear]
for widget in widgetsToClear:
if isinstance(widget, QtWidgets.QLineEdit):
widget.clear()
elif isinstance(widget, QtWidgets.QComboBox):
widget.clearEditText()
elif isinstance(widget, QtWidgets.QDateEdit):
(y, m, d) = (1800, 1, 1)
date = QtCore.QDate(y, m, d)
widget.setDate(date)
def load_history_record(self, iid: int):
con, cursor = connect._get_connection()
con.row_factory = sqlite3.Row
cursor = con.cursor()
sqlstr = ('select '
'max(unregister_date) as last_unregister_date, '
'count(*) as unregistered_count, '
'sum(amount) as unregistered_amount '
'from hvhnonc_out '
'where in_ID=?'
'limit 1;')
params = (iid,)
cursor.execute(sqlstr, params)
        row = cursor.fetchone()
        con.close()
        if not row or None in row:
            self.clear_history_record()
            return
for k in row.keys():
try:
w = getattr(self, k)
except:
continue
if isinstance(w, QtWidgets.QLineEdit):
w.setText(str(row[k]))
elif isinstance(w, QtWidgets.QComboBox):
w.setEditText(str(row[k]))
elif isinstance(w, QtWidgets.QDateEdit):
(y, m, d) = map(int, row[k].split('-'))
date = QtCore.QDate(y, m, d)
w.setDate(date)
def load_inRecord(self, iid: int):
con, cursor = connect._get_connection()
con.row_factory = sqlite3.Row
cursor = con.cursor()
sqlstr = ('select * from hvhnonc_in where ID = ?')
params = (iid,)
cursor.execute(sqlstr, params)
row = cursor.fetchone()
con.close()
for k in row.keys():
try:
w = getattr(self, k)
except:
continue
if isinstance(w, QtWidgets.QLineEdit):
w.setText(str(row[k]))
elif isinstance(w, QtWidgets.QComboBox):
w.setEditText(str(row[k]))
elif isinstance(w, QtWidgets.QDateEdit):
(y, m, d) = map(int, row[k].split('-'))
date = QtCore.QDate(y, m, d)
w.setDate(date)
def clear_out_fields(self):
widgetsToClear = ('unregister_date', 'unregister_amount', 'reason',
'unregister_place', 'unregister_remark')
widgetsToClear = [self.__dict__.get(x) for x in widgetsToClear]
for widget in widgetsToClear:
if isinstance(widget, QtWidgets.QLineEdit):
widget.clear()
elif isinstance(widget, QtWidgets.QComboBox):
widget.clearEditText()
elif isinstance(widget, QtWidgets.QDateEdit):
                date = QtCore.QDate.currentDate()
widget.setDate(date)
self.unregister_amount.setText('0')
def load_outRecord(self, oid: int):
if oid == -1:
self.clear_out_fields()
return
con, cursor = connect._get_connection()
con.row_factory = sqlite3.Row
cursor = con.cursor()
sqlstr = ('select * from hvhnonc_out where ID = ?')
params = (oid,)
cursor.execute(sqlstr, params)
row = cursor.fetchone()
con.close()
for k in row.keys():
if k == 'amount':
w = getattr(self, 'unregister_amount')
else:
try:
w = getattr(self, k)
except:
continue
if isinstance(w, QtWidgets.QLineEdit):
w.setText(str(row[k]))
elif isinstance(w, QtWidgets.QComboBox):
w.setEditText(str(row[k]))
elif isinstance(w, QtWidgets.QDateEdit):
(y, m, d) = map(int, row[k].split('-'))
date = QtCore.QDate(y, m, d)
w.setDate(date)
def clear_all_fields(self):
widgetsToClear = {k: i for k, i in self.__dict__.items() if (
isinstance(i, QtWidgets.QComboBox) or
isinstance(i, QtWidgets.QLineEdit) or
isinstance(i, QtWidgets.QDateEdit))}
for i in widgetsToClear.values():
if isinstance(i, QtWidgets.QComboBox):
i.clearEditText()
i.clear()
elif isinstance(i, QtWidgets.QLineEdit):
i.clear()
elif isinstance(i, QtWidgets.QDateEdit):
                date = QtCore.QDate.currentDate()
i.setDate(date)
def disable_all_fields(self):
self.isEnabled = False
self.idIndex = -1
widgetsToDisable = {k: i for k, i in self.__dict__.items() if (
isinstance(i, QtWidgets.QComboBox) or
isinstance(i, QtWidgets.QLineEdit) or
isinstance(i, QtWidgets.QDateEdit))}
for w in widgetsToDisable.values():
w.setEnabled(False)
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
ui = Unregister(dialog)
dialog.show()
sys.exit(app.exec_())
| 37.156069
| 78
| 0.567932
|
ce6b4b5bb906932a8cc9e5393ecafc79ed352104
| 3,978
|
py
|
Python
|
google/ads/googleads/v7/services/services/ad_group_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 285
|
2018-10-05T16:47:58.000Z
|
2022-03-31T00:58:39.000Z
|
google/ads/googleads/v7/services/services/ad_group_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 425
|
2018-09-10T13:32:41.000Z
|
2022-03-31T14:50:05.000Z
|
google/ads/googleads/v7/services/services/ad_group_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 369
|
2018-11-28T07:01:00.000Z
|
2022-03-28T09:53:22.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v7.resources.types import ad_group
from google.ads.googleads.v7.services.types import ad_group_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AdGroupService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
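    # Illustrative note (not part of the generated file): with the defaults above,
    # host "googleads.googleapis.com" is normalized to "googleads.googleapis.com:443"
    # and, when no credentials are passed, google.auth.default() is called with the
    # "https://www.googleapis.com/auth/adwords" scope.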
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_ad_group: gapic_v1.method.wrap_method(
self.get_ad_group,
default_timeout=None,
client_info=client_info,
),
self.mutate_ad_groups: gapic_v1.method.wrap_method(
self.mutate_ad_groups,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_ad_group(
self,
) -> typing.Callable[
[ad_group_service.GetAdGroupRequest], ad_group.AdGroup
]:
raise NotImplementedError
@property
def mutate_ad_groups(
self,
) -> typing.Callable[
[ad_group_service.MutateAdGroupsRequest],
ad_group_service.MutateAdGroupsResponse,
]:
raise NotImplementedError
__all__ = ("AdGroupServiceTransport",)
| 34.894737
| 78
| 0.66365
|
32651ec8835b64ec7be984f54a353bb45be83eee
| 829
|
py
|
Python
|
tests/test_analysis.py
|
csala/Orion
|
12e31541ad1be11f9559b5cab1435af72a096a08
|
[
"MIT"
] | null | null | null |
tests/test_analysis.py
|
csala/Orion
|
12e31541ad1be11f9559b5cab1435af72a096a08
|
[
"MIT"
] | null | null | null |
tests/test_analysis.py
|
csala/Orion
|
12e31541ad1be11f9559b5cab1435af72a096a08
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from orion import analysis
def test__build_events_df_empty():
events = np.array([], dtype=np.float64)
returned = analysis._build_events_df(events)
assert returned.empty
assert list(returned.columns) == ['start', 'end', 'score']
def test__build_events_df_events():
events = np.array([
[1.22297040e+09, 1.22299200e+09, 5.72643599e-01],
[1.22301360e+09, 1.22303520e+09, 5.72643599e-01],
[1.22351040e+09, 1.22353200e+09, 5.72643599e-01],
])
returned = analysis._build_events_df(events)
expected = pd.DataFrame({
'start': [1222970400, 1223013600, 1223510400],
'end': [1222992000, 1223035200, 1223532000],
'score': [0.572644, 0.572644, 0.572644]
})
pd.testing.assert_frame_equal(returned, expected)
| 26.741935
| 62
| 0.665862
|
9652ea1a9d27fd061424ec7a9c790c63b685ef47
| 4,574
|
py
|
Python
|
reactorch/solution.py
|
erwanp/reactorch
|
439cb2d45629f978476aff936ebe1f0a66f85fe9
|
[
"MIT"
] | 25
|
2020-05-12T14:05:15.000Z
|
2022-03-04T05:23:01.000Z
|
reactorch/solution.py
|
erwanp/reactorch
|
439cb2d45629f978476aff936ebe1f0a66f85fe9
|
[
"MIT"
] | 21
|
2020-05-06T21:32:33.000Z
|
2021-06-23T14:02:27.000Z
|
reactorch/solution.py
|
erwanp/reactorch
|
439cb2d45629f978476aff936ebe1f0a66f85fe9
|
[
"MIT"
] | 19
|
2020-05-12T14:05:21.000Z
|
2022-03-05T05:31:58.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Weiqi Ji"
__copyright__ = "Copyright 2020, DENG"
__version__ = "0.1"
__email__ = "weiqiji@mit.edu"
__status__ = "Development"
import cantera as ct
import torch
from ruamel.yaml import YAML
from torch import nn
torch.set_default_tensor_type("torch.DoubleTensor")
class Solution(nn.Module):
from .import_kinetics import set_nasa
from .import_kinetics import set_reactions
from .import_kinetics import set_transport
from .kinetics import forward_rate_constants_func
from .kinetics import forward_rate_constants_func_vec
from .kinetics import equilibrium_constants_func
from .kinetics import reverse_rate_constants_func
from .kinetics import wdot_func
from .kinetics import Ydot_func
from .kinetics import Xdot_func
from .kinetics import Tdot_func
from .kinetics import TXdot_func
from .kinetics import TYdot_func
from .thermo import cp_mole_func
from .thermo import cp_mass_func
from .thermo import enthalpy_mole_func
from .thermo import enthalpy_mass_func
from .thermo import entropy_mole_func
from .thermo import entropy_mass_func
from .transport import update_transport
from .transport import viscosities_func
from .transport import thermal_conductivity_func
from .transport import binary_diff_coeffs_func
from .magic_function import C2X, Y2X, Y2C, X2C, X2Y
def __init__(self, mech_yaml=None, device=None, vectorize=False,
is_clip=True, is_norm=True, is_wdot_vec=True):
super(Solution, self).__init__()
if device is None:
self.device = torch.device('cpu')
else:
self.device = device
# whether the computation of reaction rate of type 4 will be vectorized
self.vectorize = vectorize
self.is_clip = is_clip
self.is_norm = is_norm
self.is_wdot_vec = is_wdot_vec
self.gas = ct.Solution(mech_yaml)
self.R = ct.gas_constant
self.one_atm = torch.Tensor([ct.one_atm]).to(self.device)
self.n_species = self.gas.n_species
self.n_reactions = self.gas.n_reactions
self.uq_A = nn.Parameter(torch.Tensor(self.n_reactions).fill_(1.0).to(self.device))
self.molecular_weights = torch.Tensor([self.gas.molecular_weights]).T.to(self.device)
with open(mech_yaml, 'r') as stream:
yaml = YAML()
model_yaml = yaml.load(stream)
self.model_yaml = model_yaml
self.set_nasa()
self.set_reactions()
def set_pressure(self, P):
self.P_ref = torch.Tensor([P]).to(self.device)
def set_states(self, TPY, eval_rate=True):
self.T = torch.clamp(TPY[:, 0:1], min=200, max=None)
self.logT = torch.log(self.T)
if TPY.shape[1] == self.n_species + 2:
self.P = TPY[:, 1:2]
if self.is_clip:
self.Y = torch.clamp(TPY[:, 2:], min=0, max=None)
else:
self.Y = TPY[:, 2:]
if TPY.shape[1] == self.n_species + 1:
self.P = torch.ones_like(self.T) * self.P_ref
if self.is_clip:
self.Y = torch.clamp(TPY[:, 1:], min=0.0, max=None)
else:
self.Y = TPY[:, 1:]
if self.is_norm:
self.Y = (self.Y.T / self.Y.sum(dim=1)).T
self.mean_molecular_weight = 1 / torch.mm(self.Y, 1 / self.molecular_weights)
self.density_mass = self.P / self.R / self.T * self.mean_molecular_weight
self.Y2X()
self.Y2C()
self.cp_mole_func()
self.cp_mass_func()
self.enthalpy_mole_func()
self.enthalpy_mass_func()
self.entropy_mole_func()
self.entropy_mass_func()
if eval_rate:
# concentration of M in three-body reaction (type 2)
self.C_M = torch.mm(self.C, self.efficiencies_coeffs)
self.identity_mat = torch.ones_like(self.C_M)
# for batch computation
self.C_M2 = (self.C_M * self.is_three_body +
self.identity_mat * (1 - self.is_three_body))
if self.vectorize:
# for reaction of type 4
self.C_M_type4 = torch.mm(self.C, self.efficiencies_coeffs_type4)
self.forward_rate_constants_func_vec()
else:
self.forward_rate_constants_func()
self.equilibrium_constants_func()
self.reverse_rate_constants_func()
self.wdot_func()
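# Usage sketch (illustrative, under assumed inputs; 'gri30.yaml' is just an
# example mechanism file):
#
#     sol = Solution(mech_yaml='gri30.yaml')
#     sol.set_pressure(ct.one_atm)
#     # TPY rows are either [T, P, Y_1..Y_n] (n_species + 2 columns) or
#     # [T, Y_1..Y_n] (n_species + 1 columns, using the pressure set above).
#     TPY = torch.ones(1, sol.n_species + 1)
#     TPY[0, 0] = 1200.0
#     sol.set_states(TPY)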
| 29.320513
| 93
| 0.631613
|
e99ec2d320cdf89c98bae6f69643e3f643a31b10
| 6,119
|
py
|
Python
|
ax/utils/common/logger.py
|
Balandat/Ax
|
6c7556165291a5329744b5075d5f95d2dec18938
|
[
"MIT"
] | null | null | null |
ax/utils/common/logger.py
|
Balandat/Ax
|
6c7556165291a5329744b5075d5f95d2dec18938
|
[
"MIT"
] | null | null | null |
ax/utils/common/logger.py
|
Balandat/Ax
|
6c7556165291a5329744b5075d5f95d2dec18938
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import logging
import os
from functools import wraps
from typing import Any, Callable, TypeVar, Iterable
AX_ROOT_LOGGER_NAME = "ax"
DEFAULT_LOG_LEVEL: int = logging.INFO
T = TypeVar("T")
class AxOutputNameFilter(logging.Filter):
"""This is a filter which sets the record's output_name, if
not configured
"""
def filter(self, record: logging.LogRecord) -> bool:
if not hasattr(record, "output_name"):
# pyre-ignore[16]: Record supports arbitrary attributes
record.output_name = record.name
return True
def get_logger(name: str) -> logging.Logger:
"""Get an Axlogger.
To set a human-readable "output_name" that appears in logger outputs,
add `{"output_name": "[MY_OUTPUT_NAME]"}` to the logger's contextual
information. By default, we use the logger's `name`
NOTE: To change the log level on particular outputs (e.g. STDERR logs),
set the proper log level on the relevant handler, instead of the logger
e.g. logger.handers[0].setLevel(INFO)
Args:
name: The name of the logger.
Returns:
The logging.Logger object.
"""
logger = logging.getLogger(name)
logger.addFilter(AxOutputNameFilter())
return logger
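# Usage sketch (illustrative, not part of the original module):
#
#     logger = get_logger("ax.example")
#     logger.info("model fitted", extra={"output_name": "ModelBridge"})
#
# AxOutputNameFilter leaves an explicitly provided output_name untouched and
# falls back to the logger's name otherwise, so the formatter below can always
# rely on %(output_name)s being present.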
# pyre-fixme[24]: Generic type `logging.StreamHandler` expects 1 type parameter.
def build_stream_handler(level: int = DEFAULT_LOG_LEVEL) -> logging.StreamHandler:
"""Build the default stream handler used for most Ax logging. Sets
default level to INFO, instead of WARNING.
Args:
level: The log level. By default, sets level to INFO
Returns:
A logging.StreamHandler instance
"""
console = logging.StreamHandler()
console.setLevel(level=level)
formatter = _build_stream_formatter()
console.setFormatter(formatter)
return console
def build_file_handler(
filepath: str,
level: int = DEFAULT_LOG_LEVEL
# pyre-fixme[24]: Generic type `logging.StreamHandler` expects 1 type parameter.
) -> logging.StreamHandler:
"""Build a file handle that logs entries to the given file, using the
same formatting as the stream handler.
Args:
filepath: Location of the file to log output to. If the file exists, output
will be appended. If it does not exist, a new file will be created.
level: The log level. By default, sets level to INFO
Returns:
A logging.FileHandler instance
"""
if os.path.isfile(filepath):
get_logger(__name__).warning(
f"Log file ({filepath}) already exists, appending logs."
)
logfile = logging.FileHandler(filepath)
logfile.setLevel(level=level)
formatter = _build_stream_formatter()
logfile.setFormatter(formatter)
return logfile
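# Usage sketch (illustrative): to also capture Ax logs in a file, attach the
# handler to the root Ax logger, e.g.
#
#     get_logger(AX_ROOT_LOGGER_NAME).addHandler(build_file_handler("ax.log"))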
def _build_stream_formatter() -> logging.Formatter:
"""Default formatter for log messages. Add timestamps to log messages."""
return logging.Formatter(
fmt="[%(levelname)s %(asctime)s] %(output_name)s: %(message)s",
datefmt="%m-%d %H:%M:%S",
)
# pyre-ignore (ignoring Any in argument and output typing)
def _round_floats_for_logging(item: Any, decimal_places: int = 2) -> Any:
"""Round a number or numbers in a mapping to a given number of decimal places.
    If the item or the values in the dictionary are not numbers, they are returned as is.
"""
if isinstance(item, float):
return round(item, decimal_places)
elif isinstance(item, dict):
return {
k: _round_floats_for_logging(item=v, decimal_places=decimal_places)
for k, v in item.items()
}
elif isinstance(item, list):
return [
_round_floats_for_logging(item=i, decimal_places=decimal_places)
for i in item
]
elif isinstance(item, tuple):
return tuple(
_round_floats_for_logging(item=i, decimal_places=decimal_places)
for i in item
)
return item
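# Illustrative example (not from the original source):
#
#     _round_floats_for_logging({"a": 1.2345, "b": [2.7182, "text"]})
#     # -> {"a": 1.23, "b": [2.72, "text"]}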
def set_stderr_log_level(level: int) -> None:
"""Set the log level for stream handler, such that logs of given level
are printed to STDERR by the root logger
"""
ROOT_STREAM_HANDLER.setLevel(level)
# pyre-ignore[3] Supports generic callables
def disable_logger(
name: str, level: int = logging.ERROR
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""Disables a specific logger by name (e.g. module path) by setting the
log level at the given one for the duration of the decorated function's call
"""
def decorator(func: Callable[..., T]) -> Callable[..., T]:
@wraps(func)
def inner(*args: Any, **kwargs: Any) -> T:
logger = get_logger(name)
prev_level = logger.getEffectiveLevel()
logger.setLevel(level)
try:
return func(*args, **kwargs)
finally:
logger.setLevel(prev_level)
return inner
return decorator
"""Sets up Ax's root logger to not propogate to Python's root logger and
use the default stream handler.
"""
ROOT_LOGGER: logging.Logger = get_logger(AX_ROOT_LOGGER_NAME)
ROOT_LOGGER.propagate = False
# Use a permissive level on the logger; instead, make each
# handler as permissive or restrictive as desired.
ROOT_LOGGER.setLevel(logging.DEBUG)
# pyre-fixme[24]: Generic type `logging.StreamHandler` expects 1 type parameter.
ROOT_STREAM_HANDLER: logging.StreamHandler = build_stream_handler()
ROOT_LOGGER.addHandler(ROOT_STREAM_HANDLER)
def make_indices_str(indices: Iterable[int]) -> str:
"""Generate a string representation of an iterable of indices;
    if the indices are contiguous, returns a string formatted like
    '<min_idx> - <max_idx>', otherwise a string formatted like
    '[idx_1, idx_2, ..., idx_n]'.
"""
idcs = sorted(indices)
contiguous = len(idcs) > 1 and (idcs[-1] - idcs[0] == len(idcs) - 1)
return f"{idcs[0]} - {idcs[-1]}" if contiguous else f"{idcs}"
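# Illustrative example (not from the original source):
#
#     make_indices_str([3, 4, 5, 6])  # -> "3 - 6"
#     make_indices_str([7, 1, 3])     # -> "[1, 3, 7]"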
| 33.255435
| 84
| 0.676745
|
ae4760e9532e26f603898cfaa734634cc1a84b3d
| 1,721
|
py
|
Python
|
test/test_ilf_comp.py
|
hertogp/jabs
|
ed419caa448075dcf327d2af561952a385115228
|
[
"MIT"
] | 1
|
2021-05-14T03:17:48.000Z
|
2021-05-14T03:17:48.000Z
|
test/test_ilf_comp.py
|
hertogp/jabs
|
ed419caa448075dcf327d2af561952a385115228
|
[
"MIT"
] | null | null | null |
test/test_ilf_comp.py
|
hertogp/jabs
|
ed419caa448075dcf327d2af561952a385115228
|
[
"MIT"
] | 1
|
2017-10-31T02:04:52.000Z
|
2017-10-31T02:04:52.000Z
|
'''
test ilf compiler
'''
import pytest
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '.')
from jabs.ilf.comp import compile
# tmp
from jabs.ilf.core import Ival, pp2portstr, Ip4Filter
def test_bad_input():
pass
def test_good_input():
txt = """
dns 53/udp, 53/tcp, 10.10.10.10, 10.10.11.11
web 80/tcp, 443/tcp, 8080/tcp, 15-21/tcp, 18-31/tcp
~ 10/8 > 11/8 @ web : permit = { "color": "green"}
~ (dns) any <> dns @ dns : permit = 10
"""
ipf = compile(txt)
m = ipf.match('10.1.2.3', '11.11.11.11', '8080/tcp')
assert m.rule == 0
assert m.action == 'permit'
assert m.name == ''
assert m.object == { "color": "green" }
m = ipf.match('1.2.3.4', '10.10.10.10', '53/udp')
assert m.rule == 1
assert m.action == 'permit'
assert m.name == 'dns'
assert m.object == 10
print(ipf.ruleset('1.2.3.4', '10.10.10.10', '53/udp'))
print(ipf.ruleset('1.2.3.4', '10.10.10.10', None))
print(ipf.ruleset(None, '10.10.10.10', None))
print(ipf.ruleset(None, None, None))
print(ipf.ruleset(None, None, '99/tcp'))
def test_csv_roundtrip():
txt = """
winAD 389/tcp, 135/udp, 135/tcp, 123/udp, 1.1.1.0/27, 445/tcp
web 80/tcp, 8080/tcp, 443/tcp, 10.10.10.0/24
~ (auth) web > winAD @ winAD : permit = {"type": "auth"}
~ (webbers) any > web @ web : permit
"""
ipf = compile(txt)
csv1 = ipf.to_csv()
csv2 = Ip4Filter().from_csv(csv1).to_csv()
assert csv1 == csv2
print(ipf.ruleset('10.10.10.10', '1.1.1.1', None))
def test_pp2portstr():
assert '18/udp' == pp2portstr(18, 17)
with pytest.raises(ValueError):
assert '18/udp' == pp2portstr(-1, 17)
| 25.686567
| 69
| 0.566531
|
02b0a056c2c1f358fb9356b3398665a04be550ce
| 4,221
|
py
|
Python
|
Image2CAD/Core/Features/Cognition/DimensionalLinesFeature.py
|
David-Alfred/Image2CAD
|
0c1399717b96904524d60576b6c6e313107c574f
|
[
"Apache-2.0"
] | null | null | null |
Image2CAD/Core/Features/Cognition/DimensionalLinesFeature.py
|
David-Alfred/Image2CAD
|
0c1399717b96904524d60576b6c6e313107c574f
|
[
"Apache-2.0"
] | null | null | null |
Image2CAD/Core/Features/Cognition/DimensionalLinesFeature.py
|
David-Alfred/Image2CAD
|
0c1399717b96904524d60576b6c6e313107c574f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Aditya Intwala
Copyright (C) 2016, Aditya Intwala.
Licensed under the Apache License 2.0. See LICENSE file in the project root for full license information.
"""
import cv2
from Core.Features.LineSegments.LineSegmentsFeature import LineSegmentsFeature
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import Element, SubElement
global img, threshImg, imageCorner, imageHoughLine, imageOutput, imgHeight, imgWidth, imgChannels, blankImg
global ArrowHeadsList
class DimensionalLinesFeature():
@staticmethod
def Detect(Feature_Manager):
global make_dir_root, timestr, ArrowHeadsList
make_dir_root = Feature_Manager._RootDirectory
ExtractedArrows = Feature_Manager._DetectedArrowHead
image = Feature_Manager._ImageOriginal.copy()
ArrowHeadsList = ExtractedArrows
LineSegmentsFeature.Make_Directory(make_dir_root)
LineSegmentsFeature.InitializeDimLine(image)
cornerPts, cornerImageOutput, extractedLinesP, houghlineImageOutput = LineSegmentsFeature.EntityDetection()
arrowPts = []
for i in ExtractedArrows:
pt = i._ArrowCenter
arrowPts.append(pt)
uniqueLines = LineSegmentsFeature.SegmentCreator(extractedLinesP)
segLines = LineSegmentsFeature.SeggeratedCreator(uniqueLines)
LineSegmentsFeature.cornerOnSegLine(arrowPts, segLines) # arrow centers as corners of dimension lines
detectedLineSegments = LineSegmentsFeature.DetectDimensionalLineSegments(segLines, ArrowHeadsList)
DimensionalLineImage = LineSegmentsFeature.PlotLine(detectedLineSegments)
LineSegmentsFeature.DisplayOutputs()
cv2.imwrite(make_dir_root + "/DimensionalLine_Extraction_Output.png", DimensionalLineImage)
return detectedLineSegments, DimensionalLineImage
@staticmethod
def Dump(make_dir_root, time, segments):
Root = Element("Root")
Extracted_DimensionalLine = SubElement(Root, "Extracted_DimensionalLine")
i = 0
for item in segments:
Extracted_Dimension_Line = SubElement(Extracted_DimensionalLine, "Extracted_Dimension_Line")
Extracted_Dimension_Line.text = str(i)
for ar in item._ArrowHeads:
Arrow_Head = SubElement(Extracted_Dimension_Line, "Arrow_Head")
p1 = ar._BoundingBoxP1
p2 = ar._BoundingBoxP2
center = ar._ArrowCenter
BB_Min_Point = SubElement(Arrow_Head, "BB_Min_Point")
BB_Min_Point_X = SubElement(BB_Min_Point, "X")
BB_Min_Point_X.text = str(p1.x)
BB_Min_Point_Y = SubElement(BB_Min_Point, "Y")
BB_Min_Point_Y.text = str(p1.y)
BB_Max_Point = SubElement(Arrow_Head, "BB_Max_Point")
BB_Max_Point_X = SubElement(BB_Max_Point, "X")
BB_Max_Point_X.text = str(p2.x)
BB_Max_Point_Y = SubElement(BB_Max_Point, "Y")
BB_Max_Point_Y.text = str(p2.y)
Centroid = SubElement(Arrow_Head, "Centroid")
X_Point = SubElement(Centroid, "X")
X_Point.text = str(center.x)
Y_Point = SubElement(Centroid, "Y")
Y_Point.text = str(center.y)
for ls in item._Leaders:
Segment = SubElement(Extracted_Dimension_Line, "Segment")
Start_Point = SubElement(Segment, "Start_Point")
Start_Point_X = SubElement(Start_Point, "X")
Start_Point_X.text = str(ls.startPoint.x)
Start_Point_Y = SubElement(Start_Point, "Y")
Start_Point_Y.text = str(ls.startPoint.y)
End_Point = SubElement(Segment, "End_Point")
End_Point_X = SubElement(End_Point, "X")
End_Point_X.text = str(ls.endPoint.x)
End_Point_Y = SubElement(End_Point, "Y")
End_Point_Y.text = str(ls.endPoint.y)
i += 1
tree = ET.ElementTree(Root)
tree.write(make_dir_root +"/Dimensional_Line_Segment_Extraction.xml")
| 44.431579
| 115
| 0.654347
|
e7f435098eeab6eccb76a9b9b7ac4308caa0bbe8
| 702
|
py
|
Python
|
module1-introduction-to-sql/rpg_queries.py
|
keilayb/DS-Unit-3-Sprint-2-SQL-and-Databases
|
3f053923d223b1bd081b07918a6d95fe60aefeb6
|
[
"MIT"
] | null | null | null |
module1-introduction-to-sql/rpg_queries.py
|
keilayb/DS-Unit-3-Sprint-2-SQL-and-Databases
|
3f053923d223b1bd081b07918a6d95fe60aefeb6
|
[
"MIT"
] | null | null | null |
module1-introduction-to-sql/rpg_queries.py
|
keilayb/DS-Unit-3-Sprint-2-SQL-and-Databases
|
3f053923d223b1bd081b07918a6d95fe60aefeb6
|
[
"MIT"
] | null | null | null |
import sqlite3
from queries import all_queries
import pandas as pd
conn = sqlite3.connect("rpg_db.sqlite3")
curs = conn.cursor()
result_dfs = []
for quer in all_queries:
curs.execute(quer)
result = curs.fetchall()
result_as_df = pd.DataFrame(result)
result_dfs.append(result_as_df)
print(result_dfs[0].head(), '\n')
print(result_dfs[1].head(), "\n", "0: cleric, 1: fighter, 2: mage, 3: necromancer, 4: thief", "\n" )
for num in range(2, 5):
to_call = result_dfs[num]
print(to_call.head(), '\n')
for num in range(5, 7):
to_call = result_dfs[num]
print(to_call.head(20), "\n")
for num in range(7, 9):
to_call = result_dfs[num]
print(to_call.head(), '\n')
| 21.9375
| 100
| 0.656695
|
d841a1024459527d57002a8126ed0b76bcc97996
| 12,357
|
py
|
Python
|
controller/api/south_migrations/0010_auto__add_field_build_sha__add_field_build_procfile__add_field_build_d.py
|
yun-an/deis
|
de27c11475bb7ca24816f288aa115699a1c37e26
|
[
"Apache-2.0"
] | 3,375
|
2015-01-01T04:03:45.000Z
|
2022-02-08T14:53:45.000Z
|
controller/api/south_migrations/0010_auto__add_field_build_sha__add_field_build_procfile__add_field_build_d.py
|
yun-an/deis
|
de27c11475bb7ca24816f288aa115699a1c37e26
|
[
"Apache-2.0"
] | 2,422
|
2015-01-01T02:40:01.000Z
|
2021-11-30T07:50:32.000Z
|
controller/api/south_migrations/0010_auto__add_field_build_sha__add_field_build_procfile__add_field_build_d.py
|
yun-an/deis
|
de27c11475bb7ca24816f288aa115699a1c37e26
|
[
"Apache-2.0"
] | 688
|
2015-01-01T00:36:48.000Z
|
2022-01-22T00:32:07.000Z
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Build.sha'
db.add_column(u'api_build', 'sha',
self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True),
keep_default=False)
# Adding field 'Build.procfile'
db.add_column(u'api_build', 'procfile',
self.gf('json_field.fields.JSONField')(default=u'{}', blank=True),
keep_default=False)
# Adding field 'Build.dockerfile'
db.add_column(u'api_build', 'dockerfile',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Build.sha'
db.delete_column(u'api_build', 'sha')
# Deleting field 'Build.procfile'
db.delete_column(u'api_build', 'procfile')
# Deleting field 'Build.dockerfile'
db.delete_column(u'api_build', 'dockerfile')
models = {
u'api.app': {
'Meta': {'object_name': 'App'},
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Cluster']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'structure': ('json_field.fields.JSONField', [], {'default': "u'{}'", 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.build': {
'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Build'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dockerfile': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'procfile': ('json_field.fields.JSONField', [], {'default': "u'{}'", 'blank': 'True'}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.cluster': {
'Meta': {'object_name': 'Cluster'},
'auth': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'hosts': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'options': ('json_field.fields.JSONField', [], {'default': "u'{}'", 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "u'coreos'", 'max_length': '16'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.config': {
'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Config'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'values': ('json_field.fields.JSONField', [], {'default': "u'{}'", 'blank': 'True'})
},
u'api.container': {
'Meta': {'ordering': "[u'created']", 'object_name': 'Container'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.PositiveIntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Release']"}),
'state': ('django_fsm.FSMField', [], {'default': "u'initialized'", 'max_length': '50'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.key': {
'Meta': {'unique_together': "((u'owner', u'id'),)", 'object_name': 'Key'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'public': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.push': {
'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'uuid'),)", 'object_name': 'Push'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'receive_repo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'receive_user': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sha': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'ssh_connection': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ssh_original_command': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'api.release': {
'Meta': {'ordering': "[u'-created']", 'unique_together': "((u'app', u'version'),)", 'object_name': 'Release'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.App']"}),
'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Build']"}),
'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Config']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('api.fields.UuidField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'}),
'version': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['api']
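# Illustrative sketch, not part of the generated migration above: the frozen
# ``models`` dict is the historical ORM snapshot that South hands back to a
# migration as ``orm``. The class below shows the shape of a hand-written data
# migration built on such a snapshot; the ``api.Cluster`` lookup reuses an
# entry shown above purely as an assumed example.
from south.v2 import SchemaMigration as _SchemaMigrationSketch
class _ExampleDataMigrationSketch(_SchemaMigrationSketch):
    def forwards(self, orm):
        # ``orm`` resolves model classes from the frozen dict, not models.py,
        # so later edits to the real models cannot break this migration.
        for cluster in orm['api.Cluster'].objects.all():
            print(cluster.domain)
    def backwards(self, orm):
        pass
    models = {}  # in a real migration: the full frozen dict shown above
    complete_apps = ['api']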
| 72.263158
| 195
| 0.547139
|
3a919cba2a9959cffd528a6bd65cc2f127c2adb2
| 6,453
|
py
|
Python
|
tests/miscellaneous/test_table_handler.py
|
astro-projects/astro
|
7fa0404fc690569ff85e379ecca54778f09a9333
|
[
"Apache-2.0"
] | 71
|
2021-12-06T22:41:59.000Z
|
2022-03-31T21:47:16.000Z
|
tests/miscellaneous/test_table_handler.py
|
astro-projects/astro
|
7fa0404fc690569ff85e379ecca54778f09a9333
|
[
"Apache-2.0"
] | 171
|
2021-12-14T07:34:57.000Z
|
2022-03-31T21:04:15.000Z
|
tests/miscellaneous/test_table_handler.py
|
astro-projects/astro
|
7fa0404fc690569ff85e379ecca54778f09a9333
|
[
"Apache-2.0"
] | 11
|
2021-12-06T22:46:23.000Z
|
2022-03-31T18:09:46.000Z
|
from astro.sql.table import Metadata, Table
from astro.utils.table_handler import TableHandler
def test__set_variables_from_first_table_with_same_db_tables_in_op_args():
    """Test _set_variables_from_first_table() when the tables passed in op_args point to the same database"""
def dummy_function(param_1: str, param_2: str): # skipcq: PTC-W0049, PY-D0003
pass
handler = TableHandler()
handler.python_callable = dummy_function
handler.op_kwargs = {"param_1": "dummy_value", "param_2": "dummy_value"}
handler.parameters = {}
handler.op_args = (
Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
),
Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
),
)
handler._set_variables_from_first_table()
assert handler.conn_id == "conn_1"
assert handler.database == "database_1"
assert handler.schema == "scheme_1"
def test__set_variables_from_first_table_with_different_db_tables_in_op_args():
    """Test _set_variables_from_first_table() when the tables passed in op_args point to different databases"""
def dummy_function(param_1: str, param_2: str): # skipcq: PTC-W0049, PY-D0003
pass
handler = TableHandler()
handler.python_callable = dummy_function
handler.op_kwargs = {"param_1": "dummy_value", "param_2": "dummy_value"}
handler.parameters = {}
handler.op_args = (
Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
),
Table(
conn_id="conn_2",
metadata=Metadata(
database="database_2",
schema="scheme_2",
),
),
)
handler._set_variables_from_first_table()
assert not hasattr(handler, "conn_id")
assert not hasattr(handler, "database")
assert not hasattr(handler, "schema")
assert not hasattr(handler, "warehouse")
assert not hasattr(handler, "role")
def test__set_variables_from_first_table_with_same_db_tables_in_python_callable():
    """Test _set_variables_from_first_table() when the tables passed via python_callable kwargs point to the same database"""
table_1 = Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
)
table_2 = Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
)
def dummy_function(param_1: Table, param_2: Table): # skipcq: PTC-W0049, PY-D0003
pass
handler = TableHandler()
handler.python_callable = dummy_function
handler.op_args = ()
handler.parameters = {}
handler.op_kwargs = {
"param_1": table_1,
"param_2": table_2,
}
handler._set_variables_from_first_table()
assert handler.conn_id == "conn_1"
assert handler.database == "database_1"
assert handler.schema == "scheme_1"
def test__set_variables_from_first_table_with_different_db_tables_in_python_callable():
    """Test _set_variables_from_first_table() when the tables passed via python_callable kwargs point to different databases"""
table_1 = Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
)
table_2 = Table(
conn_id="conn_2",
metadata=Metadata(
database="database_2",
schema="scheme_2",
),
)
def dummy_function(param_1: Table, param_2: Table): # skipcq: PTC-W0049, PY-D0003
pass
handler = TableHandler()
handler.python_callable = dummy_function
handler.op_args = ()
handler.parameters = {}
handler.op_kwargs = {
"param_1": table_1,
"param_2": table_2,
}
handler._set_variables_from_first_table()
assert not hasattr(handler, "conn_id")
assert not hasattr(handler, "database")
assert not hasattr(handler, "schema")
assert not hasattr(handler, "warehouse")
assert not hasattr(handler, "role")
def test__set_variables_from_first_table_with_same_db_tables_in_parameters():
    """Test _set_variables_from_first_table() when the tables passed in parameters point to the same database"""
def dummy_function(param_1: str, param_2: str): # skipcq: PTC-W0049, PY-D0003
pass
handler = TableHandler()
handler.op_args = ()
handler.python_callable = dummy_function
handler.op_kwargs = {"param_1": "dummy_value", "param_2": "dummy_value"}
handler.parameters = {
"param_1": Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
),
"param_3": Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
),
}
handler._set_variables_from_first_table()
assert handler.conn_id == "conn_1"
assert handler.database == "database_1"
assert handler.schema == "scheme_1"
def test__set_variables_from_first_table_with_different_db_tables_in_parameters():
    """Test _set_variables_from_first_table() when the tables passed in parameters point to different databases"""
def dummy_function(param_1: str, param_2: str): # skipcq: PTC-W0049, PY-D0003
pass
handler = TableHandler()
handler.op_args = ()
handler.python_callable = dummy_function
handler.op_kwargs = {"param_1": "dummy_value", "param_2": "dummy_value"}
handler.parameters = {
"param_1": Table(
conn_id="conn_1",
metadata=Metadata(
database="database_1",
schema="scheme_1",
),
),
"param_3": Table(
conn_id="conn_2",
metadata=Metadata(
database="database_2",
schema="scheme_2",
),
),
}
handler._set_variables_from_first_table()
assert not hasattr(handler, "conn_id")
assert not hasattr(handler, "database")
assert not hasattr(handler, "schema")
assert not hasattr(handler, "warehouse")
assert not hasattr(handler, "role")
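# Hedged sketch, not part of the original suite: the six tests above share one
# arrange/assert pattern and could be collapsed into a single parametrized test.
# It assumes only the Table/Metadata/TableHandler imports already used in this
# file plus pytest; the helper and test names are made up for illustration.
import pytest
_SAME_DB = ("conn_1", "database_1", "scheme_1")
_OTHER_DB = ("conn_2", "database_2", "scheme_2")
def _make_table(conn_id, database, schema):
    return Table(conn_id=conn_id, metadata=Metadata(database=database, schema=schema))
@pytest.mark.parametrize("second,should_propagate", [(_SAME_DB, True), (_OTHER_DB, False)])
def test__set_variables_from_first_table_parametrized_sketch(second, should_propagate):
    def dummy_function(param_1: str, param_2: str):  # skipcq: PTC-W0049, PY-D0003
        pass
    handler = TableHandler()
    handler.python_callable = dummy_function
    handler.op_kwargs = {"param_1": "dummy_value", "param_2": "dummy_value"}
    handler.parameters = {}
    handler.op_args = (_make_table(*_SAME_DB), _make_table(*second))
    handler._set_variables_from_first_table()
    assert hasattr(handler, "conn_id") == should_propagate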
| 30.582938
| 111
| 0.618162
|
d516af84b565cd97a3c9c5afe33a49d3d5a03f64
| 4,677
|
py
|
Python
|
catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/conditions/references.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | 2
|
2018-06-25T02:51:25.000Z
|
2018-06-25T02:51:27.000Z
|
catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/conditions/references.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src/00-infrastructure/easy_regression/include/easy_regression/conditions/references.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | 2
|
2018-09-04T06:44:21.000Z
|
2018-10-15T02:30:50.000Z
|
from contracts.utils import check_isinstance
import yaml
from duckietown_utils.exception_utils import raise_wrapped, check_is_in
from duckietown_utils.system_cmd_imp import contract
from duckietown_utils.text_utils import remove_prefix, string_split
from easy_regression.conditions.eval import Evaluable, EvaluationError
from easy_regression.conditions.interface import RTParseError
from easy_regression.conditions.result_db import ResultDBEntry
def parse_reference(s):
"""
v:analyzer/log/statistic~master@date
"""
prefix = 'v:'
if s.startswith(prefix):
s = remove_prefix(s, prefix)
T_DATE = '@'
T_BRANCH = '~'
T_COMMIT = '?'
TS = [T_DATE, T_BRANCH, T_COMMIT]
if (T_COMMIT in s) and (T_DATE in s):
msg = 'Cannot specify commit and date: %s' % s
raise RTParseError(msg)
date = None
commit = None
branch_spec = None
def get_last_one(s0):
for c in s0[::-1]:
if c in TS:
return c
while True:
which = get_last_one(s)
if which is None:
break
elif which == T_DATE:
s, date_spec = string_split(s, T_DATE)
if not date_spec:
msg = 'Invalid date spec %r.' % date_spec
raise RTParseError(msg)
date = parse_date_spec(date_spec)
elif which == T_BRANCH:
s, branch_spec = string_split(s, T_BRANCH)
if not branch_spec:
msg = 'Invalid branch spec %r.' % branch_spec
raise RTParseError(msg)
elif which == T_COMMIT:
s, commit = string_split(s, T_COMMIT)
if not commit:
                    msg = 'Invalid commit %r.' % commit
raise RTParseError(msg)
tokens = s.split('/')
        if len(tokens) < 3:
msg = 'Expected "analyzer/log/statistic"'
raise RTParseError(msg)
analyzer = tokens[0]
log = tokens[1]
statistic = tuple(tokens[2:])
return StatisticReference(analyzer=analyzer, log=log, statistic=statistic,
branch=branch_spec, date=date, commit=commit)
try:
c = yaml.load(s)
if isinstance(c, str) and '/' in c:
msg = 'The syntax is "v:analyzer/log/statistic"'
msg += '\nInvalid string: %r' % c
raise RTParseError(msg)
return Constant(c)
except yaml.YAMLError:
msg = 'Could not parse reference %s.' % s.__repr__()
raise RTParseError(msg)
def parse_date_spec(d):
from dateutil.parser import parse
try:
return parse(d)
except ValueError as e:
msg = 'Cannot parse date %s.' % d.__repr__()
raise_wrapped(RTParseError, e, msg, compact=True)
class StatisticReference(Evaluable):
@contract(statistic='seq(str)')
def __init__(self, analyzer, log, statistic, branch, date, commit):
self.analyzer = analyzer
self.log = log
self.statistic = statistic
self.branch = branch
self.date = date
self.commit = commit
def __str__(self):
return ('StatisticReference(%s,%s,%s,%s,%s)' %
(self.analyzer, self.log, self.statistic, self.branch, self.date))
def eval(self, rdb):
db_entry = rdb.query_results_one(branch=self.branch,
date=self.date,
commit=self.commit)
check_isinstance(db_entry, ResultDBEntry)
# print('Results= %s' % db_entry.__repr__())
results = db_entry.results
check_is_in('analyzer', self.analyzer, results, EvaluationError)
logs = results[self.analyzer]
check_is_in('log', self.log, logs, EvaluationError)
forlog = logs[self.log]
val = eval_name(forlog, self.statistic)
return val
@contract(name_tuple=tuple)
def eval_name(x, name_tuple):
if not name_tuple:
return x
else:
first = name_tuple[0]
rest = name_tuple[1:]
check_is_in('value', first, x, EvaluationError)
xx = x[first]
return eval_name(xx, rest)
class Constant(Evaluable):
def __init__(self, x):
self.x = x
def eval(self, _test_results):
return self.x
def __repr__(self):
return 'Constant(%s)' % self.x.__repr__()
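# Hedged usage sketch appended for illustration only: it exercises the two
# parse paths defined above with made-up inputs and prints via the
# __str__/__repr__ methods of this module.
if __name__ == "__main__":
    ref = parse_reference("v:analyzer/log/statistic~master@2018-01-01")
    print(ref)  # roughly: StatisticReference(analyzer,log,('statistic',),master,2018-01-01 00:00:00)
    const = parse_reference("42")
    print(const)  # Constant(42) -- plain YAML input, not a "v:" reference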
| 32.479167
| 83
| 0.555698
|
e3986a524b4b18edbfcaaf2b77eddcf8dbcba21a
| 55,619
|
py
|
Python
|
tests/test_download_photos.py
|
horizon0514/icloud_photos_downloader
|
7e9525bd169bc97f82b68c2461430c58f8e4d8e9
|
[
"MIT"
] | null | null | null |
tests/test_download_photos.py
|
horizon0514/icloud_photos_downloader
|
7e9525bd169bc97f82b68c2461430c58f8e4d8e9
|
[
"MIT"
] | null | null | null |
tests/test_download_photos.py
|
horizon0514/icloud_photos_downloader
|
7e9525bd169bc97f82b68c2461430c58f8e4d8e9
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from vcr import VCR
import os
import sys
import shutil
import pytest
import mock
import datetime
from mock import call, ANY
from click.testing import CliRunner
import piexif
from piexif._exceptions import InvalidImageDataError
from pyicloud_ipd.services.photos import PhotoAsset, PhotoAlbum, PhotosService
from pyicloud_ipd.base import PyiCloudService
from pyicloud_ipd.exceptions import PyiCloudAPIResponseError
from requests.exceptions import ConnectionError
from icloudpd.base import main
from tests.helpers.print_result_exception import print_result_exception
vcr = VCR(decode_compressed_response=True)
class DownloadPhotoTestCase(TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
def test_download_and_skip_existing_photos(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
os.makedirs(os.path.join(base_dir, "2018/07/30/"))
with open(os.path.join(base_dir, "2018/07/30/IMG_7408.JPG"), "a") as f:
f.truncate(1151066)
with open(os.path.join(base_dir, "2018/07/30/IMG_7407.JPG"), "a") as f:
f.truncate(656257)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"5",
"--skip-videos",
"--skip-live-photos",
"--set-exif-datetime",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn("DEBUG Looking up all photos from album All Photos...", self._caplog.text)
self.assertIn(
f"INFO Downloading 5 original photos to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertNotIn(
"IMG_7409.MOV",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/30/IMG_7408.JPG'))} already exists.",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/30/IMG_7407.JPG'))} already exists.",
self._caplog.text,
)
self.assertIn(
"INFO Skipping IMG_7405.MOV, only downloading photos.",
self._caplog.text,
)
self.assertIn(
"INFO Skipping IMG_7404.MOV, only downloading photos.",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
# Check that file was downloaded
self.assertTrue(
os.path.exists(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409.JPG"))))
# Check that mtime was updated to the photo creation date
photo_mtime = os.path.getmtime(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409.JPG")))
photo_modified_time = datetime.datetime.utcfromtimestamp(photo_mtime)
self.assertEqual(
"2018-07-31 07:22:24",
photo_modified_time.strftime('%Y-%m-%d %H:%M:%S'))
assert result.exit_code == 0
@pytest.mark.skipif(sys.platform == 'win32',
reason="does not run on windows -- wrong dates")
def test_download_photos_and_set_exif(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
os.makedirs(os.path.join(base_dir, "2018/07/30/"))
with open(os.path.join(base_dir, "2018/07/30/IMG_7408.JPG"), "a") as f:
f.truncate(1151066)
with open(os.path.join(base_dir, "2018/07/30/IMG_7407.JPG"), "a") as f:
f.truncate(656257)
# Download the first photo, but mock the video download
orig_download = PhotoAsset.download
def mocked_download(self, size):
if not hasattr(PhotoAsset, "already_downloaded"):
response = orig_download(self, size)
setattr(PhotoAsset, "already_downloaded", True)
return response
return mock.MagicMock()
with mock.patch.object(PhotoAsset, "download", new=mocked_download):
with mock.patch(
"icloudpd.exif_datetime.get_photo_exif"
) as get_exif_patched:
get_exif_patched.return_value = False
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"4",
"--set-exif-datetime",
# '--skip-videos',
# "--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading 4 original photos and videos to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
# YYYY:MM:DD is the correct format.
self.assertIn(
f"DEBUG Setting EXIF timestamp for {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}: 2018:07:31 07:22:24", # TODO On windows it is picked as 00:22:24
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
def test_download_photos_and_get_exif_exceptions(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch.object(piexif, "load") as piexif_patched:
piexif_patched.side_effect = InvalidImageDataError
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--set-exif-datetime",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn("DEBUG Looking up all photos from album All Photos...", self._caplog.text)
self.assertIn(
f"INFO Downloading the first original photo to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
f"DEBUG Error fetching EXIF data for {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
f"DEBUG Error setting EXIF data for {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
def test_skip_existing_downloads(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
os.makedirs(os.path.join(base_dir, "2018/07/31"))
with open(os.path.join(base_dir, "2018/07/31/IMG_7409.JPG"), "a") as f:
f.truncate(1884695)
with open(os.path.join(base_dir, "2018/07/31/IMG_7409.MOV"), "a") as f:
f.truncate(3294075)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
# '--skip-videos',
# "--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...", self._caplog.text
)
self.assertIn(
f"INFO Downloading the first original photo or video to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))} already exists.",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.MOV'))} already exists.",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
@pytest.mark.skipif(sys.platform == 'win32',
reason="requires large timeout on windows to create big files")
def test_until_found(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
os.makedirs(os.path.join(base_dir, "2018/07/30/"))
os.makedirs(os.path.join(base_dir, "2018/07/31/"))
files_to_download = []
files_to_skip = []
files_to_download.append(("2018/07/31/IMG_7409.JPG", "photo"))
files_to_download.append(("2018/07/31/IMG_7409-medium.MOV", "photo"))
files_to_skip.append(("2018/07/30/IMG_7408.JPG", "photo", 1151066))
files_to_skip.append(("2018/07/30/IMG_7408-medium.MOV", "photo", 894467))
files_to_download.append(("2018/07/30/IMG_7407.JPG", "photo"))
files_to_download.append(("2018/07/30/IMG_7407-medium.MOV", "photo"))
files_to_skip.append(("2018/07/30/IMG_7405.MOV", "video", 36491351))
files_to_skip.append(("2018/07/30/IMG_7404.MOV", "video", 225935003))
files_to_download.append(("2018/07/30/IMG_7403.MOV", "video"))
files_to_download.append(("2018/07/30/IMG_7402.MOV", "video"))
        files_to_skip.append(("2018/07/30/IMG_7401.MOV", "photo", 565699696)) # TODO large files on Windows time out
files_to_skip.append(("2018/07/30/IMG_7400.JPG", "photo", 2308885))
files_to_skip.append(("2018/07/30/IMG_7400-medium.MOV", "photo", 1238639))
files_to_skip.append(("2018/07/30/IMG_7399.JPG", "photo", 2251047))
files_to_download.append(("2018/07/30/IMG_7399-medium.MOV", "photo"))
for f in files_to_skip:
with open(os.path.join(base_dir, f[0]), "a") as fi:
fi.truncate(f[2])
with mock.patch("icloudpd.download.download_media") as dp_patched:
dp_patched.return_value = True
with mock.patch("icloudpd.download.os.utime") as ut_patched:
ut_patched.return_value = None
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--live-photo-size",
"medium",
"--until-found",
"3",
"--recent",
"20",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
expected_calls = list(
map(
lambda f: call(
ANY, ANY, os.path.join(base_dir, os.path.normpath(f[0])),
"mediumVideo" if (
f[1] == 'photo' and f[0].endswith('.MOV')
) else "original"),
files_to_download,
)
)
dp_patched.assert_has_calls(expected_calls)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...", self._caplog.text
)
self.assertIn(
f"INFO Downloading ??? original photos and videos to {base_dir} ...",
self._caplog.text,
)
for f in files_to_skip:
expected_message = f"INFO {os.path.join(base_dir, os.path.normpath(f[0]))} already exists."
self.assertIn(expected_message, self._caplog.text)
self.assertIn(
"INFO Found 3 consecutive previously downloaded photos. Exiting",
self._caplog.text,
)
self.assertNotIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/30/IMG_7399-medium.MOV'))} already exists.",
self._caplog.text
)
assert result.exit_code == 0
def test_handle_io_error(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
with mock.patch("icloudpd.download.open", create=True) as m:
# Raise IOError when we try to write to the destination file
m.side_effect = IOError
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn("DEBUG Looking up all photos from album All Photos...", self._caplog.text)
self.assertIn(
f"INFO Downloading the first original photo to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
"ERROR IOError while writing file to "
f"{os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}! "
"You might have run out of disk space, or the file might "
"be too large for your OS. Skipping this file...",
self._caplog.text,
)
assert result.exit_code == 0
def test_handle_session_error_during_download(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
def mock_raise_response_error(arg):
raise PyiCloudAPIResponseError("Invalid global session", 100)
with mock.patch("time.sleep") as sleep_mock:
with mock.patch.object(PhotoAsset, "download") as pa_download:
pa_download.side_effect = mock_raise_response_error
# Let the initial authenticate() call succeed,
# but do nothing on the second try.
orig_authenticate = PyiCloudService.authenticate
def mocked_authenticate(self):
if not hasattr(self, "already_authenticated"):
orig_authenticate(self)
setattr(self, "already_authenticated", True)
with mock.patch.object(
PyiCloudService, "authenticate", new=mocked_authenticate
):
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
# Error msg should be repeated 5 times
assert (
self._caplog.text.count(
"Session error, re-authenticating..."
)
== 5
)
self.assertIn(
"INFO Could not download IMG_7409.JPG! Please try again later.",
self._caplog.text,
)
# Make sure we only call sleep 4 times (skip the first retry)
self.assertEqual(sleep_mock.call_count, 4)
assert result.exit_code == 0
def test_handle_session_error_during_photo_iteration(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
def mock_raise_response_error(offset):
raise PyiCloudAPIResponseError("Invalid global session", 100)
with mock.patch("time.sleep") as sleep_mock:
with mock.patch.object(PhotoAlbum, "photos_request") as pa_photos_request:
pa_photos_request.side_effect = mock_raise_response_error
# Let the initial authenticate() call succeed,
# but do nothing on the second try.
orig_authenticate = PyiCloudService.authenticate
def mocked_authenticate(self):
if not hasattr(self, "already_authenticated"):
orig_authenticate(self)
setattr(self, "already_authenticated", True)
with mock.patch.object(
PyiCloudService, "authenticate", new=mocked_authenticate
):
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
# Error msg should be repeated 5 times
assert (
self._caplog.text.count(
"Session error, re-authenticating..."
)
== 5
)
self.assertIn(
"INFO iCloud re-authentication failed! Please try again later.",
self._caplog.text,
)
# Make sure we only call sleep 4 times (skip the first retry)
self.assertEqual(sleep_mock.call_count, 4)
assert result.exit_code == -1
def test_handle_connection_error(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
def mock_raise_response_error(arg):
raise ConnectionError("Connection Error")
with mock.patch.object(PhotoAsset, "download") as pa_download:
pa_download.side_effect = mock_raise_response_error
# Let the initial authenticate() call succeed,
# but do nothing on the second try.
orig_authenticate = PyiCloudService.authenticate
def mocked_authenticate(self):
if not hasattr(self, "already_authenticated"):
orig_authenticate(self)
setattr(self, "already_authenticated", True)
with mock.patch("icloudpd.constants.WAIT_SECONDS", 0):
with mock.patch.object(
PyiCloudService, "authenticate", new=mocked_authenticate
):
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
# Error msg should be repeated 5 times
assert (
self._caplog.text.count(
"Error downloading IMG_7409.JPG, retrying after 0 seconds..."
)
== 5
)
self.assertIn(
"INFO Could not download IMG_7409.JPG! Please try again later.",
self._caplog.text,
)
assert result.exit_code == 0
def test_handle_albums_error(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
def mock_raise_response_error():
raise PyiCloudAPIResponseError("Api Error", 100)
with mock.patch.object(PhotosService, "_fetch_folders") as pa_photos_request:
pa_photos_request.side_effect = mock_raise_response_error
# Let the initial authenticate() call succeed,
# but do nothing on the second try.
orig_authenticate = PyiCloudService.authenticate
def mocked_authenticate(self):
if not hasattr(self, "already_authenticated"):
orig_authenticate(self)
setattr(self, "already_authenticated", True)
with mock.patch("icloudpd.constants.WAIT_SECONDS", 0):
with mock.patch.object(
PyiCloudService, "authenticate", new=mocked_authenticate
):
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
assert result.exit_code == 1
def test_missing_size(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch.object(PhotoAsset, "download") as pa_download:
pa_download.return_value = False
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"3",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...", self._caplog.text
)
self.assertIn(
f"INFO Downloading 3 original photos and videos to {base_dir} ...",
self._caplog.text,
)
# These error messages should not be repeated more than once
assert (
self._caplog.text.count(
"ERROR Could not find URL to download IMG_7409.JPG for size original!"
)
== 1
)
assert (
self._caplog.text.count(
"ERROR Could not find URL to download IMG_7408.JPG for size original!"
)
== 1
)
assert (
self._caplog.text.count(
"ERROR Could not find URL to download IMG_7407.JPG for size original!"
)
== 1
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
def test_size_fallback_to_original(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch("icloudpd.download.download_media") as dp_patched:
dp_patched.return_value = True
with mock.patch("icloudpd.download.os.utime") as ut_patched:
ut_patched.return_value = None
with mock.patch.object(PhotoAsset, "versions") as pa:
pa.return_value = ["original", "medium"]
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--size",
"thumb",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading the first thumb photo or video to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
dp_patched.assert_called_once_with(
ANY,
ANY,
f"{os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
"original",
)
assert result.exit_code == 0
def test_force_size(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch("icloudpd.download.download_media") as dp_patched:
dp_patched.return_value = True
with mock.patch.object(PhotoAsset, "versions") as pa:
pa.return_value = ["original", "medium"]
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--size",
"thumb",
"--force-size",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading the first thumb photo or video to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
"ERROR thumb size does not exist for IMG_7409.JPG. Skipping...",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
                    dp_patched.assert_not_called()
assert result.exit_code == 0
def test_invalid_creation_date(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch.object(PhotoAsset, "created", new_callable=mock.PropertyMock) as dt_mock:
# Can't mock `astimezone` because it's a readonly property, so have to
# create a new class that inherits from datetime.datetime
class NewDateTime(datetime.datetime):
def astimezone(self, tz=None):
raise ValueError('Invalid date')
dt_mock.return_value = NewDateTime(2018,1,1,0,0,0)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading the first original photo or video to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
"ERROR Could not convert photo created date to local timezone (2018-01-01 00:00:00)",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/01/01/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
@pytest.mark.skipif(sys.platform == 'win32',
reason="does not run on windows")
def test_invalid_creation_year(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch.object(PhotoAsset, "created", new_callable=mock.PropertyMock) as dt_mock:
# Can't mock `astimezone` because it's a readonly property, so have to
# create a new class that inherits from datetime.datetime
class NewDateTime(datetime.datetime):
def astimezone(self, tz=None):
raise ValueError('Invalid date')
dt_mock.return_value = NewDateTime(5,1,1,0,0,0)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-live-photos",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading the first original photo or video to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
"ERROR Could not convert photo created date to local timezone (0005-01-01 00:00:00)",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('5/01/01/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
def test_unknown_item_type(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch("icloudpd.download.download_media") as dp_patched:
dp_patched.return_value = True
with mock.patch.object(PhotoAsset, "item_type", new_callable=mock.PropertyMock) as it_mock:
it_mock.return_value = 'unknown'
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn(
"DEBUG Looking up all photos and videos from album All Photos...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading the first original photo or video to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
"INFO Skipping IMG_7409.JPG, only downloading photos and videos. (Item type was: unknown)",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
                    dp_patched.assert_not_called()
assert result.exit_code == 0
def test_download_and_dedupe_existing_photos(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
os.makedirs(os.path.join(base_dir, os.path.normpath("2018/07/31/")))
with open(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409.JPG")), "a") as f:
f.truncate(1)
with open(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409.MOV")), "a") as f:
f.truncate(1)
os.makedirs(os.path.join(base_dir, os.path.normpath("2018/07/30/")))
with open(os.path.join(base_dir, os.path.normpath("2018/07/30/IMG_7408.JPG")), "a") as f:
f.truncate(1151066)
with open(os.path.join(base_dir, os.path.normpath("2018/07/30/IMG_7408.MOV")), "a") as f:
f.truncate(1606512)
# Download the first photo, but mock the video download
orig_download = PhotoAsset.download
def mocked_download(self, size):
if not hasattr(PhotoAsset, "already_downloaded"):
response = orig_download(self, size)
setattr(PhotoAsset, "already_downloaded", True)
return response
return mock.MagicMock()
with mock.patch.object(PhotoAsset, "download", new=mocked_download):
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"5",
"--skip-videos",
# "--set-exif-datetime",
"--no-progress-bar",
"-d",
base_dir,
"--threads-num",
"1"
],
)
print_result_exception(result)
self.assertIn("DEBUG Looking up all photos from album All Photos...", self._caplog.text)
self.assertIn(
f"INFO Downloading 5 original photos to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409-1884695.JPG'))} deduplicated.",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409-1884695.JPG'))}",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409-3294075.MOV'))} deduplicated.",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409-3294075.MOV'))}",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/30/IMG_7408.JPG'))} already exists.",
self._caplog.text,
)
self.assertIn(
f"INFO {os.path.join(base_dir, os.path.normpath('2018/07/30/IMG_7408.MOV'))} already exists.",
self._caplog.text,
)
self.assertIn(
"INFO Skipping IMG_7405.MOV, only downloading photos.", self._caplog.text
)
self.assertIn(
"INFO Skipping IMG_7404.MOV, only downloading photos.", self._caplog.text
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
# Check that file was downloaded
self.assertTrue(
os.path.exists(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409-1884695.JPG"))))
# Check that mtime was updated to the photo creation date
photo_mtime = os.path.getmtime(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409-1884695.JPG")))
photo_modified_time = datetime.datetime.utcfromtimestamp(photo_mtime)
self.assertEqual(
"2018-07-31 07:22:24",
photo_modified_time.strftime('%Y-%m-%d %H:%M:%S'))
self.assertTrue(
os.path.exists(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409-3294075.MOV"))))
photo_mtime = os.path.getmtime(os.path.join(base_dir, os.path.normpath("2018/07/31/IMG_7409-3294075.MOV")))
photo_modified_time = datetime.datetime.utcfromtimestamp(photo_mtime)
self.assertEqual(
"2018-07-31 07:22:24",
photo_modified_time.strftime('%Y-%m-%d %H:%M:%S'))
assert result.exit_code == 0
@pytest.mark.skipif(sys.platform == 'win32',
reason="does not run on windows -- wrong dates")
def test_download_photos_and_set_exif_exceptions(self):
base_dir = os.path.normpath("tests/fixtures/Photos")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with mock.patch.object(piexif, "insert") as piexif_patched:
piexif_patched.side_effect = InvalidImageDataError
with mock.patch(
"icloudpd.exif_datetime.get_photo_exif"
) as get_exif_patched:
get_exif_patched.return_value = False
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--set-exif-datetime",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn("DEBUG Looking up all photos from album All Photos...", self._caplog.text)
self.assertIn(
f"INFO Downloading the first original photo to {base_dir} ...",
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
f"DEBUG Setting EXIF timestamp for {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}: 2018:07:31 07:22:24", # TODO On windows it is picked as 00:22:24
self._caplog.text,
)
self.assertIn(
f"DEBUG Error setting EXIF data for {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
assert result.exit_code == 0
def test_download_chinese(self):
base_dir = os.path.normpath("tests/fixtures/中文")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
with vcr.use_cassette("tests/vcr_cassettes/listing_photos.yml"):
# Pass fixed client ID via environment variable
os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
runner = CliRunner()
result = runner.invoke(
main,
[
"--username",
"jdoe@gmail.com",
"--password",
"password1",
"--recent",
"1",
"--skip-videos",
"--skip-live-photos",
"--set-exif-datetime",
"--no-progress-bar",
"--threads-num",
1,
"-d",
base_dir,
],
)
print_result_exception(result)
self.assertIn("DEBUG Looking up all photos from album All Photos...", self._caplog.text)
self.assertIn(
f'INFO Downloading the first original photo to {base_dir} ...',
self._caplog.text,
)
self.assertIn(
f"INFO Downloading {os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))}",
self._caplog.text,
)
self.assertNotIn(
"IMG_7409.MOV",
self._caplog.text,
)
self.assertIn(
"INFO All photos have been downloaded!", self._caplog.text
)
# Check that file was downloaded
self.assertTrue(
os.path.exists(os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG'))))
# Check that mtime was updated to the photo creation date
photo_mtime = os.path.getmtime(os.path.join(base_dir, os.path.normpath('2018/07/31/IMG_7409.JPG')))
photo_modified_time = datetime.datetime.utcfromtimestamp(photo_mtime)
self.assertEqual(
"2018-07-31 07:22:24",
photo_modified_time.strftime('%Y-%m-%d %H:%M:%S'))
assert result.exit_code == 0
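# Hedged refactoring sketch, not part of the original suite: every test above
# repeats the same CliRunner plumbing (fixed client ID, credentials, target
# directory). A shared helper such as the one below could absorb that
# boilerplate; its name and signature are assumptions for illustration.
def _invoke_icloudpd_sketch(base_dir, *extra_args):
    os.environ["CLIENT_ID"] = "DE309E26-942E-11E8-92F5-14109FE0B321"
    runner = CliRunner()
    result = runner.invoke(
        main,
        [
            "--username",
            "jdoe@gmail.com",
            "--password",
            "password1",
            "--no-progress-bar",
            "--threads-num",
            1,
            "-d",
            base_dir,
        ]
        + list(extra_args),
    )
    print_result_exception(result)
    return result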
| 43.418423
| 197
| 0.457811
|
9e4c9b4dbed5a7288df8e47a527e52813ef1d58a
| 912
|
py
|
Python
|
src/utils/ccxt/cancel_orders.py
|
YasunoriMATSUOKA/crypto-asset-easy-management
|
5c33fd8612b843ed39f0ec1fd84efa83f3967e42
|
[
"MIT"
] | null | null | null |
src/utils/ccxt/cancel_orders.py
|
YasunoriMATSUOKA/crypto-asset-easy-management
|
5c33fd8612b843ed39f0ec1fd84efa83f3967e42
|
[
"MIT"
] | 2
|
2020-12-05T09:31:01.000Z
|
2020-12-05T12:28:33.000Z
|
src/utils/ccxt/cancel_orders.py
|
YasunoriMATSUOKA/crypto-asset-easy-management
|
5c33fd8612b843ed39f0ec1fd84efa83f3967e42
|
[
"MIT"
] | null | null | null |
from logging import getLogger
import traceback
from .cancel_order import cancel_order
logger = getLogger("__main__").getChild(__name__)
def cancel_orders(exchange_name, orders):
logger.debug("start")
logger.debug(exchange_name)
logger.debug(orders)
results = []
for order in orders:
logger.debug(order)
try:
logger.debug("try search order_id")
order_id = order["id"]
logger.info("success")
except Exception as error:
logger.warning("failure")
logger.warning(error)
logger.debug(traceback.format_exc())
order_id = None
logger.debug(order_id)
if order_id is not None:
result = cancel_order(exchange_name, order_id)
else:
result = None
results.append(result)
logger.debug(results)
logger.debug("end")
return results
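# Hedged usage sketch: the exchange name and order dicts below are made up, and
# running it would attempt real cancellations through cancel_order(), so treat
# it as illustrative only. The third entry has no "id" key, so it is skipped
# and reported as None, mirroring the guard above.
if __name__ == "__main__":
    open_orders = [{"id": "order-1"}, {"id": "order-2"}, {"symbol": "BTC/JPY"}]
    print(cancel_orders("some_exchange", open_orders))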
| 27.636364
| 58
| 0.619518
|
afacadf63efa7e559b9a34330dbb29071c238a06
| 4,785
|
py
|
Python
|
src/primaires/scripting/actions/remplir.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/scripting/actions/remplir.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/scripting/actions/remplir.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File containing the remplir (fill) action."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
    """Fill a food or potion container."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.remplir_objet, "Objet", "Objet")
cls.ajouter_types(cls.remplir_proto_nb, "Objet", "str",
"Fraction")
@staticmethod
    def remplir_objet(conteneur, objet):
        """Put the object into the food container.
        Be careful: the container object can never be "floating"; it must
        itself be contained somewhere (the floor of a room, a character's
        inventory, another container...).
        """
if not conteneur.contenu:
raise ErreurExecution("{} n'est contenu nul part".format(
conteneur.get_nom()))
if conteneur.est_de_type("conteneur de potion"):
if conteneur.potion:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
if objet.contenu:
objet.contenu.retirer(objet)
conteneur.potion = objet
return
if not conteneur.est_de_type("conteneur de nourriture"):
raise ErreurExecution("{} n'est pas un conteneur".format(
conteneur.get_nom()))
if objet.poids_unitaire > conteneur.poids_max:
raise ErreurExecution("{} est plein".format(conteneur.get_nom()))
if objet.contenu:
objet.contenu.retirer(objet)
conteneur.nourriture.append(objet)
@staticmethod
    def remplir_proto_nb(conteneur, prototype, nb):
        """Put nb objects of the specified prototype into the container.
        Be careful: the container object can never be "floating"; it must
        itself be contained somewhere (the floor of a room, a character's
        inventory, another container...).
        """
nb = int(nb)
if not prototype in importeur.objet.prototypes:
raise ErreurExecution("prototype {} introuvable".format(prototype))
prototype = importeur.objet.prototypes[prototype]
if not conteneur.contenu:
raise ErreurExecution("{} n'est contenu nul part".format(
conteneur.get_nom()))
if conteneur.est_de_type("conteneur de potion"):
if conteneur.potion:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
objet = importeur.objet.creer_objet(prototype)
conteneur.potion = objet
return
if not conteneur.est_de_type("conteneur de nourriture"):
raise ErreurExecution("{} n'est pas un conteneur".format(
conteneur.get_nom()))
poids_total = 0
for i in range(nb):
poids_total += prototype.poids
if poids_total > conteneur.poids_max:
raise ErreurExecution("{} est plein".format(
conteneur.get_nom()))
objet = importeur.objet.creer_objet(prototype)
conteneur.nourriture.append(objet)
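# Worked sketch of the capacity check in remplir_proto_nb above, using made-up
# weights and no game objects: each pass adds the prototype's weight to the
# running total, and the action stops as soon as that total would exceed
# poids_max.
if __name__ == "__main__":
    poids_max, poids_prototype, nb = 10, 3, 4
    poids_total, crees = 0, 0
    for _ in range(nb):
        poids_total += poids_prototype
        if poids_total > poids_max:
            # remplir_proto_nb would raise ErreurExecution("... est plein") here
            break
        crees += 1
    print(crees)  # -> 3: the fourth object would push the total to 12 > 10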
| 43.899083
| 79
| 0.667294
|
730c7a4f03221f3b889dea09c31ccc963dd460cf
| 12,716
|
py
|
Python
|
mlonmcu/session/session.py
|
PhilippvK/mlonmcu
|
6b5ed9b2abe8d3caa18c20a604547513e8097b49
|
[
"Apache-2.0"
] | null | null | null |
mlonmcu/session/session.py
|
PhilippvK/mlonmcu
|
6b5ed9b2abe8d3caa18c20a604547513e8097b49
|
[
"Apache-2.0"
] | null | null | null |
mlonmcu/session/session.py
|
PhilippvK/mlonmcu
|
6b5ed9b2abe8d3caa18c20a604547513e8097b49
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import multiprocessing
from time import sleep
from datetime import datetime
import pandas as pd
from enum import Enum
import tempfile
from pathlib import Path
import os
import logging
import concurrent.futures
from tqdm import tqdm
from .run import RunStage
from mlonmcu.session.run import Run
from mlonmcu.logging import get_logger
from mlonmcu.report import Report
from .postprocess.postprocess import SessionPostprocess
logger = get_logger() # TODO: rename to get_mlonmcu_logger
class SessionStatus(Enum):
CREATED = 0
OPEN = 1
CLOSED = 2
ERROR = 3
class Session:
def __init__(self, label="unnamed", idx=None, archived=False, dir=None):
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
self.label = label + "_" + timestamp
self.idx = idx
self.status = SessionStatus.CREATED
self.opened_at = None
self.closed_at = None
self.runs = []
self.next_run_idx = 0
self.archived = archived
if dir is None:
assert not self.archived
self.tempdir = tempfile.TemporaryDirectory()
self.dir = Path(self.tempdir.name)
else:
self.tempdir = None
self.dir = dir
if not self.dir.is_dir():
self.dir.mkdir(parents=True)
self.runs_dir = self.dir / "runs"
if not os.path.exists(self.runs_dir):
os.mkdir(self.runs_dir)
if not self.archived:
self.open()
@property
def prefix(self):
return f"[session-{self.idx}] " if self.idx else ""
def create_run(self, *args, **kwargs):
idx = len(self.runs)
logger.debug("Creating a new run with id %s", idx)
run = Run(*args, idx=idx, session=self, **kwargs)
self.runs.append(run)
# TODO: move this to a helper function
run_link = run.dir.parent / "latest" # TODO: Create relative symlink using os.path.relpath for portability
if os.path.islink(run_link):
os.unlink(run_link)
os.symlink(run.dir, run_link)
return run
# def update_run(self): # TODO TODO
# pass
def get_reports(self):
reports = [run.get_report() for run in self.runs]
merged = Report()
merged.add(reports)
return merged
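    # Assign consecutive indices to all non-archived runs (continuing after any archived ones) and prepare their run directories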
def enumerate_runs(self):
# Find start index
max_idx = -1
for run in self.runs:
if run.archived:
max_idx = max(max_idx, run.idx)
run_idx = max_idx + 1
for run in self.runs:
if not run.archived:
run.idx = run_idx
run._init_directory()
run_idx += 1
self.next_run_idx = run_idx
def request_run_idx(self):
ret = self.next_run_idx
self.next_run_idx += 1
# TODO: find a better approach for this
return ret
def process_runs(
self,
until=RunStage.DONE,
per_stage=False,
num_workers=1,
progress=False,
export=False,
context=None,
):
# TODO: Add configurable callbacks for stage/run complete
self.enumerate_runs()
assert num_workers > 0, "num_workers can not be < 1"
        workers = []
        results = []
pbar = None
pbar2 = None
num_runs = len(self.runs)
num_failures = 0
stage_failures = {}
worker_run_idx = []
        def _init_progress(total, msg="Processing..."):
            nonlocal pbar
            pbar = tqdm(
                total=total,
                desc=msg,
                ncols=100,
                bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}s]",
                leave=None,
            )
        def _update_progress(count=1):
            nonlocal pbar
            pbar.update(count)
        def _close_progress():
            nonlocal pbar
            if pbar:
                pbar.close()
        def _init_progress2(total, msg="Processing..."):
            nonlocal pbar2
            pbar2 = tqdm(
                total=total,
                desc=msg,
                ncols=100,
                bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}s]",
            )
        def _update_progress2(count=1):
            nonlocal pbar2
            pbar2.update(count)
        def _close_progress2():
            nonlocal pbar2
            if pbar2:
                pbar2.close()
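        # Worker body: advance a single run up to the requested stage, then tick the progress bar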
def _process(run, until, skip):
run.process(until=until, skip=skip, export=export, context=context)
if progress:
_update_progress()
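        # Collect results from all submitted workers; count failing runs and remember the stage at which each one stopped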
def _join_workers(workers):
nonlocal num_failures
results = []
for i, w in enumerate(workers):
try:
results.append(w.result())
except Exception as e:
logger.exception(e)
logger.error("An exception was thrown by a worker during simulation")
run_index = worker_run_idx[i]
run = self.runs[run_index]
if run.failing:
num_failures += 1
failed_stage = RunStage(run.next_stage).name
if failed_stage in stage_failures:
stage_failures[failed_stage].append(run_index)
else:
stage_failures[failed_stage] = [run_index]
if progress:
_close_progress()
return results
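        # Determine which stages are actually provided by at least one run, up to the target stage (plus POSTPROCESS)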
def _used_stages(runs, until):
used = []
for stage_index in list(range(RunStage.LOAD, until + 1)) + [RunStage.POSTPROCESS]:
stage = RunStage(stage_index)
if any(run.has_stage(stage) for run in runs):
used.append(stage)
return used
used_stages = _used_stages(self.runs, until)
skipped_stages = [stage for stage in RunStage if stage not in used_stages]
with concurrent.futures.ThreadPoolExecutor(num_workers) as executor:
if per_stage:
if progress:
_init_progress2(len(used_stages), msg=f"Processing stages")
for stage in used_stages:
run_stage = RunStage(stage).name
if progress:
_init_progress(len(self.runs), msg=f"Processing stage {run_stage}")
else:
logger.info(self.prefix + f"Processing stage {run_stage}")
for i, run in enumerate(self.runs):
if i == 0:
total_threads = min(len(self.runs), num_workers)
cpu_count = multiprocessing.cpu_count()
if (stage >= RunStage.COMPILE) and run.platform:
total_threads *= run.platform.num_threads
if total_threads > 2 * cpu_count:
if pbar2:
print()
logger.warning(
f"The chosen configuration leads to a maximum of {total_threads} threads being processed which heavily exceeds the available CPU resources ({cpu_count}). It is recommended to lower the value of 'mlif.num_threads'!"
)
if run.failing:
logger.warning(f"Skiping stage '{run_stage}' for failed run")
else:
worker_run_idx.append(i)
workers.append(executor.submit(_process, run, until=stage, skip=skipped_stages))
results = _join_workers(workers)
workers = []
worker_run_idx = []
if progress:
_update_progress2()
if progress:
_close_progress2()
else:
if progress:
_init_progress(len(self.runs), msg="Processing all runs")
else:
logger.info(self.prefix + "Processing all stages")
for i, run in enumerate(self.runs):
if i == 0:
total_threads = min(len(self.runs), num_workers)
cpu_count = multiprocessing.cpu_count()
if (until >= RunStage.COMPILE) and run.platform.name == "mlif":
total_threads *= (
run.platform.num_threads
) # TODO: This should also be used for non-mlif platforms
if total_threads > 2 * cpu_count:
if pbar2:
print()
                            logger.warning(
                                f"The chosen configuration leads to a maximum of {total_threads} threads being processed which heavily exceeds the available CPU resources ({cpu_count}). It is recommended to lower the value of 'mlif.num_threads'!"
                            )
worker_run_idx.append(i)
workers.append(executor.submit(_process, run, until=until, skip=skipped_stages))
results = _join_workers(workers)
        if num_failures == 0:
            logger.info("All runs completed successfully!")
        elif num_failures == num_runs:
            logger.error("All runs have failed to complete!")
        else:
            num_success = num_runs - num_failures
            logger.warning(f"{num_success} out of {num_runs} runs completed successfully!")
summary = "\n".join(
[
f"\t{stage}: \t{len(failed)} failed run(s): " + " ".join([str(idx) for idx in failed])
for stage, failed in stage_failures.items()
if len(failed) > 0
]
)
logger.info("Summary:\n" + summary)
report = self.get_reports()
logger.info("Postprocessing session report")
# Warning: currently we only support one instance of the same type of postprocess, also it will be applied to all rows!
session_postprocesses = []
for run in self.runs:
for postprocess in run.postprocesses:
if isinstance(postprocess, SessionPostprocess):
if postprocess.name not in [p.name for p in session_postprocesses]:
session_postprocesses.append(postprocess)
for postprocess in session_postprocesses:
artifacts = postprocess.post_session(report)
if artifacts is not None:
for artifact in artifacts:
# Postprocess has an artifact: write to disk!
logger.debug("Writting postprocess artifact to disk: %s", artifact.name)
artifact.export(self.dir)
report_file = Path(self.dir) / "report.csv"
report.export(report_file)
results_dir = context.environment.paths["results"].path
results_file = results_dir / f"{self.label}.csv"
report.export(results_file)
logger.info(self.prefix + "Done processing runs")
print_report = True
if print_report:
logger.info("Report:\n" + str(report.df))
def __repr__(self):
return f"Session(idx={self.idx},status={self.status},runs={self.runs})"
@property
def active(self):
return self.status == SessionStatus.OPEN
def open(self):
self.status = SessionStatus.OPEN
self.opened_at = datetime.now()
def close(self, err=None):
if err:
self.status = SessionStatus.ERROR
else:
self.status = SessionStatus.CLOSED
self.closed_at = datetime.now()
# TODO: implement close()? and use closing contextlib? for tempdir
| 37.510324
| 250
| 0.549387
|
54778344cae5839e3f67fcdc784bb33708ea2b48
| 9,018
|
py
|
Python
|
src/app.py
|
wolfgangB33r/ai-text-model-studio
|
c8a1c067aa048652fe749c6843320409b19d5b20
|
[
"Apache-2.0"
] | null | null | null |
src/app.py
|
wolfgangB33r/ai-text-model-studio
|
c8a1c067aa048652fe749c6843320409b19d5b20
|
[
"Apache-2.0"
] | null | null | null |
src/app.py
|
wolfgangB33r/ai-text-model-studio
|
c8a1c067aa048652fe749c6843320409b19d5b20
|
[
"Apache-2.0"
] | null | null | null |
import streamlit as st
import streamlit.components.v1 as components
import numpy as np
import pandas as pd
import os
import pickle
import base64
#text libraries
import re
from time import time # To time our operations
from gensim.models import Word2Vec
# used for text scraping
from bs4 import BeautifulSoup
import requests
from urllib.parse import urlparse
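# Patch Streamlit's static index.html once: insert the given tag right before <head> (used below to add an external script)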
def inject(tag):
with open(os.path.dirname(st.__file__) + "/static/index.html", 'r') as file:
        html = file.read()
    if html.find(tag) == -1:
        idx = html.index('<head>')
        new_html = html[:idx] + tag + html[idx:]
        with open(os.path.dirname(st.__file__) + "/static/index.html", 'w') as file:
            file.write(new_html)
st.session_state['stopwords'] = {'also', 'often', 'may', 'use', 'within', 'ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once', 'during', 'out', 'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its', 'yours', 'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from', 'him', 'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through', 'don', 'nor', 'me', 'were', 'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their', 'while', 'above', 'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', 'before', 'them', 'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves', 'then', 'that', 'because', 'what', 'over', 'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you', 'herself', 'has', 'just', 'where', 'too', 'only', 'myself', 'which', 'those', 'i', 'after', 'few', 'whom', 't', 'being', 'if', 'theirs', 'my', 'against', 'a', 'by', 'doing', 'it', 'how', 'further', 'was', 'here', 'than'}
st.session_state['ignore_stop_words'] = True
def text_to_words(raw_text, remove_stopwords=True):
# 1. Remove non-letters, but including numbers
letters_only = re.sub("[^0-9a-zA-Z]", " ", raw_text)
# 2. Convert to lower case, split into individual words
words = letters_only.lower().split()
if remove_stopwords:
        stops = st.session_state['stopwords'] # In Python, searching a set is much faster than searching a list
meaningful_words = [w for w in words if not w in stops] # Remove stop words
words = meaningful_words
return words
def extract_visible_text(soup):
result = []
visible_text = soup.getText()
sentences = visible_text.splitlines()
for sentence in sentences:
words = text_to_words(sentence, remove_stopwords=st.session_state['ignore_stop_words'])
if len(words) > 5:
result.append(words)
return result
def fill_scrape_stats(url, result):
word_count = 0
char_count = 0
for s in result:
word_count = word_count + len(s)
for w in s:
char_count = char_count + len(w)
if 'url_scrape_stats' in st.session_state:
print({'Url' : url, 'Characters' : char_count, 'Words' : word_count, 'Sentences' : len(result)})
st.session_state['url_scrape_stats'].append({'Url' : url, 'Characters' : char_count, 'Words' : word_count, 'Sentences' : len(result)})
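# Recursively scrape the visible text of a page and follow links within the same domain until maxurls pages have been visited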
def crawler(url, maxurls, pages_crawled):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
result = extract_visible_text(soup)
fill_scrape_stats(url, result)
# now follow links
links = soup.find_all('a')
for link in links:
if 'href' in link.attrs:
if not link['href'].startswith('http'):
link['href'] = "https://" + urlparse(url).netloc.lower() + link['href']
if link['href'] not in pages_crawled:
pages_crawled.append(link['href'])
try:
if len(pages_crawled) < maxurls: # stop condition of maximum number of crawled pages
print(link['href'])
# only follow links within the same domain as the crawler was called for
if link['href'].lower().find(urlparse(url).netloc.lower()) != -1:
result.extend(crawler(link['href'], maxurls, pages_crawled))
except:
continue
return result
inject('<script type="text/javascript" src="https://js-cdn.dynatrace.com/jstag/148709fdc4b/bf74387hfy/3111aefc2c2c3580_complete.js" crossorigin="anonymous"></script>')
st.title('Text Scraping and Model Studio')
st.markdown('Data application for scraping Web texts from given Urls for the purpose of training word embedding models. Source at [GitHub](https://github.com/wolfgangB33r/ai-text-model-studio), read the companion [blog](https://www.smartlab.at/web-text-scraping-and-ai-model-training-with-streamlit/).')
st.subheader('Web text scraping')
st.markdown('Enter a Web Url below and start scraping all visual texts from that page.')
max_links = st.slider('Maximum number of links to follow', 0, 100, 1)
scraping_url = st.text_area('Url to scrape texts from (e.g.: texts from the book Pride And Prejudice)', 'https://www.gutenberg.org/cache/epub/1342/pg1342.html')
st.subheader('Text cleaning')
ignore_stop_words = st.checkbox('Ignore stopwords?', value = True)
if ignore_stop_words:
st.session_state['ignore_stop_words'] = ignore_stop_words
stop_word_text = st.text_area('Stopwords', 'must, much, us, could, would, also, often, may, use, within, ourselves, hers, between, yourself, but, again, there, about, once, during, out, very, having, with, they, own, an, be, some, for, do, its, yours, such, into, of, most, itself, other, off, is, s, am, or, who, as, from, him, each, the, themselves, until, below, are, we, these, your, his, through, don, nor, me, were, her, more, himself, this, down, should, our, their, while, above, both, up, to, ours, had, she, all, no, when, at, any, before, them, same, and, been, have, in, will, on, does, yourselves, then, that, because, what, over, why, so, can, did, not, now, under, he, you, herself, has, just, where, too, only, myself, which, those, i, after, few, whom, t, being, if, theirs, my, against, a, by, doing, it, how, further, was, here, than')
if stop_word_text:
a = stop_word_text.split(',')
ca = []
for i in a:
ca.append(i.strip())
st.session_state['stopwords'] = set(ca)
scrapebutton = st.button('Start scraping')
if scrapebutton:
st.session_state['url_scrape_stats'] = []
urls = scraping_url.splitlines()
st.session_state['sentences'] = []
for url in urls:
st.session_state['sentences'].extend(crawler(url, maxurls = max_links, pages_crawled = []))
print(len(st.session_state['sentences']))
if 'url_scrape_stats' in st.session_state:
df = st.dataframe(st.session_state['url_scrape_stats'])
base64_str = base64.b64encode(str(st.session_state['sentences']).encode('utf-8')).decode()
href = f'<a href="data:file/output_model;base64,{base64_str}" download="sentences.txt">Download scraped sentences as JSON file</a>'
st.markdown(href, unsafe_allow_html=True)
st.subheader('Train word2vec')
vector_size = st.slider('Embedding vector size of each word', 0, 500, 100)
window_size = st.slider('Word window size (5 e.g.: means two words before and two words after the input word are taken into account)', 0, 10, 5)
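# Serialize the trained model with pickle and offer it as a base64-encoded download link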
def download_model(model):
output_model = pickle.dumps(model)
b64 = base64.b64encode(output_model).decode()
href = f'<a href="data:file/output_model;base64,{b64}" download="myfile.pkl">Download trained model .pkl File</a>'
st.markdown(href, unsafe_allow_html=True)
trainbutton = st.button('Start training')
if trainbutton:
if 'sentences' in st.session_state:
# Initialize and train the model (this will take some time)
model = Word2Vec(vector_size=vector_size, window=window_size, min_count=1, workers=4)
model.build_vocab(st.session_state['sentences'], progress_per=10000)
t = time()
model.train(st.session_state['sentences'], total_examples=model.corpus_count, epochs=1, report_delay=1)
st.session_state['model'] = model
print('Time to train the model: {} mins'.format(round((time() - t) / 60, 2)))
print(len(model.wv))
#print(model.wv.most_similar(positive=["metric"]))
#model.save("./model/word2vec.model")
download_model(model)
model_info = f'Word2vec model vocabulary size: **{len(model.wv)}**'
st.markdown(model_info)
else:
st.markdown('Start with text scraping first.')
st.subheader('Evaluate the trained model')
st.markdown('Find the top-100 most similar words to the given word below.')
test_word = st.text_input('Check most similar words for', 'sister')
if test_word:
if 'model' in st.session_state:
if test_word.lower() in st.session_state['model'].wv:
similar = st.session_state['model'].wv.most_similar(positive=[test_word.lower()], topn=100)
st.dataframe(similar)
| 51.827586
| 1,077
| 0.649811
|
dcca6db5e5dd65481af153579556109f8ab7bad5
| 92,543
|
py
|
Python
|
build_system/flashFPGA2rsyocto.py
|
robseb/rsyocto
|
21c554a91f5be364212498beb8b946f358ec84ea
|
[
"MIT"
] | 69
|
2020-01-16T18:29:00.000Z
|
2022-02-23T07:19:17.000Z
|
build_system/flashFPGA2rsyocto.py
|
robseb/rsyocto
|
21c554a91f5be364212498beb8b946f358ec84ea
|
[
"MIT"
] | 13
|
2019-12-20T20:14:50.000Z
|
2022-01-25T17:21:19.000Z
|
build_system/flashFPGA2rsyocto.py
|
robseb/rsyocto
|
21c554a91f5be364212498beb8b946f358ec84ea
|
[
"MIT"
] | 20
|
2020-01-20T13:04:42.000Z
|
2022-02-18T01:41:55.000Z
|
#!/usr/bin/env python3.7
#
# ######## ###### ## ## ####### ###### ######## #######
# ## ## ## ## ## ## ## ## ## ## ## ## ##
# ## ## ## #### ## ## ## ## ## ##
# ######## ###### ## ## ## ## ## ## ##
# ## ## ## ## ## ## ## ## ## ##
# ## ## ## ## ## ## ## ## ## ## ## ##
# ## ## ###### ## ####### ###### ## #######
# ___ _ _ _ ___ _
# | _ ) _ _ (_) | | __| | / __| _ _ ___ | |_ ___ _ __
# | _ \ | || | | | | | / _` | \__ \ | || | (_-< | _| / -_) | ' \
# |___/ \_,_| |_| |_| \__,_| |___/ \_, | /__/ \__| \___| |_|_|_|
# |__/
#
#
# Robin Sebastian (https://github.com/robseb)
# Contact: git@robseb.de
# Repository: https://github.com/robseb/rsyocto
#
# Python Script to generate FPGA-Configuration files for the Linux and bootloader FPGA Configuration
# and to copy these file to the depending positions on rsyocto
#
# (2021-04-25) Vers.1.0
# first Version
#
# (2021-04-26) Vers.1.01
# fixing an issue with detection of the Intel Quartus Prime FPGA compile mode
#
# (2021-05-05) Vers.1.10
# fixing an issue with unlicensed IP during FPGA project compilation
# fixing an issue with multiple .sof FPGA files in a project
# adding the JTAG mode to enable the writing of
# unlicensed IP/regular FPGA-Configuration via JTAG
#
# (2021-05-09) Vers.1.101
# fixing an ERROR that occurred during a JTAG connection
# new FPGA IP test mode (JTAG) by generating and executing a shell script
#
# (2021-05-09) Vers.1.102
# remove .cdf file after shell script executing
#
# (2021-05-09) Vers.1.103
# JTAG support for Linux
# removing the second terminal window for the FPGA IP Evaluation Mode
#
# (2021-05-09) Vers.1.104
# small bug fixes with JTAG mode
#
# (2021-05-09) Vers.1.105
# JTAG mode support for regular FPGAs without HPS (Hard Processor System)
#
version = "1.105"
#
#
#
############################################ Const ###########################################
#
#
#
DELAY_MS = 1 # Delay after critical tasks in milliseconds
QURTUS_DEF_FOLDER_LITE = "intelFPGA_lite"
QURTUS_DEF_FOLDER = "intelFPGA"
QURTUS_DEF_FOLDER_PRO = "intelFPGA_pro"
EDS_EMBSHELL_DIR = ["/embedded/embedded_command_shell.sh","\\embedded\\embedded_command_shell.bat"]
QUARTUS_CMDSHELL_EXE = ['quartus_sh','quartus_sh.exe']
#
# @brief default XML settings file name
#
FLASHFPGA_SETTINGS_XML_FILE_NAME = 'confFlashFPGA2rsyocto.xml'
#
# @brief default XML settings file
#
FLASHFPGA_SETTINGS_XML_FILE ='<?xml version="1.0" encoding = "UTF-8" ?>\n'+\
'<!-- Used by the Python script "flashFPGA2rsyocto.py" -->\n'+\
'<!-- to store the settings of the used development board -->\n'+\
'<!-- Description: -->\n'+\
'<!-- item "board" The Settings for the baord (Only one item allowed) -->\n'+\
'<!-- L "set_ip" => The IPv4 Address of the board -->\n'+\
'<!-- L "set_user" => The Linux User name of the board -->\n'+\
'<!-- L "set_password" => The Linux User password of the board -->\n'+\
'<!-- L "set_flashBoot" => Enable or Disable of the writing of the u-boot bootloader FPGA-Configuration file -->\n'+\
'<!-- L "Y" => Enable | "N" => Disable -->\n'+\
'<!-- set_quartus_prime_ver Intel Quartus Prime Version to use <Version><Version No> -->\n'+\
'<!-- L -> Quartus Prime Lite (e.g. L16.1) -->\n'+\
'<!-- S -> Quartus Prime Standard (e.g. S18.1) -->\n'+\
'<!-- P -> Quartus Prime Pro (e.g. P20.1) --> \n'+\
'<FlashFPGA2Linux>\n'+\
' <board set_ip="192.168.0.165" set_user="root" set_pw="eit" set_flashBoot="Y" set_quartus_prime_ver="L20.1" />\n'+\
'</FlashFPGA2Linux>\n'
RSYOCTO_BANNER_CHECK_LINE = ['created by Robin Sebastian (github.com/robseb)', \
'Contact: git@robseb.de', \
'https://github.com/robseb/rsyocto'\
]
RSYOCTO_FPGAWRITECONF_CHECK = 'Command to change the FPGA fabric configuration'
RSYOCTO_TEMPCOPYFOLDER = '.flashFPGA2rsyocto'
#
#
#
############################################ Github clone function ###########################################
#
#
#
import sys
try:
import paramiko
except ImportError as ex:
print('Msg: '+str(ex))
print('This Python Script uses "paramiko"')
print('to enable SSH access to SoC-FPGA board')
print('Use following pip command to install it:')
print('$ pip3 install paramiko')
sys.exit()
import os, platform, io, warnings
import time, math
from datetime import datetime
import shutil
import re
from threading import Thread
import subprocess, queue
from subprocess import DEVNULL
import xml.etree.ElementTree as ET
import glob
from pathlib import Path
import argparse
#
# @brief Class for automating the entire FPGA-Configuration generation process
# and to write the FPGA-Configuration via SSH
#
class FlashFPGA2Linux(Thread):
## Intel Quartus Prime and Intel SoC-EDS related properties
EDS_Folder_dir : str # Directory of the Intel EDS folder
Quartus_proj_top_dir : str # Directory of the Quartus Project folder
Qpf_file_name : str # Name of the Quartus Project ".qpf"-file
Sof_file_name : str # Name of the Quartus Project ".sof"-file
sopcinfo_file_name : str # Name of the Quartus Project ".sopcinfo"-file
Qsys_file_name : str # Name of the Quartus Project ".qsys"-file
Handoff_folder_name : str # Name of the Quartus Project Hand-off folder
UbootSFP_default_preBuild_dir : str # Directory of the pre-build u-boot for the device
Sof_folder : str # Name of the Quartus Project folder containing the ".sof"-file
U_boot_socfpga_dir : str # Directory of u-boot SoC-FPGA folder
Uboot_default_file_dir : str # Directory of the pre-build default u-boot file
## SoC-FPGA Development board and rsyocto related properties
Device_id : int # SocFPGA ID (0: Cyclone V; 1: Arria V;2: Arria 10)
Device_name = ['Intel Cyclone V','Intel Arria V','Intel Arria 10']
unlicensed_ip_found : bool# Quartus project contains an unlicensed IP (e.g. NIOS II Core)
regular_fpga_project : bool # FPGA Project type: True: regular FPGA | False: SoC-FPGA
board_ip_addrs : '' # IPv4 Address of the SoC-FPGA Linux Distribution (rsyocto)
board_user : '' # SoC-FPGA Linux Distribution (rsyocto) Linux user name
board_pw : '' # SoC-FPGA Linux Distribution (rsyocto) Linux user password
use_jtag : False # Use JTAG to write the FPGA-Configuration
__temp_folder_dir : '' # Directory of the Temp folder on rsyocto
__temp_partfolder_dir : '' # Directory of the Temp partition folder
#
# @brief Directories to Intel FPGA shells
#
shell_quartus_dir : str # Directory of the Intel Quartus Prime command shell
## Network related properties
__sshClient : paramiko # Object of the SSH client connection to the baord
__sftpClient : paramiko # Object of the SFTP client connection to the board
__queue : queue.Queue # Queue of the SSH Thread
__SPLM = ['/','\\'] # Slash for Linux, Windows
__SPno = 0 # OS ID 0=Linux | 1=Windows
ThreadStatus : False # Was the SSH Thread executed successfully?
#
# @brief Constructor
# @param board_ip_addrs IPv4 Address of the SoC-FPGA Linux Distribution (rsyocto)
# Format 100.100.100.100
# @param board_user SoC-FPGA Linux Distribution (rsyocto) Linux user name
# @param board_pw SoC-FPGA Linux Distribution (rsyocto) Linux user password
# @prarm compile_project Before writing FPGA-Configuration compile the Intel Quartus
# Prime FPGA project
# @param QuartusForceVersion Quartus Prime Version to use <Version><Version No>
# L -> Quartus Prime Lite (e.g. L16.1)
# S -> Quartus Prime Standard (e.g. S18.1)
# P -> Quartus Prime Pro (e.g. P20.1)
# @param use_jtag Use JATG for writing the FPGA-Configuration
#
def __init__(self,board_ip_addrs, board_user,board_pw,compile_project,QuartusForceVersion, use_jtag):
        # Read the input parameters
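        # Basic dotted-quad IPv4 validation: every octet must be in the range 0-255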
regex_pattern = "^([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
if not bool( re.match( regex_pattern, board_ip_addrs)):
print('[ERROR] The given IP Address is not in the proper format (0.0.0.0)')
sys.exit()
self.board_ip_addrs = board_ip_addrs
self.board_user = board_user
self.board_pw = board_pw
self.use_jtag = use_jtag
self.ThreadStatus = False
######################################### Find the Intel EDS Installation Path ####################################
if sys.platform =='linux':
EDS_Folder_def_suf_dir = os.path.join(os.path.join(os.path.expanduser('~'))) + '/'
self.__SPno = 0
else:
EDS_Folder_def_suf_dir = 'C:\\'
self.__SPno = 1
# 1.Step: Find the EDS installation path
quartus_standard_ver = False
# Loop to detect the case that the free Version of SoC EDS (EDS Standard [Folder:intelFPGA]) and
# the free Version of Quartus Prime (Quartus Lite [Folder:intelFPGA_lite]) are installed together
while(True):
if (os.path.exists(EDS_Folder_def_suf_dir+QURTUS_DEF_FOLDER)) and (not quartus_standard_ver):
self.EDS_Folder=EDS_Folder_def_suf_dir+QURTUS_DEF_FOLDER
quartus_standard_ver = True
elif(os.path.exists(EDS_Folder_def_suf_dir+QURTUS_DEF_FOLDER_LITE)):
self.EDS_Folder=EDS_Folder_def_suf_dir+QURTUS_DEF_FOLDER_LITE
quartus_standard_ver = False
else:
print('[ERROR] No Intel SoC EDS Installation Folder was found!')
sys.exit()
# 2.Step: Find the latest Intel SoC EDS Version No.
avlVer = []
for name in os.listdir(self.EDS_Folder):
if os.path.abspath(name):
try:
avlVer.append(float(name))
except Exception:
pass
if (len(avlVer)==0):
print('[ERROR] No valid Intel SoC EDS Version was found')
sys.exit()
avlVer.sort(reverse = True)
highestVer = avlVer[0]
self.EDS_Folder = self.EDS_Folder +self.__SPLM[self.__SPno]+ str(highestVer)
if (not(os.path.realpath(self.EDS_Folder))):
print('[ERROR] No valid Intel EDS Installation Folder was found!')
sys.exit()
if(highestVer < 18):
print('[ERROR] This script is designed for Intel SoC-EDS Version 18+ (18.1,19.1, 20.1, ...) ')
            print('       You are using Version '+str(highestVer)+', please update Intel EDS!')
sys.exit()
elif(highestVer > 20.1):
print('[WARNING] This script was designed for Intel EDS Version 19.1 and 20.1')
print(' Your version is newer. Errors may occur!')
# Check if the SOC-EDS Command Shell is available
if((not(os.path.isfile(self.EDS_Folder+EDS_EMBSHELL_DIR[self.__SPno])) )):
if( not quartus_standard_ver):
print('[ERROR] Intel SoC EDS Embedded Command Shell was not found!')
sys.exit()
else:
break
############################### Check that the script runs inside the Quartus project ###############################
self.Quartus_proj_top_dir =os.getcwd()
excpath = os.getcwd()
# Find the Quartus project (.qpf) file
self.Qpf_file_name = ''
for file in os.listdir(self.Quartus_proj_top_dir):
if ".qpf" in file:
self.Qpf_file_name =file
break
self.sopcinfo_file_name = ''
for file in os.listdir(self.Quartus_proj_top_dir):
if ".sopcinfo" in file:
self.sopcinfo_file_name =file
break
# Find the Quartus (.sof) (SRAM Object) file
self.Sof_file_name = ''
Sof_file_name_list =[]
self.Sof_folder = ''
# Looking in the top folder for the sof file
# Sort the file by the modification date (latest date --> first)
files = [s for s in os.listdir(self.Quartus_proj_top_dir)
if os.path.isfile(os.path.join(self.Quartus_proj_top_dir, s))]
files.sort(key=lambda s: os.path.getmtime(\
os.path.join(self.Quartus_proj_top_dir, s)),reverse=True)
for file in files:
if ".sof" in file:
Sof_file_name_list.append(file)
if len(Sof_file_name_list)==0:
# Looking inside the "output_files" and "output" folders
if os.path.isdir(self.Quartus_proj_top_dir+'/output_files'):
self.Sof_folder ='output_files'
if os.path.isdir(self.Quartus_proj_top_dir+'/output'):
self.Sof_folder = 'output'
# Sort the file by the modification date (latest date --> first)
files = [s for s in os.listdir(self.Quartus_proj_top_dir+\
self.__SPLM[self.__SPno]+self.Sof_folder)
if os.path.isfile(os.path.join(self.Quartus_proj_top_dir+\
self.__SPLM[self.__SPno]+self.Sof_folder, s))]
files.sort(key=lambda s: os.path.getmtime(\
os.path.join(self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+\
self.Sof_folder, s)),reverse=True)
for file in files:
if ".sof" in file:
Sof_file_name_list.append(file)
# Use the latest SOF file available inside the Quartus Prime Project
        self.Sof_file_name = ''
if len(Sof_file_name_list)>0:
self.Sof_file_name=Sof_file_name_list[0]
            # Check if the file is older than 10 min --> raise a warning!
current_time = datetime.now().timestamp()
modification_time = os.path.getmtime(self.Quartus_proj_top_dir+\
self.__SPLM[self.__SPno]+self.Sof_folder+\
self.__SPLM[self.__SPno]+self.Sof_file_name)
if modification_time+ 10*60 < current_time:
mod= datetime.fromtimestamp(modification_time).strftime('%d-%m-%Y %H:%M')
print('[WARNING] The used output file "'+self.Sof_folder+\
self.__SPLM[self.__SPno]+self.Sof_file_name+\
'" is older then 10 min! Modification Date: '+mod)
# Find the Platform Designer (.qsys) file
self.Qsys_file_name = ''
for file in os.listdir(self.Quartus_proj_top_dir):
if ".qsys" in file and not ".qsys_edit" in file:
self.Qsys_file_name =file
break
device_name_temp =''
        # Does the SOF file contain an IP with a test licence, such as a NIOS II Core?
self.unlicensed_ip_found=False
if self.Sof_file_name.find("_time_limited")!=-1:
if self.use_jtag==False:
                # Use the network for writing the FPGA-Configuration
print('********************************************************************************')
print('* Unlicensed IP inside the FPGA project was found! *')
print('* Generation of the ".rbf"- FPGA-Configuration is not enabled *')
print('* --> It is not allowed to generate a static FPGA-Configuration file *')
print('********************************************************************************')
print('* *')
print('* Use the argument "-j 1" to write the FPGA-Configuration via JTAG *')
print('* *')
print('********************************************************************************')
sys.exit()
else:
# Use JTAG
print('[WARNING] The FPGA project contains unlicensed IP. Only JTAG RAM writing allowed!')
self.unlicensed_ip_found=True
# Find the Platform Designer folder
if self.Qsys_file_name=='' or self.Qpf_file_name=='':
print('[ERROR] The script was not executed inside the Intel Quartus Prime project folder!')
            print('       Please copy it into the top-project folder of an Intel Quartus Prime FPGA project')
print(' --- Required folder structure ---')
print(' YOUR_QURTUS_PROJECT_FOLDER ')
print(' | L-- PLATFORM_DESIGNER_FOLDER')
print(' | L-- platform_designer.qsys')
print(' | L-- _handoff')
print(' | L-- quartus_project.qpf')
print(' | L-- flashFPGA2rsyocto.py <<<<<<<<=====')
print(' Note: File names can be chosen freely\n')
sys.exit()
if self.Sof_file_name=='' and not compile_project:
print('[ERROR] The linked Intel Quartus Prime FPGA Project was not compiled!')
            print('       This is necessary for the FPGA-Configuration file generation!')
print(' Use the argument "-cf 1" to compile this FPGA project')
print(' and then to write the FPGA-Configuration with rsyocto')
sys.exit()
# Find the handoff folder
self.Handoff_folder_name = ''
handoff_folder_start_name =''
for file in os.listdir(self.Quartus_proj_top_dir):
if "_handoff" in file:
handoff_folder_start_name =file
break
folder_found = False
if not handoff_folder_start_name=='':
for folder in os.listdir(self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+handoff_folder_start_name):
if os.path.isdir(self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+handoff_folder_start_name+self.__SPLM[self.__SPno]+folder):
self.Handoff_folder_name = folder
if folder_found:
print('[ERROR] More than one folder inside the Quartus handoff folder "'+self.Handoff_folder_name+'" found! Please delete one!')
                        print('       NOTE: It is necessary to build the Quartus Prime Project for the bootloader generation!')
sys.exit()
folder_found = True
self.Handoff_folder_name = handoff_folder_start_name+self.__SPLM[self.__SPno]+self.Handoff_folder_name
# Find the "hps.xml"-file inside the handoff folder
handoff_xml_found =False
for file in os.listdir(self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+self.Handoff_folder_name):
if "hps.xml" == file:
handoff_xml_found =True
break
if not handoff_xml_found:
print('[ERROR] The "hps.xml" file inside the handoff folder was not found!')
            print('       NOTE: It is necessary to build the Quartus Prime Project for the bootloader generation!')
sys.exit()
# Load the "hps.xml" file to read the device name
try:
tree = ET.parse(self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+self.Handoff_folder_name+self.__SPLM[self.__SPno]+'hps.xml')
root = tree.getroot()
except Exception as ex:
print(' [ERROR] Failed to parse "hps.xml" file!')
print(' Msg.: '+str(ex))
sys.exit()
device_name_temp =''
for it in root.iter('config'):
name = str(it.get('name'))
if name == 'DEVICE_FAMILY':
device_name_temp = str(it.get('value'))
break
if device_name_temp == '':
print('[ERROR] Failed to decode the device name inside "hps.xml"')
# Convert Device name
if device_name_temp == 'Cyclone V':
self.Device_id = 0
'''
elif device_name_temp == 'Arria V':
self.Device_id = 1
'''
elif device_name_temp == 'Arria 10':
self.Device_id = 2
print('[ERROR] The Arria 10 SX SoC-FPGA is right now not supported!')
print(' I am working on it...')
sys.exit()
## NOTE: ADD ARRIA V/ SUPPORT HERE
else:
print('[ERROR] Your Device ('+device_name_temp+') is not supported!')
sys.exit()
# For Arria 10 SX: The early I/O release must be enabled inside Quartus Prime!
early_io_mode =-1
if self.Device_id == 2:
for it in root.iter('config'):
name = str(it.get('name'))
if name == 'chosen.early-release-fpga-config':
early_io_mode = int(it.get('value'))
break
if not early_io_mode==1:
print('[ERROR] This build system supports only the Arria 10 SX SoC-FPGA')
print(' with the Early I/O release feature enabled!')
print(' Please enable Early I/O inside the Intel Quartus Prime project settings')
print(' and rebuild the project again')
print('Setting: "Enables the HPS early release of HPS IO" inside the general settings')
print('Note: Do not forget to enable it for the EMIF inside Qysis')
sys.exit()
else:
print('[INFO] HPS early release of HPS IO for the Intel Arria 10 SX SoC-FPGA is enabled')
else:
# It was no handoff folder found!
if self.use_jtag==False:
                # Use the network for writing the FPGA-Configuration
print('********************************************************************************')
print('* This is a regular Quartus Prime FPGA project without a HPS *')
print('* (Hard Processor System) implementation! *')
print('* --> It is not possible to write the FPGA-Conf with the Linux rsyocto! *')
print('********************************************************************************')
print('* *')
print('* Use the argument "-j 1" to write the FPGA-Configuration via JTAG *')
print('* *')
print('********************************************************************************')
sys.exit()
else:
# Use JTAG
print('[INFO] The FPGA project has no HPS. FPGA-Config via JTAG is enabled')
self.regular_fpga_project=True
######################################## Force to cyclone V ########################################
            device_name_temp = 'Cyclone V'
self.Device_id = 0
print('[INFO] A valid Intel Quartus Prime '+device_name_temp+' SoC-FPGA project was found')
################################ COMPILE THE INTEL QUARTUS PRIME FPGA PROJECT ################################
if compile_project or use_jtag:
quVers=0
if QuartusForceVersion =='':
print('[ERROR] For the Intel Quartus Prime FPGA Project compilation mode ')
print(' it is necessary to select the Intel Quartus Prime Version! ')
print(' Use the argument "-h" for help')
sys.exit()
if re.match("^[LSP]+[0-9]+[.]+[0-9]?$", QuartusForceVersion, re.I) == None:
print('ERROR: The selected Quartus Version is in the wrong format')
sys.exit()
# Decode the Version No and Version Type input
if not QuartusForceVersion.find('S')==-1:
quVers=1
elif not QuartusForceVersion.find('P')==-1:
quVers=2
quartus_folder= [QURTUS_DEF_FOLDER_LITE,QURTUS_DEF_FOLDER,QURTUS_DEF_FOLDER_PRO]
if self.__SPno== 0:
# Find the Linux default hard drive directory
sys_folder_dir = os.path.join(os.path.join(os.path.expanduser('~'))) + '/'
else:
# Windows C:// directory
sys_folder_dir = 'C:\\'
self.installDir_Quartus = sys_folder_dir+quartus_folder[quVers]+\
self.__SPLM[self.__SPno]+QuartusForceVersion[1:]
if not os.path.isdir(self.installDir_Quartus +self.__SPLM[self.__SPno]+"quartus"):
print('[ERROR] The chosen Intel Quartus Prime Version is not available \n'+\
' on this Computer ("'+self.installDir_Quartus +'")\n'+
                    '       Please install this version or choose a different Intel Quartus Version!\n'+\
' Use the argument "-h" to get help')
sys.exit()
# Check if the Quartus Prime "bin64" 64-bit version folder is there
self.installDir_Quartus_bin= self.installDir_Quartus +self.__SPLM[self.__SPno]+\
"quartus"+self.__SPLM[self.__SPno]+"bin64"
if not os.path.isdir(self.installDir_Quartus_bin):
self.installDir_Quartus_bin= self.installDir_Quartus +self.__SPLM[self.__SPno]+\
"quartus"+self.__SPLM[self.__SPno]+"bin"
if not os.path.isdir(self.installDir_Quartus_bin):
print('[ERROR] The Intel Quartus Prime bin or bin64 folder does not exist!\n'+\
' search dir: "'+self.installDir_Quartus_bin+'"')
self.installDir_Quartus_bin=''
sys.exit()
# Find the Quartus Prime Command Shell
self.shell_quartus_dir = self.installDir_Quartus_bin+self.__SPLM[self.__SPno]+\
QUARTUS_CMDSHELL_EXE[self.__SPno]
if not os.path.isfile(self.shell_quartus_dir):
print('[ERROR] The Intel Quartus Prime shell \n'+\
' was not found ("'+self.shell_quartus_dir+'")')
sys.exit()
            print('[INFO] A valid Intel Quartus Prime installation was found ('+QuartusForceVersion+')')
if compile_project:
print('[INFO] Start compiling the Intel Quartus Prime FPGA project')
if not self.command_quartusShell_flow():
print('[ERROR] Compilation of the Intel Quartus Prime Project failed!')
sys.exit()
print('[INFO] Compiling the Intel Quartus Prime FPGA project is done')
#
    # @brief Start the Quartus Prime design flow: compilation, routing,
    #        timing analysis, and programming file generation
# @param mode
# compile = Basic compilation
# implement = Run compilation up to route stage
# finalize = Perform pre-POF finalization operations
# recompile = Perform a Rapid Recompile after making a design change
# signalprobe = Run project signalprobing
# export_database = Export database
# import_database = import database
# @return success
def command_quartusShell_flow(self,mode='compile'):
if not (mode =='compile' or mode== 'implement' or mode == 'finalize' or \
mode == 'recompile' or mode== 'signalprobe' or mode =='export_database' or \
mode == 'import_database'):
print('[ERROR] The selected input mode is not allowed!')
return False
print('--> Start the Quartus Prime project design flow ')
print(' in the mode "'+mode+'"')
if self.Quartus_proj_top_dir =='':
print('[ERROR] The Quartus Project top folder is not specified')
return False
os.system(self.shell_quartus_dir+' --flow '+mode+' '+self.Quartus_proj_top_dir+\
self.__SPLM[self.__SPno]+self.Qpf_file_name)
print('\n--> The Design flow command was executed')
        # Check that the output file (".sof") has been changed
if not os.path.isfile(self.Quartus_proj_top_dir+self.Sof_folder+\
self.__SPLM[self.__SPno]+self.Sof_file_name):
print('[ERROR] The output file (".sof") does not exist!')
modification_time = os.path.getmtime(self.Quartus_proj_top_dir+\
self.__SPLM[self.__SPno]+self.Sof_folder+\
self.__SPLM[self.__SPno]+self.Sof_file_name)
current_time = datetime.now().timestamp()
# Offset= 10 min
new_file=False
if modification_time+ 10*60 < current_time:
# Was a new File created
files = [s for s in os.listdir(self.Quartus_proj_top_dir+\
self.__SPLM[self.__SPno]+self.Sof_folder)
if os.path.isfile(os.path.join(self.Quartus_proj_top_dir+\
self.__SPLM[self.__SPno]+self.Sof_folder, s))]
files.sort(key=lambda s: os.path.getmtime(\
os.path.join(self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+\
self.Sof_folder, s)),reverse=True)
if not len(files)==0:
for file in files:
if ".sof" in file:
print(file)
modification_time = os.path.getmtime(file)
if modification_time+ 400*60 < current_time:
print('[ERROR] The compilation failed!')
return False
self.Sof_file_name=file
print('[NOTE] New FPGA-Configuration file name "'+file+'"')
new_file = True
break
else:
return False
if self.Sof_file_name.find('_time_limited')>-1: self.unlicensed_ip_found=True
else: self.unlicensed_ip_found=False
if self.unlicensed_ip_found and not self.use_jtag:
            print('[ERROR] The compilation is done and contains an unlicensed IP!')
print(' It is not allowed to write a .rbf FPGA-Configuration file with it!')
print(' Use the argument "-j 1" to write it via JTAG to RAM')
return False
if not new_file:
            print('[ERROR] The command failed! The output file (".sof") is the same!')
return False
return True
#
# @brief Write the FPGA-Configuration with JTAG to RAM
# @return success
#
def command_jtag_writeConfRAM(self):
if not os.path.isfile(self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+self.Sof_folder+\
self.__SPLM[self.__SPno]+self.Sof_file_name):
print('[ERROR] The output file (".sof") does not exist! --> JTAG Flash impossible!')
return False
sof_file_dir = self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+self.Sof_folder
jtagconfig_cmd_dir = self.installDir_Quartus_bin+self.__SPLM[self.__SPno]
jtagconfig_cmd_dir+= 'jtagconfig' if self.__SPno==0 else 'jtagconfig.exe'
'''
C:\intelFPGA\18.1\quartus\bin64>jtagconfig.exe
1) DE-SoC [USB-1]
4BA00477 SOCVHPS
02D020DD 5CSEBA6(.|ES)/5CSEMA6/..
'''
#
### 1. Step: Run "jtagconfig" to scan the JTAG Chain
#
out_chain =''
err =''
try:
with subprocess.Popen(jtagconfig_cmd_dir, stdin=subprocess.PIPE,\
stdout=subprocess.PIPE,stderr = subprocess.PIPE) as edsCmdShell:
time.sleep(DELAY_MS)
out_chain, err = edsCmdShell.communicate()
except Exception as ex:
print('[ERROR] Failed to execute the "jtagconfig" command MSG:'+ str(ex))
return False
        # Check that a valid JTAG Debugger was connected
out_chain= out_chain.decode("utf-8")
err= err.decode("utf-8")
if out_chain=='' or err.find('No JTAG hardware available')>-1:
print('[ERROR] No supported JTAG Debugger was found!')
print(' Check the connection between the FPGA device and the debugger')
err= err.replace('\n','')
print(' MSG: '+err)
return False
if out_chain=='':
            print('[ERROR] An unknown error occurred during the JTAG Debugger connection attempt!')
err= err.replace('\n','')
print(' MSG: '+err)
return False
start_symbol_pos = out_chain.find('1)')
if self.__SPno==0:
JTAG_debugger_id_start_pos = out_chain.find('[1-')
else:
JTAG_debugger_id_start_pos = out_chain.find('[USB-1]')
if start_symbol_pos==-1 or JTAG_debugger_id_start_pos==-1:
print('[ERROR] No USB JTAG Debugger found! Only USB Debuggers are supported!')
return False
# At least one JTAG Debugger was connected --> read the ID of the Debugger
if self.__SPno==0:
JTAG_debugger_id = out_chain[start_symbol_pos+3:JTAG_debugger_id_start_pos+6]
else:
JTAG_debugger_id = out_chain[start_symbol_pos+3:JTAG_debugger_id_start_pos+7]
JTAG_debugger_id= JTAG_debugger_id.replace('\n','',100)
        # Is more than one debugger connected? --> not supported
        if not out_chain.find('2)',JTAG_debugger_id_start_pos)==-1:
            print('[ERROR] More than one USB JTAG Debugger found! Only one is allowed!')
            print('       Disconnect one JTAG Debugger to use this script!')
return False
print('[INFO] A valid JTAG Debugger was found with the ID="'+JTAG_debugger_id+'"')
#
## 2. Step: Create a Chain Description File (.cdf)
#
        # Check if a CDF file is already there
cdf_file_name=self.Sof_file_name.replace('.sof', '.cdf')
if os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name):
try:
os.remove(sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name)
except Exception:
print('[ERROR] Failed to remove the old CDF File! Please remove it by hand!')
                print('       File dir: "'+sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name+'"')
return False
# Analyse the JTAG Chain
JTAG_id_list = []
Device_id_list=[]
first_line =True
for line in out_chain.splitlines():
if first_line:
first_line=False
continue
# Format <JTAG ID> <DEVICE ID>
jtag_id_start=2
jtag_id_end =0
# Find the <JTAG_ID>
for i in range(2,len(line)):
jtag_id_end=i
if not bool(re.match("^[A-F0-9]?$", line[i], re.I)):
break
if jtag_id_end>0:
JTAG_id_list.append(line[jtag_id_start:jtag_id_end])
# Find the <DEVICE ID>
# Find first non ' ' char pos
device_id_start=0
device_id_end=0
for i in range(jtag_id_end+1,len(line)):
if not line[i] ==' ':
device_id_start = i
break
if device_id_start > 0:
device_id_end=line.find('(',device_id_start)
if device_id_end==-1: device_id_end=len(line)
Device_id_list.append(line[device_id_start:device_id_end])
if (len(JTAG_id_list) ==0 or len(Device_id_list) ==0) or \
(not len(JTAG_id_list) == len(Device_id_list)):
print('[ERROR] Failed to decode JTAG Chain Scan output!')
return False
if len(JTAG_id_list)>2 or len(Device_id_list)>2:
            print('[ERROR] More than 2 JTAG Devices inside the chain! This is not supported!')
return False
if len(JTAG_id_list)==2 and Device_id_list[0].find('SOC')==-1:
print('[ERROR] JTAG Chain with 2 Devices found! The first is not the HPS...')
            print('       This is not supported. Single Device or the first device must be the HPS!')
return False
# Check that the JTAG Chain family matches the Quartus Project one
wrong_device =False
if len(JTAG_id_list)==2:
if self.Device_id==0 and Device_id_list[0].find('SOCVHPS')==-1: wrong_device= True
if self.Device_id==1 and Device_id_list[0].find('SOVHPS')==-1: wrong_device= True
if self.Device_id==2 and Device_id_list[0].find('SOC10HPS')==-1: wrong_device= True
else:
if self.Device_id==0 and Device_id_list[0].find('5C')==-1: wrong_device= True
if self.Device_id==1 and Device_id_list[0].find('5A')==-1: wrong_device= True
if self.Device_id==2 and Device_id_list[0].find('10A')==-1: wrong_device= True
if wrong_device:
print('[ERROR] The FPGA Device family of the FPGA project was not found in the JTAG Chain!')
return False
cdf_file_content=''
# Create the JTAG Chain file
sof_file_dir_2 = sof_file_dir.replace('\\','/',50)+'/'
cfg_no =0
if len(JTAG_id_list)==2: cfg_no=1
# CDF file for FPGAs
cdf_file_content= '/* Generated file by "flashFPGA2rsyocto.py" by Robin Sebastian (git@robseb.de) */\n' + \
'JedecChain;\n' + \
' FileRevision(JESD32A);\n' + \
' DefaultMfr(6E);\n' +'\n'
if len(JTAG_id_list)==2:
cdf_file_content+= ' P ActionCode(Ign)\n'
cdf_file_content+= ' Device PartName('+Device_id_list[0]+') MfrSpec(OpMask(0));\n'
cdf_file_content+= ' P ActionCode(Cfg)\n' + \
' Device PartName('+Device_id_list[cfg_no]+') Path("'+sof_file_dir_2+\
'") File("'+self.Sof_file_name+'") MfrSpec(OpMask(1));\n' + \
'\n' + \
'ChainEnd;\n' + '\n' + \
'AlteraBegin;\n' + \
' ChainType(JTAG);\n' + \
'AlteraEnd;\n'
# Write the CDF File
cdf_file_dir = sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name
with open(cdf_file_dir,"w") as f:
f.write(cdf_file_content)
#
## 3. Step: Write the FPGA-Configuration with "quartus_pgm"
#
# quartus_pgm.exe -m JTAG -c 1 D:\Tresorit\Robin\FPGA\DE10STD_NIOS\DE10STDrsyocto_NIOS2_1\output_files\DE10STD.cdf
        quartus_pgm_cmd_dir = self.installDir_Quartus_bin+self.__SPLM[self.__SPno]
quartus_pgm_cmd_dir+= 'quartus_pgm' if self.__SPno==0 else 'quartus_pgm.exe'
cmd = quartus_pgm_cmd_dir+' -m JTAG -c 1 '+cdf_file_dir
'''
print(cmd)
if self.__SPno==0:
# for Linux
cmd = [quartus_pgm_cmd_dir,' -c 1 ',' -m JTAG ',cdf_file_dir]
'''
if self.unlicensed_ip_found:
#
## 3.A Step: Write the FPGA-Configuration with "quartus_pgm"
# Create a BASH or Shell script for executing this command
#
# Remove the older BASH/SH File
sh_file_name=self.Sof_file_name.replace('.sof', '.sh' if self.__SPno==0 else '.bat')
if os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+sh_file_name):
try:
os.remove(sof_file_dir+self.__SPLM[self.__SPno]+sh_file_name)
except Exception:
print('[ERROR] Failed to remove the old SH/BAT File! Please remove it by hand!')
                    print('       File dir: "'+sof_file_dir+self.__SPLM[self.__SPno]+sh_file_name+'"')
# Create a new SH/BAT file
try:
with open(sh_file_name, "a") as f:
if self.__SPno==0: f.write('#!/bin/sh \n')
f.write(cmd+'\n')
f.write('echo "********************************************************************************"\n')
f.write('echo "* Unlicensed IP inside the FPGA project was found! *"\n')
f.write('echo "********************************************************************************"\n')
f.write('echo "* The FPGA-Conf. was written and the FPGA IP Evaluation Mode has started. *"\n')
f.write('echo "* Now it is enabled to test the IP. After this *"\n')
f.write('echo "* promped is closed the licence will expire... *"\n')
f.write('echo "********************************************************************************"\n')
f.write('echo "* Support the author Robin Sebastian (git@robseb.de) *"\n')
f.write('echo "********************************************************************************"\n')
if self.__SPno==0:
f.write('read -p "Type something to exit..." mainmenuinput\n')
else:
f.write('pause\n')
except Exception as ex:
                print('[ERROR] Failed to create the quartus_pgm JTAG flash shell script\n'+\
                    '       MSG: '+str(ex))
return False
# Execute the shell script in a new terminal window
try:
#os.startfile(sh_file_name)
#os.system('gnome-terminal -x '+st_dir)
if self.__SPno==0:
st_dir= sh_file_name.replace(os.path.expanduser('~'), '~', 1)
os.chmod(sh_file_name, 0o775)
os.system('./'+st_dir)
else:
os.system(sh_file_name)
except Exception as ex:
                print('[ERROR] Failed to start the quartus_pgm JTAG flash shell script!\n'+\
' MSG.: '+str(ex))
return False
# Wait for the user
#inch = input('===> Type something to terminal the FPGA IP Evaluation Mode... $')
# Remove the Shell script file
if os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+sh_file_name):
try:
os.remove(sof_file_dir+self.__SPLM[self.__SPno]+sh_file_name)
except Exception:
print('[ERROR] Failed to remove the old SH/BAT File! Please remove it by hand!')
                    print('       File dir: "'+sof_file_dir+self.__SPLM[self.__SPno]+sh_file_name+'"')
# Remove the CDF script file
if os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name):
try:
os.remove(sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name)
except Exception:
print('[ERROR] Failed to remove the old CDF File! Please remove it by hand!')
                    print('       File dir: "'+sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name+'"')
# Set status to true
self.ThreadStatus=True
return True
# For the case with a full licensed FPGA-Configuration
err=''
out_pgm=''
try:
if self.__SPno==0:
cmd = [quartus_pgm_cmd_dir,'-c1','-mJTAG',cdf_file_dir]
with subprocess.Popen(cmd,\
stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr = subprocess.PIPE) as edsCmdShell:
time.sleep(DELAY_MS)
out_pgm, err = edsCmdShell.communicate()
except Exception as ex:
print('[ERROR] Failed to execute the "quartus_pgm" command MSG:'+ str(ex))
return False
finally:
if os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name):
try:
os.remove(sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name)
except Exception:
print('[ERROR] Failed to remove the old CDF File! Please remove it by hand!')
                    print('       File dir: "'+sof_file_dir+self.__SPLM[self.__SPno]+cdf_file_name+'"')
        # Check that the FPGA Configuration was successful with JTAG
out_pgm= out_pgm.decode("utf-8")
err= err.decode("utf-8")
if err=='' and out_pgm.find('Info: Quartus Prime Programmer was successful')>-1:
print('[INFO] FPGA-Configuration was written successfully via JTAG')
elif out_pgm.find('Intel FPGA IP Evaluation Mode feature that will not work after the hardware evaluation time expires')>-1:
print('[ERROR] Failed to write the FPGA-Configuration via JTAG!')
            print('       The FPGA-Configuration file contains an unlicensed IP and an Intel FPGA IP Evaluation Mode error occurred!')
            print('       It looks like the Intel FPGA IP Evaluation Mode server is already running.')
            print('       Close any currently open FPGA-Configurations with CMD + C and try it again!')
print('************************ OUTPUT OF "quartus_pgm" ************************')
print(out_pgm)
return False
else:
print('[ERROR] Failed to write the FPGA-Configuration via JTAG')
err= err.replace('\n','')
print(' MSG: '+err)
print(' OUT: '+out_pgm)
return False
# Set status to true
self.ThreadStatus=True
return True
#
# @brief Ping a Network device
# @param host_or_ip IPv4 address of the device
# @param packets Number of Packages to send
# @param timeout Timout in sec
# @return Pinging the board was successful
#
def __ping(self, host_or_ip, packets=1, timeout=1000):
if platform.system().lower() == 'windows':
command = ['ping', '-n', str(packets), '-w', str(timeout), host_or_ip]
res= subprocess.run(command, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, \
stderr=subprocess.DEVNULL, creationflags=0x08000000)
return res.returncode == 0 and b'TTL=' in res.stdout
else:
ping_response =''
try:
'''
ping_response = subprocess.Popen(["/bin/ping", "-c5", "-w100",host_or_ip], stdout=subprocess.PIPE, \
stdin=subprocess.DEVNULL).stdout.read()
ping_response = ping_response.decode("utf-8")
'''
ping_response = subprocess.Popen(["timeout","5","ping", "-c5", "-w100",host_or_ip], stdout=subprocess.PIPE, \
stdin=subprocess.DEVNULL).stdout.read()
ping_response = ping_response.decode("utf-8")
except Exception:
return False
if ping_response.find('Host Unreachable')==-1:
return True
return False
#
# @brief Check the Network connection from the development machine to the embedded Linux Distribution
# @return Pinging the board was successful
#
def CheckNetworkConnection2Board(self):
return self.__ping(self.board_ip_addrs)
#
    # @brief Establish a SSH connection to the SoC-FPGA board running rsyocto
#
def EstablishSSHcon(self):
Thread.__init__(self)
self.__queue = queue.Queue()
self.daemon = True
self.start()
#
# @brief Send a Linux Shell command via SSH to rsyocto
# @param cmd Linux Shell command to execute
# as string
# @param ignore_error Ignore all errors
    # @return response string of the command
#
def __sendCmd(self,cmd='',ignore_error=False):
ssh_stdin, ssh_stdout, ssh_stderr = self.__sshClient.exec_command(cmd)
err = ssh_stderr.read().decode("utf-8")
if not err == '':
if not ignore_error:
print('[ERROR] Failed to execute a Linux cmd via SSH!\n'+\
' CMD : "'+cmd+'"\n'+\
' ERROR: "'+err+'"')
return 'ERROR'
return ssh_stdout.read().decode("utf-8")
#
# @brief Decode the used diskspace of the
# rootfs of rsyocto in %
# @param str_df output of the "df" command
# @return available diskspace in %
#
def __decodeDiskSpace(self,str_df=''):
root_pos = str_df.find('/dev/root')
if root_pos==-1: return -1
line_end_pos = str_df.find('\n',root_pos)
if line_end_pos==-1: return -1
line = str_df[root_pos:line_end_pos]
'''
\Filesystem 1K-blocks Used Available Use% Mounted on
/dev/root 3978548 640080 3133036 17% /
'''
# Find the % character and number
perc_pos = line.find('%')
begin_number_pos =-1
if perc_pos==-1: return -1
for i in range(perc_pos-1,0,-1):
try:
vao = line[i]
null = int(line[i])
except ValueError:
begin_number_pos = i+1
break
if begin_number_pos==-1: return -1
number = line[begin_number_pos:perc_pos]
try:
number = int(number)
except ValueError:
return -1
return number
#
# @brief Decode the partition table of rsyocto
    # and check that all expected partitions
# are available
# @param str_lsblk output of the "lsblk" command
# @param mountingpoint name of a mounting point to found
    # @return is the partition table valid
#
def __decodePartitions(self,str_lsblk='',mountingpoint=''):
if str_lsblk.find('mmcblk0p1')==-1 or str_lsblk.find('mmcblk0p2')==-1 or \
str_lsblk.find('mmcblk0p3')==-1:
return False
if not mountingpoint=='' and str_lsblk.find(mountingpoint)==-1:
return False
return True
#
    # @brief Check that a FPGA-Configuration file is on the rootfs
# with the "ls" command
# @param str_ls output of the "ls" command
# @param fpga_conf_name name of the FPGA-Configuration file to find
# @return was the file found?
#
def __checkforFPGAFiles(self,str_ls='',fpga_conf_name=''):
if str_ls=='' or fpga_conf_name=='' : return False
if str_ls.find(fpga_conf_name)==-1: return False
return True
#
# @brief Cleanup the SSH/SFTP Connection and the process of
# writing a new FPGA-Configuration to rsyocto
# @param remove_files Remove the temp files from the
# rsyocto rootfs
# @param close_connection Close the SSH/SFTP connection
#
def __cleanupSSH(self,remove_files=False, close_connection=True):
print('[INFO] Cleanup SSH- and SFTP connection to rsyocto')
if remove_files:
# Remove the old mounting point if available
try:
self. __sendCmd('sudo umount '+self.__temp_partfolder_dir,True)
except Exception:
pass
# Remove the temp folder
cmd = 'sudo rm -r '+self.__temp_folder_dir
try:
rm_mes = self. __sendCmd(cmd,True)
except Exception:
pass
if close_connection:
if not self.__sftpClient== None: self.__sftpClient.close()
if not self.__sshClient== None: self.__sshClient.close()
#
# @brief Override the run() function of Thread class
    #        Thread for handling the SSH connection to the SoC-FPGA board
#
def run(self):
print('[INFO] Start to establish a SSH connection to the SoC-FPGA board with rsyocto')
# Start a new SSH client connection to the development board
self.__sshClient = None
self.__sftpClient= None
self.ThreadStatus= False
self.__sshClient = paramiko.SSHClient()
self.__sshClient.load_system_host_keys()
warnings.filterwarnings("ignore")
self.__temp_folder_dir = '/home/'+self.board_user+'/'+RSYOCTO_TEMPCOPYFOLDER
self.__temp_partfolder_dir= self.__temp_folder_dir+'/'+'bootloader'
self.__sshClient.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
#
## 1. Step: Establish a SSH connection to the board
#
self.__sshClient.connect(self.board_ip_addrs, username=self.board_user,
password=self.board_pw, allow_agent=False,
look_for_keys=False, banner_timeout=500)
#
## 2. Step: Check that the embedded Linux Distribution is okay for the tasks
#
# Load the rsyocto banner
banner = str(self.__sshClient._transport.get_banner())
            # Check that the connected board is really rsyocto
is_not_rsyocto =False
for checkstr in RSYOCTO_BANNER_CHECK_LINE:
if not checkstr in banner:
is_not_rsyocto = True
break
if is_not_rsyocto:
print('[ERROR] The connected board does not run rsyocto!\n'+
                      '       This script works only together with the \n'+\
' embedded Linux Distribution rsyocto!\n'+\
' ==> github.com/robseb/rsyocto')
self.__cleanupSSH(False,True)
return False
            # Check that the connected rsyocto runs on the same SoC-FPGA family as
            # the Intel Quartus Prime project
cmd = 'cat /usr/rsyocto/device.txt'
rsyocto_devicename = self. __sendCmd(cmd)
if not self.Device_name[self.Device_id] in rsyocto_devicename:
                print('[ERROR] The SoC-FPGA device of the connected board is incompatible \n'+\
' to this Intel Quartus Prime FPGA project!\n'+\
' Device of the Board : "'+\
rsyocto_devicename+'" \n'+\
' Quartus Prime Project device : "'+\
self.Device_name[self.Device_id]+'"')
self.__cleanupSSH(False,True)
return False
# Check that the "FPGA-writeConfig" command is available
#
fpga_writecmd_ret = self. __sendCmd("FPGA-writeConfig")
if not RSYOCTO_FPGAWRITECONF_CHECK in fpga_writecmd_ret:
                print('[ERROR] The connected rsyocto Linux Distribution does not have the '+\
                      '"FPGA-writeConfig" Linux command of the rstools installed! \n'+\
                      '        This command allows writing the FPGA-Configuration and is needed by this script!')
self.__cleanupSSH(False,True)
return False
# Check that enough memory space is available on rsyocto for
# uploading the FPGA configuration files
cmd = 'df'
diskpace = self.__decodeDiskSpace(self. __sendCmd(cmd))
if diskpace==-1:
                print('[ERROR] Failed to get the available disk space from the embedded Linux')
self.__cleanupSSH(False,True)
return False
elif diskpace >= 99:
                print('[ERROR] There is not enough disk space left on rsyocto on the SoC-FPGA board \n'+\
                      '        for uploading the FPGA-Configuration!\n'+\
                      '        Disk space used on the rootfs: '+str(diskpace)+'%\n'+\
                      '        At least 1% must be free!')
self.__cleanupSSH(False,True)
return False
# Check that all partitions are available on rsyocto
# primary the bootloader partition
cmd = 'lsblk'
if not self.__decodePartitions(self. __sendCmd(cmd)):
                print('[ERROR] Not all expected partitions are available on rsyocto!\n'+\
' The bootloader partition could not be located!')
self.__cleanupSSH(False,True)
return False
print('[INFO] SSH Connection established to rsyocto ('+\
str(100-diskpace)+'% free disk space remains on the rootfs)')
#
            ## 3. Step: Transferring the FPGA-Configuration file
# that can be written by Linux to the temp folder
#
[rbf_dir,fpga_linux_file,fpga_boot_file] = self.__queue.get()
print('[INFO] Starting SFTP Data transfer!')
            # Transferring files to and from the remote machine
self.__sftpClient = self.__sshClient.open_sftp()
# Remove the temp folder from rsyocto
self.__cleanupSSH(True,False)
# Create a new empty temp folder
cmd = 'mkdir '+self.__temp_folder_dir
if not self. __sendCmd(cmd)=='':
print('[ERROR] Failed to create a new temp folder on rsyocto!')
self.__cleanupSSH(False,True)
return False
# Copy the FPGA configuration file for writing with Linux to the rootfs
            print('[INFO] Start copying the new Linux FPGA-Configuration file to rsyocto')
local_path = rbf_dir+fpga_linux_file
try:
self.__sftpClient.put(local_path, self.__temp_folder_dir+'/'+fpga_linux_file)
except Exception as ex:
print('[ERROR] Exception occurred during SFTP File transfer!\n'+\
' MSG. : "'+str(ex)+'"')
self.__cleanupSSH(True,True)
return False
# Check that the new FPGA-Configuration is now located on the rootfs
cmd = 'ls '+self.__temp_folder_dir
if not self.__checkforFPGAFiles(self. __sendCmd(cmd),fpga_linux_file):
print('[ERROR] The Linux FPGA-Configuration could not be found \n'+\
                      '        in the temp folder on the rootfs!')
self.__cleanupSSH(True,True)
return False
# Write the new FPGA-Configuration to the FPGA-Fabric
print('[INFO] Changing the FPGA-Configuration of FPGA-Fabric with the new one')
cmd = 'FPGA-writeConfig -f '+self.__temp_folder_dir+'/'+fpga_linux_file
if self. __sendCmd(cmd).find('Succses: The FPGA runs now with')==-1:
print('[ERROR] Failed to write the FPGA Configuration!')
self.__cleanupSSH(True,True)
return False
# Remove the FPGA-Configuration file from the rootfs
cmd = 'sudo rm '+self.__temp_folder_dir+'/'+fpga_linux_file
rm_mes = self. __sendCmd(cmd)
if not rm_mes=='':
print('[ERROR] Failed to remove the Linux FPGA-Configuration file \n'+\
' from the rootfs!')
self.__cleanupSSH(True,True)
return False
print('[INFO] Running FPGA-Configuration was changed successfully')
#
## 4. Step: Copy the FPGA-Configuration file to the bootloader partition
#
#
if not fpga_boot_file=='':
# Create a new empty temp folder for the bootloader
cmd = 'mkdir '+self.__temp_partfolder_dir
if not self. __sendCmd(cmd)=='':
print('[ERROR] Failed to create a new bootloader temp folder on rsyocto!')
self.__cleanupSSH(False,True)
return False
# Remove the old mounting point if available
self. __sendCmd('sudo umount '+self.__temp_partfolder_dir,True)
                # Mount the bootloader partition to the temp folder
self. __sendCmd('sudo mount /dev/mmcblk0p1 '+self.__temp_partfolder_dir)
# Check that the partition was mounted
cmd = 'lsblk'
if not self.__decodePartitions(self. __sendCmd(cmd),'/home/root/.flashFP'):
print('[ERROR] The mounting of the bootloader partition on rsyocto failed!')
self.__cleanupSSH(True,True)
return False
# Read the bootloader files and look for the FPGA-Configuration file
cmd = 'ls '+self.__temp_partfolder_dir
if not self.__checkforFPGAFiles(self. __sendCmd(cmd),fpga_boot_file):
print('[ERROR] The bootloader FPGA-Configuration could not be found \n'+\
' in the bootloader partition!')
self.__cleanupSSH(True,True)
return False
# Remove the old FPGA-Configuration file from the bootloader partition
print('[INFO] Removing the old bootloader FPGA-Configuration from rsyocto')
cmd = 'sudo rm '+self.__temp_partfolder_dir+'/'+fpga_boot_file
rm_mes = self. __sendCmd(cmd)
if not rm_mes=='':
print('[ERROR] Failed to remove the old FPGA-Configuration file \n'+\
' from the bootloader partition!')
self.__cleanupSSH(True,True)
return False
# Copy the new FPGA-Configuration file to the bootloader partition
print('[INFO] Copying the new bootloader FPGA-Configuration to rsyocto')
local_path_bootconf = rbf_dir+fpga_boot_file
try:
self.__sftpClient.put(local_path_bootconf, self.__temp_partfolder_dir+'/'+fpga_boot_file)
except Exception as ex:
print('[ERROR] Exception occurred during SFTP File transfer!\n'+\
' MSG. : "'+str(ex)+'"')
self.__cleanupSSH(True,True)
return False
# Check that the new FPGA-Configuration is inside the partition folder
cmd = 'ls '+self.__temp_partfolder_dir
if not self.__checkforFPGAFiles(self. __sendCmd(cmd),fpga_boot_file):
print('[ERROR] The new bootloader FPGA-Configuration could not be found \n'+\
' in the bootloader partition!')
self.__cleanupSSH(True,True)
return False
# Remove the old mounting point if available
self. __sendCmd('sudo umount '+self.__temp_partfolder_dir,True)
# Remove the mounting point folder
cmd = 'sudo rm -r '+self.__temp_partfolder_dir
rm_mes = self. __sendCmd(cmd)
if not rm_mes=='':
print('[ERROR] Failed to remove the mounting point folder \n'+\
' from the rootfs!')
# ADD CLEAN UP
self.__sftpClient.close()
self.__sshClient.close()
return False
print('[INFO] Bootloader FPGA-Configuration was changed successfully')
# Clean up
self.__cleanupSSH(True,True)
print('[INFO] SSH Thread and SFTP Data transfer done')
self.ThreadStatus= True
return True
except Exception as ex:
print('[ERROR] Failed to open SSH network connection to the board!\n'+
' Msg.: "'+str(ex)+'"')
print(' Maybe try to remove the SSH-Keys from the SSH folder')
self.__cleanupSSH(True,True)
#
#
    # @brief Create a FPGA configuration file for configuring the FPGA during boot or with Linux in case this
# feature was selected inside the u-boot script
# @param boot_linux Generate configuration for
# False : Written during boot (Passive Parallel x8;
# File name: <as in uboot script>.rbf)
# True : Can be written by Linux (Passive Parallel x16;
# File name: <as in uboot script>_linux.rbf)
    # @param linux_filename ".rbf" output file name for the configuration with Linux
# @param linux_copydir the location where the output Linux FPGA configuration file should be copied
# @return success
#
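    # A minimal call sketch (illustrative file name; the same call is made in the
    # main section of this script):
    #   ok = flashFPGA2Linux.GenerateFPGAconf(boot_linux=True,
    #                                         linux_filename='rsyocto_fpga_conf.rbf',
    #                                         linux_copydir=rbf_dir)
    #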
def GenerateFPGAconf(self, boot_linux =False, linux_filename='', linux_copydir=''):
if self.Device_id==2 and boot_linux:
print('[ERROR] FPGA configuration file that can be written by Linux (HPS)')
print(' is for the Arria 10 SX right now not supported!')
return True # Ignore this message
gen_fpga_conf=False
early_io_mode =False
# 3.a Generate the FPGA configuration file
if self.Sof_folder =='':
sof_file_dir = self.Quartus_proj_top_dir
else:
sof_file_dir = self.Quartus_proj_top_dir+self.__SPLM[self.__SPno]+self.Sof_folder
# Remove the old rbf file from the Quartus project top folder
if os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+linux_filename):
try:
os.remove(sof_file_dir+self.__SPLM[self.__SPno]+linux_filename)
except Exception:
print('[ERROR] Failed to remove the old project folder FPGA config file')
try:
with subprocess.Popen(self.EDS_Folder+\
EDS_EMBSHELL_DIR[self.__SPno], stdin=subprocess.PIPE,stdout=DEVNULL) as edsCmdShell:
time.sleep(DELAY_MS)
if not boot_linux:
print('[INFO] Generating a new FPGA-Configuration file for configuration during boot')
sof_file_dir2 = sof_file_dir.replace('\\', '/')
b = bytes(' cd '+sof_file_dir2+' \n', 'utf-8')
edsCmdShell.stdin.write(b)
                    # Enable HPS early I/O release mode for the Arria 10 SX
if self.Device_id==2:
                        pre_fix =' --hps '
                        print('[NOTE] The FPGA configuration will be generated in HPS early I/O release mode')
else:
pre_fix =''
b = bytes('quartus_cpf -c '+pre_fix+' '+self.Sof_file_name+' '+linux_filename+' \n','utf-8')
edsCmdShell.stdin.write(b)
else:
print('[INFO] Generating a new FPGA-Configuration file for configuration with the Linux')
sof_file_dir2 = sof_file_dir.replace('\\', '/')
b = bytes(' cd '+sof_file_dir2+' \n', 'utf-8')
edsCmdShell.stdin.write(b)
b = bytes('quartus_cpf -m FPP -c '+self.Sof_file_name+' '+linux_filename+' \n','utf-8')
edsCmdShell.stdin.write(b)
edsCmdShell.communicate()
time.sleep(DELAY_MS)
except Exception as ex:
print('[ERROR] Failed to start the Intel SoC EDS Command Shell! MSG:'+ str(ex))
return False
# Check that the generated rbf configuration file is now available
if self.Device_id==2:
            # Configuration file should be generated in early I/O release mode (Arria 10 SX)
if not os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+rbf_config_name_body+'.periph.rbf') or \
not os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+rbf_config_name_body+'.core.rbf'):
print('[ERROR] Failed to generate the FPGA configuration file')
return False
else:
# Configuration file should be generated in normal mode
if not os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+linux_filename):
print('[ERROR] Failed to generate the FPGA configuration file')
return False
if not boot_linux:
## For the uboot FPGA configuration file
try:
if self.Device_id==2:
if not os.path.isfile(self.U_boot_socfpga_dir+'/tools/mkimage'):
                    print('[ERROR] The mkimage application ('+self.U_boot_socfpga_dir+'/tools/mkimage)')
print(' does not exist')
print(' FPGA Configuration file generation is not possible')
                    print('         --> Run the u-boot build process once to clone u-boot to "/software"')
return False
try:
shutil.copy2(self.U_boot_socfpga_dir+'/tools/mkimage',sof_file_dir+'/mkimage')
except Exception:
print('[ERROR] Failed to copy the "mkimage" application ')
return False
                print('[INFO] Generate the .its HPS early I/O release configuration file ')
ITS_FILE_CONTENT = ' /dts-v1/; '+ \
'/ { '+ \
' description = "FIT image with FPGA bistream"; '+ \
' #address-cells = <1>; '+ \
' '+ \
' images { '+ \
' fpga-periph-1 { '+ \
' description = "FPGA peripheral bitstream"; '+ \
' data = /incbin/("'+rbf_config_name_body+'.periph.rbf'+'"); '+ \
' type = "fpga"; '+ \
' arch = "arm"; '+ \
' compression = "none"; '+ \
' }; '+ \
' '+ \
' fpga-core-1 { '+ \
' description = "FPGA core bitstream"; '+ \
' data = /incbin/("'+rbf_config_name_body+'.core.rbf'+'");'+ \
' type = "fpga"; '+ \
' arch = "arm"; '+ \
' compression = "none"; '+ \
' }; '+ \
' }; '+ \
' '+ \
' configurations { '+ \
' default = "config-1"; '+ \
' config-1 { '+ \
' description = "Boot with FPGA early IO release config"; '+ \
' fpga = "fpga-periph-1"; '+ \
' }; '+ \
' }; '+ \
' }; '
if os.path.isfile(sof_file_dir+'/fit_spl_fpga.its'):
os.remove(sof_file_dir+'/fit_spl_fpga.its')
if os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+FIT_FPGA_FILE_NAME):
os.remove(sof_file_dir+self.__SPLM[self.__SPno]+FIT_FPGA_FILE_NAME)
with open(sof_file_dir+'/fit_spl_fpga.its', "a") as f:
f.write(ITS_FILE_CONTENT)
                print('[INFO] Create the FIT image with the FPGA programming files (used by SPL)')
#
# mkimage -E -f board/altera/arria10-socdk/fit_spl_fpga.its fit_spl_fpga.itb
# -E => place data outside of the FIT structure
# -f => input filename for FIT source
#
os.system('cd '+sof_file_dir+' && mkimage -E -f fit_spl_fpga.its '+FIT_FPGA_FILE_NAME+' \n')
os.remove(sof_file_dir+'/mkimage')
os.remove(sof_file_dir+'/fit_spl_fpga.its')
# Check that the output file is generated
if not os.path.isfile(sof_file_dir+self.__SPLM[self.__SPno]+FIT_FPGA_FILE_NAME):
print('[ERROR] The .itb FPGA configuration file was not generated!')
return False
# Copy the file to the VFAT partition
if os.path.isfile(self.Vfat_folder_dir+self.__SPLM[self.__SPno]+FIT_FPGA_FILE_NAME):
os.remove(self.Vfat_folder_dir+self.__SPLM[self.__SPno]+FIT_FPGA_FILE_NAME)
shutil.move(sof_file_dir+self.__SPLM[self.__SPno]+FIT_FPGA_FILE_NAME, \
self.Vfat_folder_dir+self.__SPLM[self.__SPno])
except Exception as ex:
print('[ERROR] Failed to move the rbf configuration '+ \
'file to the vfat folder MSG:'+str(ex))
return False
return True
    # @param rbf_dir            Directory of the FPGA-Configuration file
    # @param fpga_linux_file    FPGA-Configuration file name that is written by Linux
    # @param fpga_boot_file     FPGA-Configuration file name that is written by the bootloader
# '' -> Bootloader FPGA-Configuration file change disabled
def startCopingFPGAconfig(self,rbf_dir,fpga_linux_file,fpga_boot_file):
if not os.path.isdir(rbf_dir):
            print('[ERROR] The directory of the FPGA-Configuration folder on the computer does not exist!')
return False
if not os.path.isfile(rbf_dir+self.__SPLM[self.__SPno]+fpga_linux_file):
            print('[ERROR] The Linux FPGA-Configuration file does not exist on the computer!\n'+\
' File Dir: "'+rbf_dir+self.__SPLM[self.__SPno]+fpga_linux_file+'"')
return False
if not fpga_boot_file=='' and not os.path.isfile(rbf_dir+self.__SPLM[self.__SPno]+fpga_boot_file):
            print('[ERROR] The bootloader FPGA-Configuration file does not exist on the computer!\n'+\
' File Dir: "'+rbf_dir+self.__SPLM[self.__SPno]+fpga_boot_file+'"')
return False
# Check that the SSH thread is running
if not self.is_alive() or self.__queue == None:
            print('[ERROR] The SSH client thread is not running!\n'+\
                  '        An upload of the FPGA-Configuration files via SFTP is not possible!\n'+\
' Check the output of the SSH Thread!')
return False
# Write the data to the Queue
it = [rbf_dir,fpga_linux_file,fpga_boot_file]
self.__queue.put(it)
return True
#
# @brief Parse input arguments to enable the special modes
#        Read and store settings inside an XML file
#
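# Illustrative invocations (hypothetical script name and placeholder values; the flags
# themselves are defined in praseInputArgs() below):
#   python3 flashFPGA2rsyocto.py -ip 192.168.1.100 -us root -pw <password>
#   python3 flashFPGA2rsyocto.py -cf 1 -qv S18.1
#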
def praseInputArgs():
    ### Process the user arguments
arg_set_ip = ''
arg_set_user = ''
arg_set_pw = ''
arg_set_flashBoot = False
arg_compile_project = False
arg_quartus_ver = ''
arg_use_jtag = False
flashBoot_chnaged = False
quartusver_changed = False
# Create the default XML setting file
if not os.path.exists(FLASHFPGA_SETTINGS_XML_FILE_NAME):
with open(FLASHFPGA_SETTINGS_XML_FILE_NAME,"w") as f:
f.write(FLASHFPGA_SETTINGS_XML_FILE)
print('[INFO] The XML setting file "'+FLASHFPGA_SETTINGS_XML_FILE_NAME+'" was created')
    # Was the script started with an additional argument specified?
if len(sys.argv)>1:
        # Select the possible input arguments
parser = argparse.ArgumentParser()
parser.add_argument('-ip','--set_ipaddres', required=False, help='Set the IPv4 Address of the board')
parser.add_argument('-us','--set_user', required=False, help='Set the Linux username of the board')
parser.add_argument('-pw','--set_password', required=False, help='Set the Linux user password of the board')
        parser.add_argument('-cf','--en_complie_project', required=False, help='Compile the Intel Quartus Prime '+\
'FPGA project (use "-cf 1")')
parser.add_argument('-fb','--en_flashBoot', required=False, \
                            help='Enable or disable writing of the u-boot bootloader '\
                            'FPGA-Configuration file [0: Disable]')
parser.add_argument('-j','--use_jtag', required=False, \
help='Use JTAG via a JTAG Blaster to write the FPGA-Configuration (use "-j 1")')
parser.add_argument('-qv','--set_quartus_prime_ver',required=False, \
help=' Set the Intel Quartus Prime Version \n'+\
                                 '         Note: Only required for FPGA Project Compilation! |\n'+\
' Quartus Prime Version to use <Version><Version No> |\n'+\
' L -> Quartus Prime Lite (e.g. L16.1) |\n'+\
' S -> Quartus Prime Standard (e.g. S18.1) | \n'+\
' P -> Quartus Prime Pro (e.g. P20.1)\n')
args = parser.parse_args()
# Set the IP Address of the Board
if args.set_ipaddres != None:
            # Check that the input is a valid IPv4 address
regex_pattern = "^([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$"
if not bool( re.match( regex_pattern, args.set_ipaddres)):
print('[ERROR] The given IP Address is not in the proper format (0.0.0.0)')
sys.exit()
arg_set_ip = args.set_ipaddres
print('[INFO] IP Address of the board was set to "'+arg_set_ip+'"')
        # Set the Linux user name of the board
if args.set_user != None: arg_set_user=args.set_user
        # Set the Linux user password of the board
if args.set_password != None: arg_set_pw=args.set_password
        # Compile the Intel Quartus Prime FPGA project
if args.en_complie_project != None:
try: tmp = int(args.en_complie_project)
except Exception:
print('[ERROR] Failed to convert the [--en_complie_project/-cf] input argument!')
print(' Only integer numbers are allowed!')
sys.exit()
if tmp >0:
arg_compile_project= True
                print('[INFO] Compilation of the Intel Quartus Prime FPGA project is enabled')
# Set the Intel Quartus Prime project version
if args.set_quartus_prime_ver != None:
if re.match("^[LSP]+[0-9]+[.]+[0-9]?$",args.set_quartus_prime_ver, re.I) == None:
print('[ERROR] The selected Quartus Version is in the wrong format!')
print(' Quartus Prime Version to use <Version><Version No> \n'+\
' L -> Quartus Prime Lite (e.g. L16.1) \n'+\
' S -> Quartus Prime Standard (e.g. S18.1) \n'+\
' P -> Quartus Prime Pro (e.g. P20.1)')
sys.exit()
arg_quartus_ver=args.set_quartus_prime_ver
quartusver_changed = True
print('[INFO] The Intel Quartus Prime Version is set to "'+arg_quartus_ver+'"')
# Enable or Disable of the writing of the u-boot bootloader FPGA-Configuration file
if args.en_flashBoot != None:
try: tmp = int(args.en_flashBoot)
except Exception:
print('[ERROR] Failed to convert the [--en_flashBoot/-fb] input argument!')
print(' Only integer numbers are allowed!')
sys.exit()
flashBoot_chnaged = True
if tmp==0:
                print('[INFO] Writing of the u-boot FPGA-Configuration file disabled')
arg_set_flashBoot=False
else:
print('[INFO] Writing of the u-boot FPGA-Configuration file enabled')
arg_set_flashBoot=True
# Use JTAG
if args.use_jtag != None:
try: tmp = int(args.use_jtag)
except Exception:
print('[ERROR] Failed to convert the [--use_jtag/-j] input argument!')
print(' Only integer numbers are allowed!')
sys.exit()
if tmp >0:
arg_use_jtag= True
                print('[INFO] Using JTAG with a JTAG Blaster instead of the network is enabled')
############################################ Write settings to a XML file ###########################################
try:
tree = ET.parse(FLASHFPGA_SETTINGS_XML_FILE_NAME)
root = tree.getroot()
except Exception as ex:
        print('[ERROR] Failed to parse the "'+FLASHFPGA_SETTINGS_XML_FILE_NAME+'" file!')
print(' Msg.: '+str(ex))
sys.exit()
# Write the new IP address to the XML file
if not arg_set_ip=='':
for elem in root.iter('board'):
elem.set('set_ip', arg_set_ip)
# Write the new Linux User name to the XML file
if not arg_set_user=='':
for elem in root.iter('board'):
elem.set('set_user', arg_set_user)
# Write the new Linux User password to the XML file
if not arg_set_pw=='':
for elem in root.iter('board'):
elem.set('set_pw', arg_set_pw)
# Write the new Linux User password to the XML file
if flashBoot_chnaged:
for elem in root.iter('board'):
if arg_set_flashBoot: elem.set('set_flashBoot','Y')
else: elem.set('set_flashBoot','N')
# Write the Intel Quartus Prime Version to the XML file
if quartusver_changed:
for elem in root.iter('board'):
elem.set('set_quartus_prime_ver',arg_quartus_ver)
# Flash settings
tree.write(FLASHFPGA_SETTINGS_XML_FILE_NAME)
# In set mode end script here
if arg_set_ip or arg_set_user or arg_set_pw or flashBoot_chnaged:
sys.exit()
################################### Read the settings from the XML file ##################################
try:
tree = ET.parse(FLASHFPGA_SETTINGS_XML_FILE_NAME)
root = tree.getroot()
except Exception as ex:
        print('[ERROR] Failed to parse the "'+FLASHFPGA_SETTINGS_XML_FILE_NAME+'" file!')
print(' Msg.: '+str(ex))
sys.exit()
for part in root.iter('board'):
try:
arg_set_ip = str(part.get('set_ip'))
arg_set_user = str(part.get('set_user'))
            arg_set_pw        = str(part.get('set_pw'))
arg_quartus_ver = str(part.get('set_quartus_prime_ver'))
if str(part.get('set_flashBoot'))=='Y':
arg_set_flashBoot = True
except Exception as ex:
print(' [ERROR] Decoding of the XML file "'+FLASHFPGA_SETTINGS_XML_FILE_NAME+\
'" failed')
print(' Msg.: '+str(ex))
sys.exit()
return arg_set_ip, arg_set_user,arg_set_pw,arg_set_flashBoot,arg_compile_project,arg_quartus_ver,arg_use_jtag
############################################ ############################################
############################################ MAIN ############################################
############################################ ############################################
if __name__ == '__main__':
############################################ Runtime environment check ###########################################
# Check properly Python Version
if sys.version_info[0] < 3:
print('[ERROR] This script can not work with your Python Version!')
print(" Use Python 3.x for this script!")
sys.exit()
if sys.platform =='linux': SPno = 0
else: SPno = 1
SPLM = ['/','\\'] # Linux, Windows
# Enable and read input arguments or the settings from a XML file
arg_set_ip, arg_set_user,arg_set_pw,arg_set_flashBoot,\
arg_compile_project,arg_quartus_ver,arg_use_jtag = praseInputArgs()
############################################################################################################################################
    arg_use_jtag = True  # NOTE: hard-coded override that forces JTAG mode regardless of the parsed "-j" argument
############################################################################################################################################
print('****** Flash FPGA Configuration to rsyocto via SSH/SFTP or JTAG (Ver.: '+version+') ******')
#
## 1. Step: Read the execution environment and scan the Intel Quartus Prime FPGA project
#
flashFPGA2Linux = FlashFPGA2Linux(arg_set_ip, arg_set_user,\
arg_set_pw,arg_compile_project,arg_quartus_ver,arg_use_jtag)
#
    ## 2. Step: Check the network connection to the board
    #           --> Only required for the non-JTAG mode
#
if not arg_use_jtag:
if not flashFPGA2Linux.CheckNetworkConnection2Board():
        print('[ERROR] It was not possible to ping rsyocto with the given IP-Address '+\
'"'+arg_set_ip+'"!\n'+\
' Please check the network connection of this computer'+\
' and of the SoC-FPGA board\n'+\
' You can change the IP-Address with the attribute: "-ip"')
sys.exit()
#
    ## 3. Step: Start the SSH/SFTP Thread to establish a connection
#
flashFPGA2Linux.EstablishSSHcon()
#
## 4. Step: Generate the FPGA-Configuration files
#
rbf_dir = flashFPGA2Linux.Quartus_proj_top_dir+SPLM[SPno]+flashFPGA2Linux.Sof_folder
# Generate a FPGA Configuration file that can be written by Linux (rsyocto)
linux_fpga_file_name = 'rsyocto_fpga_conf.rbf'
if not flashFPGA2Linux.GenerateFPGAconf(True,linux_fpga_file_name,rbf_dir):
print('[ERROR] Failed to generate the Linux FPGA-Configuration file')
sys.exit()
# Generate a FPGA Configuration file that can be written by u-boot
boot_fpga_file_name= ''
if arg_set_flashBoot:
boot_fpga_file_name= 'socfpga.rbf'
if not flashFPGA2Linux.GenerateFPGAconf(False,boot_fpga_file_name,rbf_dir):
print('[ERROR] Failed to generate the u-boot (bootloader) FPGA-Configuration file')
sys.exit()
#
    ## 5. Step: Copy the FPGA-Configuration files via SSH to rsyocto and write the FPGA-Fabric with it
#
if arg_use_jtag:
# Write the FPGA-Configuration only with JTAG
flashFPGA2Linux.command_jtag_writeConfRAM()
else:
flashFPGA2Linux.startCopingFPGAconfig(rbf_dir,linux_fpga_file_name,boot_fpga_file_name)
# Wait until the SSH Thread is done
flashFPGA2Linux.join()
# Remove the FPGA-Configuration files from the Intel Quartus Prime Project folder
if os.path.isfile(rbf_dir+SPLM[SPno]+linux_fpga_file_name):
try:
os.remove(rbf_dir+SPLM[SPno]+linux_fpga_file_name)
except Exception:
pass
if arg_set_flashBoot and os.path.isfile(rbf_dir+SPLM[SPno]+boot_fpga_file_name):
try:
os.remove(rbf_dir+SPLM[SPno]+boot_fpga_file_name)
except Exception:
pass
if flashFPGA2Linux.ThreadStatus:
print('[SUCCESS] Support the author Robin Sebastian (git@robseb.de)')
# EOF
| 48.68122
| 238
| 0.524318
|
06ab79f3f11d7bde938a6ddb30aa47b82f0a6b1d
| 7,194
|
py
|
Python
|
src/python/pants/backend/python/goals/repl.py
|
betaboon/pants
|
05ec375c8bfcaa0396c673847bb139326883cc08
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/goals/repl.py
|
betaboon/pants
|
05ec375c8bfcaa0396c673847bb139326883cc08
|
[
"Apache-2.0"
] | 1
|
2022-02-22T18:15:03.000Z
|
2022-02-22T18:15:03.000Z
|
src/python/pants/backend/python/goals/repl.py
|
ryanking/pants
|
e45b00d2eb467b599966bca262405a5d74d27bdd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from typing import Iterable
from pants.backend.python.subsystems.ipython import IPython
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import PythonResolveField
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex import Pex, PexRequest
from pants.backend.python.util_rules.pex_environment import PexEnvironment
from pants.backend.python.util_rules.pex_from_targets import (
InterpreterConstraintsRequest,
NoCompatibleResolveException,
RequirementsPexRequest,
)
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.repl import ReplImplementation, ReplRequest
from pants.engine.fs import Digest, MergeDigests
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Target, TransitiveTargets, TransitiveTargetsRequest
from pants.engine.unions import UnionRule
from pants.util.docutil import bin_name
from pants.util.logging import LogLevel
def validate_compatible_resolve(root_targets: Iterable[Target], python_setup: PythonSetup) -> None:
"""Eagerly validate that all roots are compatible.
We already end up checking this in pex_from_targets.py, but this is a more eager check so that
we have a better error message.
"""
root_resolves = {
root[PythonResolveField].normalized_value(python_setup)
for root in root_targets
if root.has_field(PythonResolveField)
}
if len(root_resolves) > 1:
raise NoCompatibleResolveException(
python_setup,
"The input targets did not have a resolve in common",
root_targets,
(
"To work around this, choose which resolve you want to use from above. "
f'Then, run `{bin_name()} peek :: | jq -r \'.[] | select(.resolve == "example") | '
f'.["address"]\' | xargs {bin_name()} repl`, where you replace "example" with the '
"resolve name, and possibly replace the specs `::` with what you were using "
"before. This will result in opening a REPL with only targets using the desired "
"resolve."
),
)
class PythonRepl(ReplImplementation):
name = "python"
@rule(level=LogLevel.DEBUG)
async def create_python_repl_request(
request: PythonRepl, pex_env: PexEnvironment, python_setup: PythonSetup
) -> ReplRequest:
validate_compatible_resolve(request.targets, python_setup)
interpreter_constraints, transitive_targets = await MultiGet(
Get(InterpreterConstraints, InterpreterConstraintsRequest(request.addresses)),
Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses)),
)
requirements_request = Get(Pex, RequirementsPexRequest(request.addresses, internal_only=True))
local_dists_request = Get(
LocalDistsPex,
LocalDistsPexRequest(
request.addresses,
internal_only=True,
interpreter_constraints=interpreter_constraints,
),
)
sources_request = Get(
PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)
)
requirements_pex, local_dists, sources = await MultiGet(
requirements_request, local_dists_request, sources_request
)
merged_digest = await Get(
Digest,
MergeDigests(
(requirements_pex.digest, local_dists.pex.digest, sources.source_files.snapshot.digest)
),
)
complete_pex_env = pex_env.in_workspace()
args = complete_pex_env.create_argv(
request.in_chroot(requirements_pex.name), python=requirements_pex.python
)
chrooted_source_roots = [request.in_chroot(sr) for sr in sources.source_roots]
extra_env = {
**complete_pex_env.environment_dict(python_configured=requirements_pex.python is not None),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
"PEX_PATH": request.in_chroot(local_dists.pex.name),
}
return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env)
class IPythonRepl(ReplImplementation):
name = "ipython"
@rule(level=LogLevel.DEBUG)
async def create_ipython_repl_request(
request: IPythonRepl, ipython: IPython, pex_env: PexEnvironment, python_setup: PythonSetup
) -> ReplRequest:
validate_compatible_resolve(request.targets, python_setup)
interpreter_constraints, transitive_targets = await MultiGet(
Get(InterpreterConstraints, InterpreterConstraintsRequest(request.addresses)),
Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses)),
)
requirements_request = Get(Pex, RequirementsPexRequest(request.addresses, internal_only=True))
sources_request = Get(
PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)
)
ipython_request = Get(
Pex,
PexRequest(
output_filename="ipython.pex",
main=ipython.main,
requirements=ipython.pex_requirements(),
interpreter_constraints=interpreter_constraints,
internal_only=True,
),
)
requirements_pex, sources, ipython_pex = await MultiGet(
requirements_request, sources_request, ipython_request
)
local_dists = await Get(
LocalDistsPex,
LocalDistsPexRequest(
request.addresses,
internal_only=True,
interpreter_constraints=interpreter_constraints,
sources=sources,
),
)
merged_digest = await Get(
Digest,
MergeDigests(
(
requirements_pex.digest,
local_dists.pex.digest,
local_dists.remaining_sources.source_files.snapshot.digest,
ipython_pex.digest,
)
),
)
complete_pex_env = pex_env.in_workspace()
args = list(
complete_pex_env.create_argv(request.in_chroot(ipython_pex.name), python=ipython_pex.python)
)
if ipython.options.ignore_cwd:
args.append("--ignore-cwd")
chrooted_source_roots = [request.in_chroot(sr) for sr in sources.source_roots]
extra_env = {
**complete_pex_env.environment_dict(python_configured=ipython_pex.python is not None),
"PEX_PATH": os.pathsep.join(
[
request.in_chroot(requirements_pex.name),
request.in_chroot(local_dists.pex.name),
]
),
"PEX_EXTRA_SYS_PATH": os.pathsep.join(chrooted_source_roots),
}
return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env)
def rules():
return [
*collect_rules(),
UnionRule(ReplImplementation, PythonRepl),
UnionRule(ReplImplementation, IPythonRepl),
]
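# Typical invocation from a Pants workspace (illustrative target address; the shell
# option chooses between the two ReplImplementations registered above -- the exact flag
# spelling depends on the Pants version in use):
#   ./pants repl src/python/project:lib
#   ./pants repl --shell=ipython src/python/project:lib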
| 35.97
| 100
| 0.705449
|
a911fb3c7f52998d145b5ab3e5265a65106304ce
| 6,289
|
py
|
Python
|
core/ops/modules/ms_deform_attn.py
|
damien911224/RAFT
|
39fc8fb4566444b0d7a5a9a196bfde073515aca1
|
[
"BSD-3-Clause"
] | null | null | null |
core/ops/modules/ms_deform_attn.py
|
damien911224/RAFT
|
39fc8fb4566444b0d7a5a9a196bfde073515aca1
|
[
"BSD-3-Clause"
] | null | null | null |
core/ops/modules/ms_deform_attn.py
|
damien911224/RAFT
|
39fc8fb4566444b0d7a5a9a196bfde073515aca1
|
[
"BSD-3-Clause"
] | null | null | null |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
from ..functions import MSDeformAttnFunction
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
return (n & (n-1) == 0) and n != 0
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 64
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
output = self.output_proj(output)
return output, attention_weights
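# A minimal forward-pass sketch (illustrative shapes; running it requires the compiled
# MSDeformAttnFunction CUDA extension and a CUDA device):
#   attn = MSDeformAttn(d_model=256, n_levels=4, n_heads=8, n_points=4).cuda()
#   spatial_shapes = torch.as_tensor([[32, 32], [16, 16], [8, 8], [4, 4]], device='cuda')
#   level_start_index = torch.cat((spatial_shapes.new_zeros(1),
#                                  (spatial_shapes[:, 0] * spatial_shapes[:, 1]).cumsum(0)[:-1]))
#   len_in = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())   # 1360 for these levels
#   query = torch.rand(2, 300, 256, device='cuda')
#   reference_points = torch.rand(2, 300, 4, 2, device='cuda')          # (N, Len_q, n_levels, 2)
#   input_flatten = torch.rand(2, len_in, 256, device='cuda')
#   output, attn_weights = attn(query, reference_points, input_flatten,
#                               spatial_shapes, level_start_index)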
| 54.215517
| 153
| 0.611067
|
1f0a9e0e747458acfcc4c9e830a1e763a5e20770
| 11,834
|
py
|
Python
|
src/m5_more_sequences.py
|
zouz-sean/12-MoreSequences
|
e34c38f531f0f6317e0bb986c497be7d10dc0dd6
|
[
"MIT"
] | null | null | null |
src/m5_more_sequences.py
|
zouz-sean/12-MoreSequences
|
e34c38f531f0f6317e0bb986c497be7d10dc0dd6
|
[
"MIT"
] | null | null | null |
src/m5_more_sequences.py
|
zouz-sean/12-MoreSequences
|
e34c38f531f0f6317e0bb986c497be7d10dc0dd6
|
[
"MIT"
] | null | null | null |
"""
This module lets you practice various patterns
for ITERATING through SEQUENCES, including:
-- Beginning to end
-- Other ranges (e.g., backwards and every-3rd-item)
-- The COUNT/SUM/etc pattern
-- The FIND pattern (via LINEAR SEARCH)
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Zhengxiao Zou.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
test_sum_radii()
test_count_last_n_odds()
test_index_of_first_negative()
test_contains_an_a()
# ----------------------------------------------------------------------
# Many problems simply iterate (loop) through ALL of the sequence,
# as in the sum_radii problem below.
# ----------------------------------------------------------------------
def test_sum_radii():
""" Tests the sum_radii function. """
print()
print('--------------------------------------------------')
print('Testing the sum_radii function:')
print('--------------------------------------------------')
# Test 1 is ALREADY DONE (here).
print()
circle1 = rg.Circle(rg.Point(100, 100), 25)
circle2 = rg.Circle(rg.Point(100, 100), 50)
circle3 = rg.Circle(rg.Point(100, 100), 10)
expected = 85
seq = (circle1, circle2, circle3)
actual = sum_radii(seq)
print('Expected:', expected)
print('Actual: ', actual)
# Test 2 is ALREADY DONE (here).
print()
circle1 = rg.Circle(rg.Point(200, 20), 80)
circle2 = rg.Circle(rg.Point(300, 100), 60)
circle3 = rg.Circle(rg.Point(100, 150), 0)
circle4 = rg.Circle(rg.Point(0, 0), 30)
expected = 170
seq = (circle1, circle2, circle3, circle4)
actual = sum_radii(seq)
print('Expected:', expected)
print('Actual: ', actual)
def sum_radii(circles):
"""
What comes in:
-- a sequence of rg.Circle objects
What goes out:
Returns the sum of the radii of the given sequence of rg.Circles.
Side effects: None.
Example: If
circle1 = rg.Circle(rg.Point(999, 100), 25)
circle2 = rg.Circle(rg.Point(888, 200), 50)
circle3 = rg.Circle(rg.Point(777, 300), 10)
then sum_radii([circle1, circle2, circle3])
returns 25 + 50 + 10, which is 85.
Type hints:
:type circles: list[rg.Circle] or tuple(rg.Circle)
"""
# ------------------------------------------------------------------
# Done: 2. Implement and test this function.
# The testing code is already written for you (above).
#
# Note: No fair using "slices" on ANY of these problems,
# if you happen to know what they are.
#
# Likewise, no fair using any builtin methods on sequences
# or strings, if you happen to know any.
#
# Instead, use explicit loops, as you have for other problems.
# ------------------------------------------------------------------
total = 0
for k in circles:
total = total + k.radius
return total
# ----------------------------------------------------------------------
# Some problems iterate (loop) through PART of the sequence,
# perhaps BACKWARDS, as in the count_last_n_odds problem below.
# ----------------------------------------------------------------------
def test_count_last_n_odds():
""" Tests the count_last_n_odds function. """
print()
print('--------------------------------------------------')
print('Testing the count_last_n_odds function:')
print('--------------------------------------------------')
# Six tests - ALREADY DONE (here).
seq = [1, 5, 88, 44, 33, 77, 10, 12, 9]
answer1 = count_last_n_odds(seq, 0)
answer2 = count_last_n_odds(seq, 1)
answer3 = count_last_n_odds(seq, 6)
answer4 = count_last_n_odds(seq, 7)
answer5 = count_last_n_odds(seq, 8)
answer6 = count_last_n_odds(seq, 9)
print()
print('Test set #1 of count_last_n_odds:',
answer1, answer2, answer3, answer4, answer5, answer6)
print('The above should be: 0 1 3 3 4 5')
# Six more tests - ALREADY DONE (here).
seq = [17, 88, -5, -10, 0]
answer1 = count_last_n_odds(seq, 0)
answer2 = count_last_n_odds(seq, 1)
answer3 = count_last_n_odds(seq, 2)
answer4 = count_last_n_odds(seq, 3)
answer5 = count_last_n_odds(seq, 4)
answer6 = count_last_n_odds(seq, 5)
print()
print('Test set #2 of count_last_n_odds:',
answer1, answer2, answer3, answer4, answer5, answer6)
print('The above should be: 0 0 0 1 1 2')
def count_last_n_odds(integers, n):
"""
What comes in:
-- a sequence of integers
-- a non-negative integer n that is less than or equal to
the length of the given sequence
What goes out: Returns the number of odd integers
in the last n items of the given sequence.
Side effects: None.
Examples:
If the sequence is (13, 66, 15, 3), then:
count_last_n_odds(sequence, 0) is 0 [no odds]
count_last_n_odds(sequence, 1) is 1 [1 odd, namely 3]
count_last_n_odds(sequence, 2) is 2 [2 odds, namely 3 and 15]
count_last_n_odds(sequence, 3) is 2 [2 odds, namely 3 and 15]
count_last_n_odds(sequence, 4) is 3 [3 odds: 3, 15 and 13]
Type hints:
:type integers: list[int] or tuple[int]
:type n: int
"""
# ------------------------------------------------------------------
# Done: 3. Implement and test this function.
# The testing code is already written for you (above).
# ------------------------------------------------------------------
count = 0
for k in range(len(integers) - 1, len(integers) - n - 1, -1):
        if integers[k] % 2 != 0:
count = count + 1
return count
# ----------------------------------------------------------------------
# Some problems iterate (loop) through PART of the sequence,
# stopping when the loop FINDS something of interest
# (or continuing to the end if it does NOT find the thing of interest),
# as in the following problems:
# ----------------------------------------------------------------------
def test_index_of_first_negative():
""" Tests the index_of_first_negative function. """
print()
print('--------------------------------------------------')
print('Testing the index_of_first_negative function:')
print('--------------------------------------------------')
# Test 1:
print()
expected = 3
actual = index_of_first_negative([90, 0, 20, -5, 30, -10, 15])
print('Expected:', expected)
print('Actual: ', actual)
# Test 2:
print()
expected = 0
actual = index_of_first_negative([-5, 30, -10, 15])
print('Expected:', expected)
print('Actual: ', actual)
# Test 3:
print()
expected = 4
actual = index_of_first_negative([5, 30, 10, 15, -1])
print('Expected:', expected)
print('Actual: ', actual)
# Test 4:
print()
expected = None
actual = index_of_first_negative([5, 30, 10, 15, 1, 6])
print('Expected:', expected)
print('Actual: ', actual)
if actual == 'None':
print(' Your answer is WRONG.')
print(' You returned the STRING \'None\'')
print(' when you should have returned just None')
def index_of_first_negative(numbers):
"""
What comes in:
-- a sequence of numbers
What goes out: Returns the INDEX of the first negative number
in the given sequence of numbers, or None if the sequence
contains no negative numbers.
Note: "first" negative number means the negative number
whose index is smallest -- see the examples.
Side effects: None.
Examples: If the argument is:
-- [4, 30, -19, 8, -3, -50, 100], this function returns 2
since the first negative number is -19, which is at index 2
-- [-8, 44, 33], this function returns 0
since the first negative number is -8, which is at index 0
-- [1, 29, 22, 8], this function returns None
since the list contains no negative numbers
Type hints:
:type integers: list[float] or tuple[float]
"""
# ------------------------------------------------------------------
# Done: 4. Implement and test this function.
# The testing code is already written for you (above).
#
####################################################################
# IMPORTANT: None is a built-in constant.
# Do NOT return the STRING 'None'.
####################################################################
# ------------------------------------------------------------------
for k in range(len(numbers)):
if numbers[k] < 0:
            return k
    return None
def test_contains_an_a():
""" Tests the contains_an_a function. """
print()
print('--------------------------------------------------')
print('Testing the contains_an_a function:')
print('--------------------------------------------------')
# Tests:
actual1 = contains_an_a('nope')
actual2 = contains_an_a('yes a is here')
actual3 = contains_an_a('many aaaaas aaa aaa')
actual4 = contains_an_a('not until the very end is a')
actual5 = contains_an_a('a @ the beginning')
actual6 = contains_an_a('')
actual7 = contains_an_a('BLAH BLAH BLAH')
actual8 = contains_an_a('BLAH BLAH BLAH \t MORE BLAH')
actual9 = contains_an_a('BLAH BLAH BLAH \t MORE BLaH')
actuals = (actual1, actual2, actual3, actual4, actual5, actual6,
actual7, actual8, actual9)
expecteds = (False, True, True, True, True, False,
False, False, True)
for k in range(len(actuals)):
print()
print('Expected:', expecteds[k])
print('Actual: ', actuals[k])
if type(actuals[k]) is str and str(expecteds[k]) == actuals[k]:
print('Your code FAILED this test for contains_an_a.')
print(' You appear to have returned the STRING:')
print(' "' + actuals[k] + '"')
print(' instead of the built-in constant:')
print(' ' + str(expecteds[k]))
def contains_an_a(s):
"""
What comes in:
-- a string
What goes out: Returns True if the given string contains
the character 'a'. Returns False if the given string
does not contain the character 'a'.
Side effects: None.
Examples:
-- contains_an_a('blah blah blah') returns True
-- contains_an_a('BLAH BLAH BLAH') returns False
-- contains_an_a('abc') returns True
-- contains_an_a('') returns False
Type hints:
:type s: str
"""
# ------------------------------------------------------------------
# Done: 5. Implement and test this function.
# The testing code is already written for you (above).
#
####################################################################
# IMPORTANT:
# -- True and False are built-in constants.
# Do NOT return the STRINGs 'True' and 'False'.
####################################################################
#
# Implementation requirement:
# Use an explicit loop, as you have done in the other problems.
# No fair using the count or find string methods.
# ------------------------------------------------------------------
    # Scan the whole string; return True at the first 'a', otherwise False after the loop.
    for k in s:
        if k == 'a':
            return True
    return False
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 37.213836
| 72
| 0.517999
|
f043065fc7940cc951735ff5fdebee0fe1ade86b
| 2,221
|
py
|
Python
|
getzipcodes/main.py
|
loganjhennessy/get-zip-codes
|
9823ca937f32ba5044e6abc61e08a20f893e667d
|
[
"MIT"
] | null | null | null |
getzipcodes/main.py
|
loganjhennessy/get-zip-codes
|
9823ca937f32ba5044e6abc61e08a20f893e667d
|
[
"MIT"
] | 2
|
2021-04-30T20:34:06.000Z
|
2021-06-01T21:43:17.000Z
|
getzipcodes/main.py
|
loganjhennessy/get-zip-codes
|
9823ca937f32ba5044e6abc61e08a20f893e667d
|
[
"MIT"
] | null | null | null |
import argparse
import json
from getzipcodes.log import get_configured_logger
from getzipcodes.zipcoderequest import ZipCodeRequest
from google.cloud import datastore
logger = get_configured_logger(__name__, "INFO")
def get_arguments():
parser = argparse.ArgumentParser(
prog="getzipcodes",
description="Get Zip Codes utility script. By default, the script "
"will output JSON-formatted zip codes for the specified "
"city to the console. Note that you must set a ZIP_KEY"
"environment variable to your zipcodeapi.com key.")
parser.add_argument(
dest="city", help="City in which to search for zip codes.")
parser.add_argument(
dest="state",
help="Two-character state in which to search for zip codes.")
parser.add_argument(
"-f",
"--file",
metavar="<file.json>",
default=False,
help="path to file to output zip codes in JSON")
parser.add_argument(
"-d", "--datastore", action="store_true", help="output to datastore")
args = parser.parse_args()
return args.city.lower(), args.state.lower(), args.file, args.datastore
def write_results(city, state, zipcodes, file, ds):
output = {
"city": city,
"state": state,
"zipcodes": list(map(int, zipcodes))
}
print(json.dumps(output, indent=4))
if file:
with open(file, "w") as f:
f.write(json.dumps(output, indent=4))
logger.info("Output saved to {}".format(file))
if ds:
ds_client = datastore.Client()
kind = "CityZipCodeMap"
name = '-'.join([city, state])
key = ds_client.key(kind, name)
city_zip_map = datastore.Entity(key=key)
city_zip_map["city"] = city
city_zip_map["state"] = state
city_zip_map["zipcodes"] = zipcodes
ds_client.put(city_zip_map)
logger.info("Output uploaded to {} kind with key {}.".format(kind, key))
def main():
city, state, file, ds = get_arguments()
zcr = ZipCodeRequest(city, state)
zipcodes = zcr.execute()
write_results(city, state, zipcodes, file, ds)
if __name__ == "__main__":
main()
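# Example invocation (hypothetical values; requires the ZIP_KEY environment variable to
# hold a zipcodeapi.com key, and the -d flag additionally needs Google Cloud Datastore
# credentials):
#   getzipcodes Denver CO -f denver_zips.json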
| 30.847222
| 80
| 0.624043
|
ab6ced591b64d45574407aeba0e6d3b2753b8a1b
| 35
|
py
|
Python
|
evechem_api/security/exceptions.py
|
mylesgallagher/evechemapi
|
d096a2d13b84c3ac15fedf9795177c619f96a36d
|
[
"MIT"
] | null | null | null |
evechem_api/security/exceptions.py
|
mylesgallagher/evechemapi
|
d096a2d13b84c3ac15fedf9795177c619f96a36d
|
[
"MIT"
] | null | null | null |
evechem_api/security/exceptions.py
|
mylesgallagher/evechemapi
|
d096a2d13b84c3ac15fedf9795177c619f96a36d
|
[
"MIT"
] | null | null | null |
class KeyNotFound(Exception):
pass
| 17.5
| 29
| 0.828571
|
c2f8eba274865d18cbd6ab5560589760467482f7
| 445
|
py
|
Python
|
ygo/duel_menu.py
|
Timtam/yugioh-game
|
a45e13872c52bb16dc91a92525f5e83e95e790b5
|
[
"MIT"
] | 23
|
2017-09-23T13:29:17.000Z
|
2022-03-02T19:03:11.000Z
|
ygo/duel_menu.py
|
Timtam/yugioh-game
|
a45e13872c52bb16dc91a92525f5e83e95e790b5
|
[
"MIT"
] | 160
|
2017-09-15T13:24:30.000Z
|
2022-02-11T15:10:34.000Z
|
ygo/duel_menu.py
|
Timtam/yugioh-game
|
a45e13872c52bb16dc91a92525f5e83e95e790b5
|
[
"MIT"
] | 21
|
2017-09-15T13:17:29.000Z
|
2022-01-31T09:28:06.000Z
|
from gsb.intercept import Menu
from .parsers.duel_parser import DuelParser
class DuelMenu(Menu):
def handle_line(self, con, line):
con.player.seen_waiting = False
for s, c in DuelParser.command_substitutions.items():
if line.startswith(s):
line = c+" "+line[1:]
break
cmd, args = self.split(line)
if cmd in DuelParser.commands:
DuelParser.handle_line(con, line)
else:
super().handle_line(con, line)
| 26.176471
| 56
| 0.689888
|
9de040f1f3dcf5251bdfc9524d292982fabdae4f
| 255
|
py
|
Python
|
02-Dynamic-Urls/02-Hours-Ahead/mysite/polls/urls.py
|
wu-wenxiang/Training-Django-Public
|
56072d750107ba3be7789ac7845badb830c96c83
|
[
"Apache-2.0"
] | 1
|
2019-06-19T08:56:54.000Z
|
2019-06-19T08:56:54.000Z
|
04-Models/01-Migrate/mysite/polls/urls.py
|
wu-wenxiang/Training-Django-Public
|
56072d750107ba3be7789ac7845badb830c96c83
|
[
"Apache-2.0"
] | null | null | null |
04-Models/01-Migrate/mysite/polls/urls.py
|
wu-wenxiang/Training-Django-Public
|
56072d750107ba3be7789ac7845badb830c96c83
|
[
"Apache-2.0"
] | 3
|
2018-06-28T03:36:11.000Z
|
2020-09-25T08:04:20.000Z
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^time/plus/(\d{1,2})', views.hours_ahead, name='hours_ahead'),
url(r'^time/', views.current_datetime, name='current_datetime'),
url(r'', views.index, name='index'),
]
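# Illustrative request routing for the patterns above (assuming they are included at
# the site root):
#   /time/plus/3  -> views.hours_ahead(request, '3')
#   /time/        -> views.current_datetime(request)
#   anything else -> views.index(request)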
| 28.333333
| 72
| 0.670588
|
f4be29e841a78c721d5bf274a83b41b7f1b763a8
| 7,859
|
py
|
Python
|
bindings/torch/tinycudann/modules.py
|
TE-KazukiYoshiyama/tiny-cuda-nn
|
e1ccb40deb23b60ac9869e808f945aaf830a3db7
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/torch/tinycudann/modules.py
|
TE-KazukiYoshiyama/tiny-cuda-nn
|
e1ccb40deb23b60ac9869e808f945aaf830a3db7
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/torch/tinycudann/modules.py
|
TE-KazukiYoshiyama/tiny-cuda-nn
|
e1ccb40deb23b60ac9869e808f945aaf830a3db7
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
from torch.autograd.function import once_differentiable
from tinycudann_bindings import _C
def _torch_precision(tcnn_precision):
if tcnn_precision == _C.Precision.Fp16:
return torch.half
elif tcnn_precision == _C.Precision.Fp32:
return torch.float
else:
raise ValueError(f"Unknown precision {tcnn_precision}")
class _module_function(torch.autograd.Function):
@staticmethod
def forward(ctx, native_tcnn_module, input, params, loss_scale):
# If no output gradient is provided, no need to
# automatically materialize it as torch.zeros.
ctx.set_materialize_grads(False)
native_ctx, output = native_tcnn_module.fwd(input, params)
ctx.save_for_backward(input, params, output)
ctx.native_tcnn_module = native_tcnn_module
ctx.native_ctx = native_ctx
ctx.loss_scale = loss_scale
return output
@staticmethod
@once_differentiable
def backward(ctx, doutput):
if doutput is None:
return None, None, None, None
input, params, output = ctx.saved_tensors
with torch.no_grad():
scaled_grad = doutput * ctx.loss_scale
input_grad, weight_grad = ctx.native_tcnn_module.bwd(ctx.native_ctx, input, params, output, scaled_grad)
input_grad = None if input_grad is None else (input_grad / ctx.loss_scale)
weight_grad = None if weight_grad is None else (weight_grad / ctx.loss_scale)
return None, input_grad, weight_grad, None
class Module(torch.nn.Module):
def __init__(self, seed=1337):
super(Module, self).__init__()
self.native_tcnn_module = self._native_tcnn_module()
self.dtype = _torch_precision(self.native_tcnn_module.param_precision())
self.seed = seed
initial_params = self.native_tcnn_module.initial_params(seed)
self.params = torch.nn.Parameter(initial_params, requires_grad=True)
self.register_parameter(name="params", param=self.params)
self.loss_scale = 128.0 if self.native_tcnn_module.param_precision() == _C.Precision.Fp16 else 1.0
def forward(self, x):
# TCNN only supports batch sizes that are a multiple of 128. Apply the corresponding padding here.
batch_size = x.shape[0]
batch_size_granularity = int(_C.batch_size_granularity())
padded_batch_size = (batch_size + batch_size_granularity-1) // batch_size_granularity * batch_size_granularity
x_padded = x if batch_size == padded_batch_size else torch.nn.functional.pad(x, [0, 0, 0, padded_batch_size - batch_size])
output = _module_function.apply(
self.native_tcnn_module,
x_padded.to(torch.float).contiguous(),
self.params.to(_torch_precision(self.native_tcnn_module.param_precision())).contiguous(),
self.loss_scale
)
return output[:batch_size, :self.n_output_dims]
def __getstate__(self):
"""Return state values to be pickled."""
state = self.__dict__.copy()
# Avoid pickling native objects
del state["native_tcnn_module"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Reconstruct native entries
self.native_tcnn_module = self._native_tcnn_module()
def extra_repr(self):
return f"n_input_dims={self.n_input_dims}, n_output_dims={self.n_output_dims}, seed={self.seed}, dtype={self.dtype}, hyperparams={self.native_tcnn_module.hyperparams()}"
class NetworkWithInputEncoding(Module):
"""
Input encoding, followed by a neural network.
This module is more efficient than invoking individual `Encoding`
and `Network` modules in sequence.
Takes a `torch.float` input tensor of shape `[:, n_input_dims]` and maps
it to a tensor of shape `[:, n_output_dims]`.
The output tensor can be either of type `torch.float` or `torch.half`,
depending on which performs better on the system.
Parameters
----------
n_input_dims : `int`
Determines the shape of input tensors as `[:, n_input_dims]`
n_output_dims : `int`
Determines the shape of output tensors as `[:, n_output_dims]`
encoding_config: `dict`
Configures the encoding. Possible configurations are documented at
https://github.com/NVlabs/tiny-cuda-nn/blob/master/DOCUMENTATION.md
network_config: `dict`
Configures the neural network. Possible configurations are documented at
https://github.com/NVlabs/tiny-cuda-nn/blob/master/DOCUMENTATION.md
seed: `int`
Seed for pseudorandom parameter initialization
"""
def __init__(self, n_input_dims, n_output_dims, encoding_config, network_config, seed=1337):
self.n_input_dims = n_input_dims
self.n_output_dims = n_output_dims
self.encoding_config = encoding_config
self.network_config = network_config
super(NetworkWithInputEncoding, self).__init__(seed=seed)
def _native_tcnn_module(self):
return _C.create_network_with_input_encoding(self.n_input_dims, self.n_output_dims, self.encoding_config, self.network_config)
class Network(Module):
"""
Neural network.
Takes a `torch.float` input tensor of shape `[:, n_input_dims]` and maps
it to a tensor of shape `[:, n_output_dims]`.
The output tensor can be either of type `torch.float` or `torch.half`,
depending on which performs better on the system.
Parameters
----------
n_input_dims : `int`
Determines the shape of input tensors as `[:, n_input_dims]`
n_output_dims : `int`
Determines the shape of output tensors as `[:, n_output_dims]`
network_config: `dict`
Configures the neural network. Possible configurations are documented at
https://github.com/NVlabs/tiny-cuda-nn/blob/master/DOCUMENTATION.md
seed: `int`
Seed for pseudorandom parameter initialization
"""
def __init__(self, n_input_dims, n_output_dims, network_config, seed=1337):
self.n_input_dims = n_input_dims
self.n_output_dims = n_output_dims
self.network_config = network_config
super(Network, self).__init__(seed=seed)
def _native_tcnn_module(self):
return _C.create_network(self.n_input_dims, self.n_output_dims, self.network_config)
class Encoding(Module):
"""
Input encoding to a neural network.
Takes a `torch.float` input tensor of shape `[:, n_input_dims]` and maps
it to a `dtype` tensor of shape `[:, self.n_output_dims]`, where
`self.n_output_dims` depends on `n_input_dims` and the configuration
`encoding_config`.
Parameters
----------
n_input_dims : `int`
Determines the shape of input tensors as `[:, n_input_dims]`
encoding_config: `dict`
Configures the encoding. Possible configurations are documented at
https://github.com/NVlabs/tiny-cuda-nn/blob/master/DOCUMENTATION.md
seed: `int`
Seed for pseudorandom parameter initialization
dtype: `torch.dtype`
Precision of the output tensor and internal parameters. A value
of `None` corresponds to the optimally performing precision,
which is `torch.half` on most systems. A value of `torch.float`
may yield higher numerical accuracy, but is generally slower.
A value of `torch.half` may not be supported on all systems.
"""
def __init__(self, n_input_dims, encoding_config, seed=1337, dtype=None):
self.n_input_dims = n_input_dims
self.encoding_config = encoding_config
if dtype is None:
self.precision = _C.preferred_precision()
else:
if dtype == torch.float32:
self.precision = _C.Precision.Fp32
elif dtype == torch.float16:
self.precision = _C.Precision.Fp16
else:
raise ValueError(f"Encoding only supports fp32 or fp16 precision, but got {dtype}")
super(Encoding, self).__init__(seed=seed)
self.n_output_dims = self.native_tcnn_module.n_output_dims()
def _native_tcnn_module(self):
return _C.create_encoding(self.n_input_dims, self.encoding_config, self.precision)
| 37.42381
| 171
| 0.768037
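A hedged usage sketch for the classes defined above. The config dictionaries follow the conventions of the upstream tiny-cuda-nn DOCUMENTATION.md linked in the docstrings, but the exact keys and values here are illustrative; a CUDA device and the compiled tinycudann bindings are assumed:

import torch
import tinycudann as tcnn  # the package whose modules.py is shown above

# Illustrative configs; see the DOCUMENTATION.md link in the docstrings for all options.
encoding_config = {"otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2,
                   "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 2.0}
network_config = {"otype": "FullyFusedMLP", "activation": "ReLU",
                  "output_activation": "None", "n_neurons": 64, "n_hidden_layers": 2}

model = tcnn.NetworkWithInputEncoding(
    n_input_dims=3, n_output_dims=4,
    encoding_config=encoding_config, network_config=network_config,
)

x = torch.rand(1024, 3, device="cuda")  # [:, n_input_dims] float input
y = model(x)                            # [:, n_output_dims]; forward() pads the batch internally
loss = y.float().pow(2).mean()
loss.backward()                         # gradients reach model.params through _module_function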
|
5eebed3c94fcf94cf6b410bb3a1f39ad483f2428
| 2,201
|
py
|
Python
|
apps/amcm/migrations/0013_auto_20211028_1455.py
|
agsneutron/asociacion_mexicana_cuarto_milla
|
4657e1f494eb572e9b40b2804e012cdfd6193c51
|
[
"MIT"
] | null | null | null |
apps/amcm/migrations/0013_auto_20211028_1455.py
|
agsneutron/asociacion_mexicana_cuarto_milla
|
4657e1f494eb572e9b40b2804e012cdfd6193c51
|
[
"MIT"
] | null | null | null |
apps/amcm/migrations/0013_auto_20211028_1455.py
|
agsneutron/asociacion_mexicana_cuarto_milla
|
4657e1f494eb572e9b40b2804e012cdfd6193c51
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-10-28 19:55
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('amcm', '0012_ejemplares_estatus'),
]
operations = [
migrations.AddField(
model_name='pago',
name='estatus_credito',
field=models.CharField(choices=[('PAGADO', 'PAGADO'), ('CREDITO', 'CREDITO')], default='PAGADO', max_length=15, verbose_name='Estatus del Pago'),
),
migrations.AlterField(
model_name='pago',
name='fechaPago',
field=models.DateField(blank=True, default=datetime.datetime(2021, 10, 28, 19, 55, 43, 416182, tzinfo=utc), null=True, verbose_name='Fecha del Pago'),
),
migrations.AlterField(
model_name='pago',
name='fechaRegistro',
field=models.DateField(default=datetime.datetime(2021, 10, 28, 19, 55, 43, 416215, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='recibo',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2021, 10, 28, 19, 55, 43, 416925, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.CreateModel(
name='Credito',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('importe', models.FloatField(verbose_name='Importe')),
('fecha_registro', models.DateField(default=datetime.datetime(2021, 10, 28, 19, 55, 43, 417472, tzinfo=utc), verbose_name='Fecha de registro')),
('pagado', models.CharField(choices=[('SI', 'SI'), ('NO', 'NO')], default='NO', max_length=15, verbose_name='Pagado?')),
('pago', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='amcm.pago', verbose_name='Pago')),
],
options={
'verbose_name': 'Crédito',
'verbose_name_plural': 'Créditos',
},
),
]
| 43.156863
| 162
| 0.598819
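The migration above is auto-generated; it is normally applied with Django's standard management command. A brief, hedged example of invoking it programmatically, where the settings module name is hypothetical:

# Equivalent to running `python manage.py migrate amcm 0013_auto_20211028_1455` from a shell.
import os
import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")  # hypothetical settings module
django.setup()
call_command("migrate", "amcm", "0013_auto_20211028_1455")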
|
3dc450a2cd08567ed786fa1e94c6394424816655
| 5,800
|
py
|
Python
|
dash/html/Legend.py
|
bkzhn/dash
|
2975e001cf017919929b0ebad1f1d1e14fa32f93
|
[
"MIT"
] | 1
|
2018-01-21T15:49:49.000Z
|
2018-01-21T15:49:49.000Z
|
dash/html/Legend.py
|
sthagen/dash
|
b3918ff798a51462687ff36e9e56c079c9f463cb
|
[
"MIT"
] | null | null | null |
dash/html/Legend.py
|
sthagen/dash
|
b3918ff798a51462687ff36e9e56c079c9f463cb
|
[
"MIT"
] | 1
|
2017-08-18T05:01:03.000Z
|
2017-08-18T05:01:03.000Z
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Legend(Component):
"""A Legend component.
Legend is a wrapper for the <legend> HTML5 element.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/legend
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- accessKey (string; optional):
Keyboard shortcut to activate or add focus to the element.
- aria-* (string; optional):
A wildcard aria attribute.
- className (string; optional):
Often used with CSS to style elements with common properties.
- contentEditable (string; optional):
Indicates whether the element's content is editable.
- contextMenu (string; optional):
Defines the ID of a <menu> element which will serve as the
element's context menu.
- data-* (string; optional):
A wildcard data attribute.
- dir (string; optional):
Defines the text direction. Allowed values are ltr (Left-To-Right)
or rtl (Right-To-Left).
- draggable (string; optional):
Defines whether the element can be dragged.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional):
Prevents rendering of given element, while keeping child elements,
e.g. script elements, active.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- lang (string; optional):
Defines the language used in the element.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- n_clicks (number; default 0):
An integer that represents the number of times that this element
has been clicked on.
- n_clicks_timestamp (number; default -1):
An integer that represents the time (in ms since 1970) at which
n_clicks changed. This can be used to tell which button was
changed most recently.
- role (string; optional):
The ARIA role attribute.
- spellCheck (string; optional):
Indicates whether spell checking is allowed for the element.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- tabIndex (string; optional):
Overrides the browser's default tab order and follows the one
specified instead.
- title (string; optional):
Text to be displayed in a tooltip when hovering over the element."""
@_explicitize_args
def __init__(
self,
children=None,
id=Component.UNDEFINED,
n_clicks=Component.UNDEFINED,
n_clicks_timestamp=Component.UNDEFINED,
key=Component.UNDEFINED,
role=Component.UNDEFINED,
accessKey=Component.UNDEFINED,
className=Component.UNDEFINED,
contentEditable=Component.UNDEFINED,
contextMenu=Component.UNDEFINED,
dir=Component.UNDEFINED,
draggable=Component.UNDEFINED,
hidden=Component.UNDEFINED,
lang=Component.UNDEFINED,
spellCheck=Component.UNDEFINED,
style=Component.UNDEFINED,
tabIndex=Component.UNDEFINED,
title=Component.UNDEFINED,
loading_state=Component.UNDEFINED,
**kwargs
):
self._prop_names = [
"children",
"id",
"accessKey",
"aria-*",
"className",
"contentEditable",
"contextMenu",
"data-*",
"dir",
"draggable",
"hidden",
"key",
"lang",
"loading_state",
"n_clicks",
"n_clicks_timestamp",
"role",
"spellCheck",
"style",
"tabIndex",
"title",
]
self._type = "Legend"
self._namespace = "dash_html_components"
self._valid_wildcard_attributes = ["data-", "aria-"]
self.available_properties = [
"children",
"id",
"accessKey",
"aria-*",
"className",
"contentEditable",
"contextMenu",
"data-*",
"dir",
"draggable",
"hidden",
"key",
"lang",
"loading_state",
"n_clicks",
"n_clicks_timestamp",
"role",
"spellCheck",
"style",
"tabIndex",
"title",
]
self.available_wildcard_properties = ["data-", "aria-"]
_explicit_args = kwargs.pop("_explicit_args")
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != "children"}
for k in []:
if k not in args:
raise TypeError("Required argument `" + k + "` was not specified.")
super(Legend, self).__init__(children=children, **args)
| 31.868132
| 84
| 0.594483
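A hedged usage sketch for the generated Legend component inside a minimal Dash layout; it assumes Dash 2.x, where the HTML components are importable via `from dash import html`, and the surrounding app boilerplate is standard Dash rather than part of this record:

from dash import Dash, html

app = Dash(__name__)
app.layout = html.Fieldset([
    # Legend wraps the <legend> element; n_clicks increments when it is clicked.
    html.Legend("Shipping details", id="form-legend", n_clicks=0),
    html.Label("Name"),
])

if __name__ == "__main__":
    app.run_server(debug=True)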
|
ed5c5bc9737d603a40bd70ed1095235ad9a7a2d7
| 4,005
|
py
|
Python
|
mysql-dst/mysql-cluster/ndb/ndbapi-examples/third_party/googletest/googletest/test/googletest-env-var-test.py
|
SJTU-IPADS/dst
|
897b929a692642cbf295c105d9d6e64090abb673
|
[
"Apache-2.0"
] | 9
|
2020-12-17T01:59:13.000Z
|
2022-03-30T16:25:08.000Z
|
mysql-dst/mysql-cluster/ex/third_party/googletest/googletest/test/googletest-env-var-test.py
|
SJTU-IPADS/dst
|
897b929a692642cbf295c105d9d6e64090abb673
|
[
"Apache-2.0"
] | 1
|
2021-07-30T12:06:33.000Z
|
2021-07-31T10:16:09.000Z
|
mysql-dst/mysql-cluster/storage/ndb/ndbapi-examples/third_party/googletest/googletest/test/googletest-env-var-test.py
|
SJTU-IPADS/dst
|
897b929a692642cbf295c105d9d6e64090abb673
|
[
"Apache-2.0"
] | 1
|
2021-08-01T13:47:07.000Z
|
2021-08-01T13:47:07.000Z
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-env-var-test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
print(' Actual: %s' % (actual,))
raise AssertionError
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs googletest-env-var-test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
SetEnvVar(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
SetEnvVar(env_var, None)
AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
def testXmlOutputFile(self):
"""Tests that $XML_OUTPUT_FILE affects the output flag."""
SetEnvVar('GTEST_OUTPUT', None)
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/bar.xml', GetFlag('output'))
def testXmlOutputFileOverride(self):
"""Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""
SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/foo.xml', GetFlag('output'))
if __name__ == '__main__':
gtest_test_utils.Main()
| 33.940678
| 79
| 0.7196
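The script above toggles GTEST_* environment variables around a helper binary and checks the flags the binary reports. A minimal standard-library sketch of the same pattern, using a trivial child process as a stand-in for googletest-env-var-test_:

import os
import subprocess
import sys

env = os.environ.copy()
env["GTEST_FILTER"] = "FooTest.Bar"  # the kind of variable TestFlag() exercises

# Stand-in child: just echoes the variable it received from its environment.
child = [sys.executable, "-c", "import os; print(os.environ.get('GTEST_FILTER', '*'))"]
output = subprocess.run(child, env=env, capture_output=True, text=True).stdout.strip()
assert output == "FooTest.Bar", output
print("child saw GTEST_FILTER =", output)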
|
ec82b4a18b0d2a3be4108d78b9eb8050d6859f58
| 2,476
|
py
|
Python
|
data_loader.py
|
arttorres0/simple-image-classifier
|
9002608fadd0a4d2c3b92f4381fc443032a0516c
|
[
"MIT"
] | 1
|
2020-04-11T04:05:13.000Z
|
2020-04-11T04:05:13.000Z
|
data_loader.py
|
arttorres0/simple-image-classifier
|
9002608fadd0a4d2c3b92f4381fc443032a0516c
|
[
"MIT"
] | null | null | null |
data_loader.py
|
arttorres0/simple-image-classifier
|
9002608fadd0a4d2c3b92f4381fc443032a0516c
|
[
"MIT"
] | null | null | null |
import torch
from torchvision import datasets, transforms, models
def get_datasets(data_directory):
'''
Reads a file path and returns datasets for Training, Validation and Testing data
'''
#ensures correct file path concatenation
if data_directory[-1] != "/":
data_directory = data_directory + "/"
train_dir = data_directory + 'train'
valid_dir = data_directory + 'valid'
test_dir = data_directory + 'test'
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
validation_and_test__transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
train_data = datasets.ImageFolder(train_dir, transform = train_transforms)
validation_data = datasets.ImageFolder(valid_dir, transform = validation_and_test__transforms)
test_data = datasets.ImageFolder(test_dir, transform = validation_and_test__transforms)
return train_data, validation_data, test_data
def get_dataloaders(train_data, validation_data, test_data):
'''
Reads Training, Validation and Testing datasets and returns respective dataloaders
'''
trainloader = torch.utils.data.DataLoader(train_data, batch_size = 50, shuffle = True)
validationloader = torch.utils.data.DataLoader(validation_data, batch_size = 50)
testloader = torch.utils.data.DataLoader(test_data)
return trainloader, validationloader, testloader
| 48.54902
| 98
| 0.528271
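A hedged usage sketch for the two helpers above; it assumes the module is importable as data_loader and that the (hypothetical) `flowers` directory contains train/, valid/ and test/ folders laid out the way torchvision's ImageFolder expects, one subfolder per class:

from data_loader import get_datasets, get_dataloaders

train_data, validation_data, test_data = get_datasets("flowers")
trainloader, validationloader, testloader = get_dataloaders(train_data, validation_data, test_data)

# One training batch: 50 images of 3x224x224 given the transforms defined above.
images, labels = next(iter(trainloader))
print(images.shape, labels[:5])
print("number of classes:", len(train_data.classes))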
|
9dcc4358a7ab5a9ac3ccaded43a0ef7d63c842d6
| 40,595
|
py
|
Python
|
hjwebbrowser/browser.py
|
hayj/WebBrowser
|
ddfdbe3a0e1de48e18b15051ec3264062b16aa4f
|
[
"MIT"
] | 1
|
2020-02-19T22:08:56.000Z
|
2020-02-19T22:08:56.000Z
|
hjwebbrowser/browser.py
|
hayj/WebBrowser
|
ddfdbe3a0e1de48e18b15051ec3264062b16aa4f
|
[
"MIT"
] | null | null | null |
hjwebbrowser/browser.py
|
hayj/WebBrowser
|
ddfdbe3a0e1de48e18b15051ec3264062b16aa4f
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from datatools.jsonreader import *
from datatools.url import *
from machinelearning.bandit import *
from machinelearning.function import *
from datatools.csvreader import *
from systemtools.basics import *
from systemtools.duration import *
from systemtools.file import *
from systemtools.logger import *
from systemtools.location import *
from systemtools.system import *
import selenium
from selenium import webdriver
try:
import sh
except Exception as e:
print(e)
import random
import html2text
import re
from networktools import ipgetter
from threading import Thread, Lock
import math
import numpy
from hjwebbrowser.utils import *
from domainduplicate.domainduplicate import *
from enum import Enum
import time
from hjwebbrowser.utils import *
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
import zipfile
from collections import OrderedDict
import psutil
from hjwebbrowser import config as wbConf
def ipToSeed(ip):
try:
ip = ip.replace(".", "")
return int(ip)
except:
return strToInt(ip)
def queueMean(queue, defaultValue=0.0):
if queue is None:
return defaultValue
l = list(queue)
l = removeNone(l)
if len(l) == 0:
return defaultValue
return sum(l) / len(l)
REQUEST_STATUS = Enum("REQUEST_STATUS", "success error404 timeout timeoutWithContent refused duplicate invalid exception")
DRIVER_TYPE = Enum("DRIVER_TYPE", "chrome phantomjs")
browserDuplicates = None
def getBrowserDomainDuplicateSingleton(*args, **kwargs):
global browserDuplicates
if browserDuplicates is None:
browserDuplicates = DomainDuplicate(*args, **kwargs)
return browserDuplicates
class Browser():
"""
# html and get methods explanation:
        We have 2 scenarios:
            * You gave a htmlCallback to the __init__. This scenario is useful if you want to consider
              the page download as a critical part (network bottleneck) and you also want to wait for the ajax
to be loaded but you don't consider this wait as a network bottleneck.
* So you have to call "get" method, this will cacheLock the object,
get the url, then start a thread, finally return True if the request succeeded.
The thread will wait, call the "html" method (for the htmlCallback) and finally
release the cacheLock.
* You didn't give any callback:
* You need to use "html" method and give an url. No thread will be executed.
No cacheLock will be locked. "html" method will call "get" method and just return data
to the caller.
# Scrolling down before or after the ajax wait
        If, for instance, you want to scroll down or do other things in the browser, you can give
callbacks to the browser : beforeAjaxSleepCallback and afterAjaxSleepCallback
        Chromium setting: only keep local data until I quit my browsing session
"""
headers = \
{
"Accept": \
[
"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
# "*/*",
],
# If we set this, we got encrypted web page source:
"Accept-Encoding": \
[
"gzip, deflate, br",
"br, gzip, deflate",
"gzip, deflate",
],
"Accept-Language": \
[
# "fr-fr",
"en-US,*",
"en-US,en;q=0.5",
"en-US,en;q=0.9",
# "fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3",
# "fr-FR,fr;q=0.8,en-US;q=0.6,en;q=0.4",
],
"User-Agent": \
[
# "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_3 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A432 Safari/604.1",
# "Googlebot/2.1 (+http://www.google.com/bot.html)",
# "Googlebot/2.1 (+http://www.googlebot.com/bot.html)",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36", # laptop
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0", # laptop
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36", # laptop
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36", # laptop
# "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:16.0) Gecko/20120815 Firefox/16.0", # laptop, but not for twitter
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36", # laptop, but not for twitter
# "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36", # laptop, but not for twitter
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36", # laptop
# "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36", # laptop for Twitter
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
# "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_3 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) CriOS/61.0.3163.73 Mobile/15A432 Safari/602.1",
]
}
def __init__(
self,
driverType=DRIVER_TYPE.chrome,
chromeDriverPath=None,
phantomjsPath=None, # "/home/hayj/Programs/headlessbrowsers/phantomjs-2.1.1-linux-x86_64/bin/phantomjs",
logger=None,
proxy=None,
name=None,
verbose=True,
loadImages=False,
beforeAjaxSleepCallback=None,
afterAjaxSleepCallback=None,
htmlCallback=None,
defaultScore=None,
durationHistoryCount=60, # 60 is a good choice
pageLoadTimeout=25,
maxDuplicatePerDomain=20,
checkProxyTimeout=5,
ajaxSleep=1.0, # > 3.0 for production crawl task
useTimeoutGet=True, # Use False here, True is not yet well implemented
headless=False,
useFastError404Detection=True,
durationHistory=None,
domainDuplicateParams={},
isInvalidFunct=None,
incognito=False,
disableNotifications=False,
noSandbox=None, # Default from the config
disableDevShmUsage=False, # Default from the config
chromeExtensions=None,
):
self.logger = logger
self.verbose = verbose
self.chromeExtensions = chromeExtensions
if self.chromeExtensions is None:
self.chromeExtensions = []
if isinstance(self.chromeExtensions, str):
self.chromeExtensions = [self.chromeExtensions]
self.incognito = incognito
self.disableNotifications = disableNotifications
if "logger" not in domainDuplicateParams:
domainDuplicateParams["logger"] = self.logger
if "verbose" not in domainDuplicateParams:
domainDuplicateParams["verbose"] = self.verbose
self.noSandbox = noSandbox
self.disableDevShmUsage = disableDevShmUsage
if self.noSandbox is None:
self.noSandbox = wbConf.noSandbox
self.urlParser = URLParser(logger=self.logger, verbose=self.verbose)
self.useFastError404Detection = useFastError404Detection
self.useTimeoutGet = useTimeoutGet
self.lastIsDuplicate = False
self.maxDuplicatePerDomain = maxDuplicatePerDomain
self.duplicates = getBrowserDomainDuplicateSingleton(**domainDuplicateParams)
self.duplicates.setMaxDuplicates(self.maxDuplicatePerDomain)
self.beforeAjaxSleepCallback = beforeAjaxSleepCallback
self.afterAjaxSleepCallback = afterAjaxSleepCallback
self.name = name
if self.name is None:
self.name = getRandomName()
self.pageLoadTimeout = pageLoadTimeout
self.defaultScore = defaultScore
if self.defaultScore is None:
self.defaultScore = self.pageLoadTimeout
self.checkProxyTimeout = checkProxyTimeout
self.ajaxSleep = ajaxSleep
self.htmlCallback = htmlCallback
self.isInvalidFunct = isInvalidFunct
self.lastGetIsOk = None
self.crawlingElement = None
self.durationHistoryCount = durationHistoryCount
self.durationHistory = durationHistory
if self.durationHistory is None:
self.durationHistory = getFixedLengthQueue(self.durationHistoryCount)
# print("c" * 500)
self.cacheLock = Lock()
# Driver:
self.driverType = driverType
self.headless = headless
if self.driverType == DRIVER_TYPE.phantomjs:
self.headless = True
self.chromeDriverPath = chromeDriverPath
self.phantomjsPath = phantomjsPath
self.driver = None
self.proxy = proxy
self.loadImages = loadImages
self.initDriver()
def setWindowSize(self, x, y):
self.driver.set_window_size(x, y)
def setWindowPosition(self, x, y):
self.driver.set_window_position(x, y)
def getCrawlerName(self):
if self.driverType == DRIVER_TYPE.phantomjs:
return "phantomjs"
elif self.driverType == DRIVER_TYPE.chrome:
return "chrome"
def initDriver(self):
if self.driver is not None:
logInfo(self.name + " will be killed!", self)
okQuit = self.quit()
if okQuit:
logInfo(self.name + " killed!", self)
else:
logInfo(self.name + " can not be killed properly!", self)
logInfo(self.name + " initialization...", self)
if self.driverType == DRIVER_TYPE.phantomjs:
self.generatePhantomjsHeader()
self.initSeleniumDriver()
else:
self.initSeleniumDriver()
self.generateRandomWindow()
def initSeleniumDriver(self, retry=True):
try:
if self.driverType == DRIVER_TYPE.chrome:
params = self.getChromeServiceArgs()
if self.chromeDriverPath is not None:
params["executable_path"] = self.chromeDriverPath
self.driver = webdriver.Chrome(**params)
elif self.driverType == DRIVER_TYPE.phantomjs:
params = {}
if self.phantomjsPath is not None:
params["executable_path"] = self.phantomjsPath
params["service_args"] = self.getPhantomJSServiceArgs()
self.driver = webdriver.PhantomJS(**params)
else:
raise Exception("Not yet implemented!")
self.driver.set_page_load_timeout(self.pageLoadTimeout)
except Exception as e:
if "Too many open files" in str(e) and "/tmp" in str(e):
clearRtmp(startsWith="tmp", olderHour=4, verbose=True)
if retry:
time.sleep(2)
self.initSeleniumDriver(retry=False)
else:
logException(e, self, message=self.name + " driver can't be init", location="initSeleniumDriver")
def clone(self):
return Browser \
(
headless=self.headless,
driverType=self.driverType,
chromeDriverPath=self.chromeDriverPath,
phantomjsPath=self.phantomjsPath,
logger=self.logger,
proxy=self.proxy,
name=None,
verbose=self.verbose,
loadImages=self.loadImages,
beforeAjaxSleepCallback=self.beforeAjaxSleepCallback,
afterAjaxSleepCallback=self.afterAjaxSleepCallback,
htmlCallback=self.htmlCallback,
defaultScore=self.defaultScore,
durationHistoryCount=self.durationHistoryCount,
pageLoadTimeout=self.pageLoadTimeout,
maxDuplicatePerDomain=self.maxDuplicatePerDomain,
checkProxyTimeout=self.checkProxyTimeout,
ajaxSleep=self.ajaxSleep,
useTimeoutGet=self.useTimeoutGet,
durationHistory=self.durationHistory,
)
def log(self, text):
if self.logger is not None:
log(text, self)
else:
print(text)
def getProxy(self):
return self.proxy
def randomHeader(self, seedWithProxy=True):
header = {}
if self.proxy is None or not seedWithProxy:
for key, values in Browser.headers.items():
header[key] = random.choice(values)
else:
headers = dict(Browser.headers)
headers = sortBy(headers, index=0)
ip = self.proxy["ip"]
theSeed = ipToSeed(ip)
rd = Random()
rd.setSeed(theSeed)
for key, values in headers:
choice = rd.getRandomInt(len(values) - 1)
value = values[choice]
header[key] = value
rd.resetSeed()
return header
def generatePhantomjsHeader(self):
header = self.randomHeader()
userAgent = header["User-Agent"]
del header["Accept-Encoding"]
del header["User-Agent"]
for key, value in header.items():
capabilityKey = 'phantomjs.page.customHeaders.{}'.format(key)
webdriver.DesiredCapabilities.PHANTOMJS[capabilityKey] = value
webdriver.DesiredCapabilities.PHANTOMJS['phantomjs.page.settings.userAgent'] = userAgent
def randomWindow(self, seedWithProxy=True):
if self.proxy is None or not seedWithProxy:
return (getRandomInt(1100, 2000), getRandomInt(800, 1200))
else:
ip = self.proxy["ip"]
theSeed = ipToSeed(ip)
randomInts = getRandomInt(900, 1300, seed=theSeed, count=2)
width = randomInts[0] + 600
height = randomInts[1]
return (width, height)
def generateRandomWindow(self):
if self.driver is not None:
try:
(width, height) = self.randomWindow()
self.driver.set_window_size(width, height)
except Exception as e:
logException(e, self, location="generateRandomWindow")
def getScrapedHeader(self):
headerSources = \
[
("https://www.whatismybrowser.com/detect/what-http-headers-is-my-browser-sending", ".table"),
# ("http://www.procato.com/my+headers", ".containerInner"),
# ("http://httpbin.org/headers", None),
]
urlParser = self.urlParser
allHeaders = {}
for url, cssSelector in headerSources:
try:
domain = urlParser.getDomain(url, urlLevel=URLLEVEL.ALL)
self.driver.get(url)
if cssSelector is not None:
header = self.driver.find_element_by_css_selector(cssSelector).text
header = header.strip().split("\n")
newHeader = []
for current in header:
if not (current.lower().startswith("host") or current.lower().startswith("referer")):
newHeader.append(current)
header = newHeader
else:
header = self.driver.page_source
allHeaders[domain] = header
except: pass
return listToStr(allHeaders)
def meanDuration(self):
return queueMean(self.durationHistory, defaultValue=self.defaultScore)
def getDriverData(self):
lastUrl = None
title = None
html = None
try:
lastUrl = self.driver.current_url
title = self.driver.title.strip()
html = self.driver.page_source
except Exception as e:
if not isinstance(e, TimeoutException):
logError(str(type(e)), self)
logError("Exception location: browser.getDriverData()\n" + str(e), self)
# else:
# logException(e, self)
return (title, html, lastUrl)
def acquire(self):
self.cacheLock.acquire()
def tryRelease(self):
try:
self.cacheLock.release()
except Exception as e:
logError("Exception location: browser.tryRelease()\n" + str(e), self)
def timeoutGet(self, crawlingElement):
if self.useTimeoutGet:
def threadedGet():
try:
self.driver.get(crawlingElement.data)
except Exception as e:
if not isinstance(e, TimeoutException):
logException(e, self, location="threadedGet")
theThread = Thread(target=threadedGet)
theThread.start()
theThread.join(1.3 * self.pageLoadTimeout)
if theThread.isAlive():
self.initDriver()
raise Exception("The timeout didn't work.")
else:
self.driver.get(crawlingElement.data)
@staticmethod
def isGoodStatus(status):
if status.name in "success error404 timeoutWithContent".split(" "):
return True
else:
return False
def get(self, crawlingElement, pipCallback=None):
"""
        This function returns True if the request succeeded.
"""
# We convert the url:
crawlingElement = tryUrlToCrawlingElement(crawlingElement)
# Here we have a callback, we have to cacheLock the object:
if self.htmlCallback is not None:
self.acquire()
try:
# And now we can get the html and retain time duration:
tt = TicToc()
tt.tic(display=False)
# logInfo("Launching get for: " + str(url))
if crawlingElement.type == CrawlingElement.TYPE.pipedMessage:
pipCallback(self, crawlingElement) # Here the url is a piped message
else:
self.stopLoadingAndLoadBlank()
# logInfo("Get starting...", self)
self.timeoutGet(crawlingElement)
# logInfo("Get DONE", self)
# logInfo("get done for: " + str(url))
# global debugCount
# debugCount += 1
# For chrome we must try to get the source to see if we are on a timeout exception:
try:
self.driver.page_source
except Exception as e:
if isinstance(e, TimeoutException):
raise TimeoutException()
# We finally got something without exception, so we try to get data:
(title, html, lastUrl) = self.getDriverData()
# But here, if the status is not success, we set diffTime as the max:
diffTime = tt.tic(display=False)
if Browser.isGoodStatus(self.getStatus(True, crawlingElement, lastUrl, title, html)):
diffTime = self.pageLoadTimeout
# We add the score to the history:
self.durationHistory.append(diffTime)
# And we keep currentUrl and ok status for the "html()" method:
self.currentCrawlingElement = crawlingElement
self.lastGetIsOk = True
self.lastIsDuplicate = False
if title is not None and html is not None \
and crawlingElement.type == CrawlingElement.TYPE.uniqueUrl:
self.lastIsDuplicate = self.duplicates.isDuplicate \
(
lastUrl,
title,
html
)
# Finally we exec the finally statement and we return True (i.e. request ok):
return True
except Exception as e:
if not isinstance(e, TimeoutException):
logError("Exception location: browser.get()\n" + str(e), self)
            # Here we got a timeout, so we set the score to the worst value:
self.durationHistory.append(self.pageLoadTimeout)
# And we keep url and ok status for the "html()" method:
self.currentCrawlingElement = crawlingElement
self.lastGetIsOk = False
# Finally we exec the finally statement and we return False (i.e. failed):
return False
# The finally is executed before the return statement
finally:
# If the request succeeded:
if self.lastGetIsOk:
                # First, if this is a duplicate (i.e. a "you've been blocked" page for instance),
                # we don't need to sleep but we call the callback to keep the crawler aware:
if self.lastIsDuplicate:
theThread = Thread(target=self.noAjaxSleepThenCallback)
theThread.start()
# Then if we don't have any callback, the caller of this funct is the
# "html()" method of this object, so we just need to sleep:
elif self.htmlCallback is None:
self.doAjaxSleep()
# Else if we actually have a right web page without timeout
# We sleep and we call the callback:
else:
theThread = Thread(target=self.ajaxSleepThenCallback)
theThread.start()
# If we got a timeout, we don't need to sleep:
else:
# If there are no callback, we don't do anything:
if self.htmlCallback is None:
pass
# Else we don't sleep but call the callback:
# Or we have to sleep because we can have a timeoutWithContent...
else:
theThread = Thread(target=self.noAjaxSleepThenCallback)
theThread.start()
# self.tryRelease()
def timeoutStopLoadingAndLoadBlank(self):
self.stopLoadingSucceded = False
self.loadBlankSucceded = False
def threadedSLLB():
self.stopLoadingSucceded = self.stopLoading()
self.loadBlankSucceded = self.loadBlank()
theThread = Thread(target=threadedSLLB)
theThread.start()
theThread.join(15)
if not self.loadBlankSucceded or theThread.isAlive():
errorMessage = "Can't load blank for " + self.name
logError(errorMessage, self)
self.initDriver() # Kill the driver
def stopLoadingAndLoadBlank(self):
self.timeoutStopLoadingAndLoadBlank()
def loadBlank(self):
try:
self.driver.get("about:blank")
return True
except Exception as e:
# logException(e, self, location="Browser.loadBlank()")
logError("Exception caught in Browser.loadBlank().", self)
return False
def stopLoading(self):
try:
self.driver.execute_script("window.stop();")
self.driver.find_elements_by_css_selector("*")[0].send_keys(Keys.CONTROL + 'Escape')
return True
except Exception as e:
# logException(e, self, location="Browser.stopLoading()")
logError("Exception caught in Browser.stopLoading() " + str(e), self)
return False
def doAjaxSleep(self):
if self.beforeAjaxSleepCallback is not None:
self.beforeAjaxSleepCallback(self)
if self.ajaxSleep > 0.0:
time.sleep(self.ajaxSleep)
if self.afterAjaxSleepCallback is not None:
self.afterAjaxSleepCallback(self)
def noAjaxSleepThenCallback(self):
try:
self.html()
except Exception as e:
logError("Exception location: browser.noAjaxSleepThenCallback()\n" + str(e), self)
self.tryRelease()
def ajaxSleepThenCallback(self):
self.doAjaxSleep()
try:
self.html()
except Exception as e:
logError("Exception location: browser.ajaxSleepThenCallback()\n" + str(e), self)
# Here we terminated the sleep and the callback, so we can unlock the object:
self.tryRelease()
def isTimeoutWithContent(self, lastUrl, title, html):
"""
        Return False if it is a true timeout,
        True otherwise (i.e. we actually got content)
"""
if lastUrl is None or lastUrl.strip() == "" \
or title is None or title.strip() == "" \
or html is None or html.strip() == "" \
or lastUrl == "about:blank":
return False
if "</body>" in html \
and len(html) > 100:
return True
return False
def getStatus(self, ok, crawlingElement, lastUrl, title, html):
"""
        The only difference between calling this method from "get()" vs from "html()"
        is that the lastUrl can change due to js redirection...
The html can change too, but it's not important to get the status
"""
if not ok:
if self.isTimeoutWithContent(lastUrl, title, html):
currentStatus = self.getStatus(True, crawlingElement, lastUrl, title, html)
if currentStatus == REQUEST_STATUS.success:
return REQUEST_STATUS.timeoutWithContent
else:
return currentStatus
else:
return REQUEST_STATUS.timeout
elif isRefused(html, lastUrl):
return REQUEST_STATUS.refused
elif isInvalidHtml(html):
return REQUEST_STATUS.invalid
elif is404Error(html, fast=self.useFastError404Detection):
return REQUEST_STATUS.error404
elif crawlingElement.type == CrawlingElement.TYPE.uniqueUrl \
and self.duplicates.isDuplicate(lastUrl, title, html):
return REQUEST_STATUS.duplicate
elif self.isInvalidFunct is not None and self.isInvalidFunct(lastUrl, html, self):
return REQUEST_STATUS.invalid
else:
return REQUEST_STATUS.success
def html(self, crawlingElement=None):
"""
        This function returns data. Call the "get" method instead if you gave a htmlCallback.
"""
# We convert the url:
crawlingElement = tryUrlToCrawlingElement(crawlingElement)
currentCrawlingElement = crawlingElement
if currentCrawlingElement is None:
currentCrawlingElement = self.currentCrawlingElement
# We construct data:
data = \
{
"proxy": str(self.proxy),
"crawlingElement": currentCrawlingElement,
"url": str(currentCrawlingElement.data),
"domain": self.urlParser.getDomain(currentCrawlingElement.data, urlLevel=URLLEVEL.SMART),
"browser": self.driverType.name,
"lastUrl": None,
"lastUrlDomain": None,
"html": None,
"title": None,
"status": None,
}
try:
# Here it's the user who call this method:
if crawlingElement is not None:
ok = self.get(crawlingElement)
            # Here, if the htmlCallback is not None, it is the get method which calls html():
elif self.htmlCallback is not None:
crawlingElement = self.currentCrawlingElement
# We convert the url:
crawlingElement = tryUrlToCrawlingElement(crawlingElement)
ok = self.lastGetIsOk
else:
logError("You can't be in both scenarios described in the doc. Please use the html method instead.", self)
exit()
            # Now we try to get some data:
(title, html, lastUrl) = self.getDriverData()
# And we get the status:
status = self.getStatus(ok, crawlingElement, lastUrl, title, html)
# Now we got all data, so we can make the data dict:
data["status"] = status
data["lastUrl"] = lastUrl
data["lastUrlDomain"] = self.urlParser.getDomain(lastUrl, urlLevel=URLLEVEL.SMART)
data["html"] = html
data["title"] = title
# And we log informations:
ip = " "
if self.proxy is not None:
ip = " (" + self.proxy["ip"] + ") "
logInfo(str(status.name) + " from " + self.name + ip + str(crawlingElement.data), self)
if status == REQUEST_STATUS.duplicate:
logInfo("Title of the duplicated page: " + str(title), self)
except Exception as e:
logException(e, self, location="browser.html()")
data["status"] = REQUEST_STATUS.exception
# Now if we have a callback, we have to throw the data:
if self.htmlCallback is not None:
self.htmlCallback(data, self)
# Or we just return it:
else:
return data
return None
def getIp(self):
proxyForData = None
if self.proxy is not None:
proxyForData = self.proxy["ip"]
return proxyForData
def getUserAgent(self):
return self.driver.execute_script("return navigator.userAgent")
def setProxy(self, proxy):
self.proxy = Proxy(proxy)
if self.driverType == DRIVER_TYPE.chrome or self.driverType == DRIVER_TYPE.phantomjs:
logWarning("Please recreate a Browser to set the proxy...")
# def launch(self, urlList):
# if not isinstance(urlList, list):
# urlList = [urlList]
# if len(urlList) == 1 and (urlList[0] is None or urlList[0] == ""):
# return None
#
# for currentUrl in urlList:
# self.driver.get(currentUrl)
# return self.driver.page_source
def getPids(self):
pids = None
# https://stackoverflow.com/questions/10752512/get-pid-of-browser-launched-by-selenium
try:
if self.driverType == DRIVER_TYPE.phantomjs:
pids = [self.driver.service.process.pid]
else:
p = psutil.Process(self.driver.service.process.pid)
pids = []
for current in p.children(recursive=True):
pids.append(current.pid)
except Exception as e:
logException(e, self, location="getPids")
return pids
def killPids(self, pids):
if pids is None or len(pids) == 0:
return False
atLeastOneFailed = False
for pid in pids:
try:
p = psutil.Process(pid)
p.kill() # or p.terminate()
except Exception as e:
if not isinstance(e, psutil.NoSuchProcess):
logException(e, self, location="Browser.killPids()")
atLeastOneFailed = True
if atLeastOneFailed:
return False
else:
return True
def kill(self):
pids = self.getPids()
return self.killPids(pids)
def close(self):
self.quit()
def timeoutQuit(self):
def threadedQuit():
self.driver.quit()
theThread = Thread(target=threadedQuit)
theThread.start()
theThread.join(15)
if theThread.isAlive():
errorMessage = "Can't quit " + self.name
# logError(errorMessage, self)
raise Exception(errorMessage)
def quit(self):
closed = False
i = 0
while not closed and i < 3:
try:
pids = self.getPids()
self.timeoutQuit()
self.killPids(pids)
closed = True
except Exception as e:
# logException(e, self, location="browser.closed()")
logError("Exception caught in Browser.quit().", self)
closed = self.kill()
if not closed:
time.sleep(0.2)
i += 1
return closed
def getPhantomJSServiceArgs(self):
if self.proxy is None:
return None
type = "http"
if dictContains(self.proxy, "type"):
type = self.proxy["type"]
params = \
[
'--proxy=' + self.proxy["ip"] + ':' + self.proxy["port"],
'--proxy-type=' + type,
]
if dictContains(self.proxy, "user"):
params.append('--proxy-auth=' + self.proxy["user"] + ':' + self.proxy["password"])
if self.loadImages:
params.append('--load-images=yes')
else:
params.append('--load-images=no')
return params
def getChromeServiceArgs(self):
"""
        You can't use both proxy auth and the headless option,
        so the ip of the machine has to be whitelisted by your proxy provider
"""
options = Options()
for ext in self.chromeExtensions:
options.add_extension(ext)
options.add_experimental_option('w3c', False)
if self.proxy is not None and not self.headless:
user = "null"
password = "null"
if dictContains(self.proxy, "user"):
user = self.proxy["user"]
if dictContains(self.proxy, "password"):
password = self.proxy["password"]
user = '"' + user + '"'
password = '"' + password + '"'
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome Proxy",
"permissions": [
"proxy",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequest",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = """
var config = {
mode: "fixed_servers",
rules: {
singleProxy: {
scheme: "http",
host: \"""" + self.proxy["ip"] + """\",
port: parseInt(""" + self.proxy["port"] + """)
},
bypassList: ["foobar.com"]
}
};
chrome.proxy.settings.set({value: config, scope: "regular"}, function() {});
function callbackFn(details) {
return {
authCredentials: {
username: """ + user + """,
password: """ + password + """
}
};
}
chrome.webRequest.onAuthRequired.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking']
);
"""
proxyPluginName = "proxy_auth_plugin"
pluginTmpDir = tmpDir(subDir=proxyPluginName)
purgeOldFiles(pluginTmpDir + "/" + proxyPluginName + "*.zip", 1000.0)
pluginfile = pluginTmpDir + '/' + proxyPluginName + '_' + getRandomStr() + '.zip'
with zipfile.ZipFile(pluginfile, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
options.add_extension(pluginfile)
elif self.proxy is not None and self.headless:
            # Here you must whitelist your ip so that no user/password is needed:
options.add_argument('--proxy-server=' + self.proxy["ip"] + ":" + self.proxy["port"])
options.add_argument("--start-maximized")
if self.incognito:
options.add_argument('--incognito')
if self.disableNotifications:
options.add_argument("--disable-notifications")
if self.noSandbox:
logWarning("The --no-sandbox option is set in Chrome driver.")
options.add_argument('--no-sandbox')
if self.disableDevShmUsage:
logWarning("The --disable-dev-shm-usage option is set in Chrome driver.")
options.add_argument('--disable-dev-shm-usage')
if self.headless:
options.add_argument('headless')
(width, height) = self.randomWindow()
options.add_argument('window-size=' + str(width) + 'x' + str(height))
# Now we set the header:
header = self.randomHeader()
options.add_argument("user-agent=" + header["User-Agent"]) # WORKS
# options.add_argument("accept=" + header["Accept"]) # Doesn't work
# options.add_argument("accept-encoding=" + header["Accept-Encoding"]) # Doesn't work
options.add_experimental_option('prefs', {'intl.accept_languages': "en-US,en"}) # WORKS but only en-US,en;q=0.9, it doesn't work with header["Accept-Language"]
params = {}
params["chrome_options"] = options
return params
def linkedInConnexion(self, user=u"opuire.malaire@tutanota.com", password=u"753êµ$UfD5"):
"""
# class login-email et login-password find_element_by_class_name
"""
usernameInput = self.browser.find_element_by_class_name("login-email")
passwordInput = self.browser.find_element_by_class_name("login-password")
usernameInput.send_keys(user)
passwordInput.send_keys(password)
self.browser.find_element_by_class_name("submit-button").click()
def checkProxy(self):
"""
This method return False if the proxy is not correctly set.
"""
if self.proxy is None:
logError("Proxy not correctly set.", self)
return False
else:
webSiteList = \
[
"http://fr.geoipview.com",
# On this web site with a proxy, the page load a lot of "near ip", so it's slow:
# "http://www.localiser-ip.com",
"http://www.mon-ip.com",
"https://www.expressvpn.com/what-is-my-ip",
"https://www.adresseip.com",
]
def getWebSiteIP(url):
try:
data = self.html(url)["html"]
ip = re.search("\d+[.]\d+[.]\d+[.]\d+", data).group(0)
return ip
except Exception as e:
# logWarning("Ip not found in " + url + " " + str(e), self)
return None
self.driver.set_page_load_timeout(self.checkProxyTimeout)
previousAjaxSleep = self.ajaxSleep
self.ajaxSleep = 0.0
ipWhithoutProxy = getIP()
success = False
# log("This computer ip is " + ipWhithoutProxy, self)
for current in webSiteList:
proxyIP = getWebSiteIP(current)
if proxyIP is not None:
if self.proxy["ip"] != proxyIP:
break
if proxyIP == ipWhithoutProxy:
break
success = True
break
self.ajaxSleep = previousAjaxSleep
self.driver.set_page_load_timeout(self.pageLoadTimeout)
if success:
log("Successfully init " + self.name + " with proxy " + proxyIP, self)
return True
else:
logWarning(self.name + " failed to use proxy " + self.proxy["ip"], self)
return False
if __name__ == "__main__":
from unshortener import config as unsConf
unsConf.useMongodb = False
from domainduplicate import config as ddConf
ddConf.useMongodb = False
from datastructuretools import config as dsConf
dsConf.useMongodb = False
b = Browser()
| 39.222222
| 167
| 0.571302
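A hedged usage sketch of the Browser class defined above, in the "no htmlCallback" scenario described in its docstring, so html() is called directly and returns the data dict. The import path and target URL are illustrative, and a local chromedriver install is assumed:

from hjwebbrowser.browser import Browser, DRIVER_TYPE, REQUEST_STATUS

# Headless Chrome, no htmlCallback: html() gets the page, sleeps ajaxSleep, then returns data.
browser = Browser(driverType=DRIVER_TYPE.chrome, headless=True, ajaxSleep=2.0, pageLoadTimeout=25)
try:
    data = browser.html("https://example.org")
    if data["status"] == REQUEST_STATUS.success:
        print(data["lastUrl"], "-", data["title"])
        print(len(data["html"]), "characters of html")
    else:
        print("request ended with status", data["status"].name)
finally:
    browser.quit()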
|
04ce6afddd327e7b85c3b9e960d4c6bc2b1765d5
| 16,792
|
py
|
Python
|
mpEngineProdCons.py
|
ekristen/appcompatprocessor
|
6c847937c5a836e2ce2fe2b915f213c345a3c389
|
[
"Apache-2.0"
] | 152
|
2017-04-02T18:13:19.000Z
|
2022-03-19T18:46:12.000Z
|
mpEngineProdCons.py
|
ekristen/appcompatprocessor
|
6c847937c5a836e2ce2fe2b915f213c345a3c389
|
[
"Apache-2.0"
] | 19
|
2017-05-03T13:48:06.000Z
|
2020-08-18T16:20:25.000Z
|
mpEngineProdCons.py
|
ekristen/appcompatprocessor
|
6c847937c5a836e2ce2fe2b915f213c345a3c389
|
[
"Apache-2.0"
] | 26
|
2017-04-19T16:00:51.000Z
|
2021-06-12T10:07:46.000Z
|
import settings
import logging
import multiprocessing
import Queue
import time
from datetime import timedelta, datetime
from appAux import psutil_phymem_usage
import gc
logger = logging.getLogger(__name__)
dying_workers = []
# Auto-balancing Producer-Consumer class
def rate_limited(period, damping = 1.0):
'''
Prevent a method from being called
if it was previously called before
    a time window has elapsed.
:param period: The time window after which method invocations can continue.
:param damping: A factor by which to dampen the time window.
:return function: Decorated function that will forward method invocations if the time window has elapsed.
'''
frequency = damping / float(period)
def decorate(func):
last_called = [0.0]
def func_wrapper(*args, **kargs):
elapsed = time.time() - last_called[0]
left_to_wait = frequency - elapsed
if left_to_wait > 0:
# time.sleep(left_to_wait)
# print left_to_wait
return None
ret = func(*args, **kargs)
last_called[0] = time.time()
return ret
return func_wrapper
return decorate
class MPEngineProdCons(object):
def __init__(self, maxCores, producer_Class, consumer_Class, governorOffFlag = False):
logger.debug("mpEngine initializing")
self.governorOffFlag = governorOffFlag
self.maxCores = maxCores
self.__deleting__ = False
self.__internalLock__ = multiprocessing.Lock()
self.killed_event = multiprocessing.Event()
# Producers
self.num_producers = 0
self.next_worker_num = 0
self.producer_Class = producer_Class
self.producer_pool = []
self.producer_pool_exitEvent = []
self.producer_task_queue = multiprocessing.JoinableQueue()
self.producer_results_queue = multiprocessing.JoinableQueue()
self.producer_pool_progress = multiprocessing.Value('i', 0)
# Consumers
self.num_consumers = 0
self.next_consumer_num = 0
self.consumer_Class = consumer_Class
self.consumer_pool = []
        # Note: consumer_pool_exitEvent is used both to notify a worker it should end and for the worker to notify it has done so
self.consumer_pool_exitEvent = []
self.consumer_task_queue = self.producer_results_queue
self.consumer_results_queue = multiprocessing.JoinableQueue()
self.consumer_pool_progress = multiprocessing.Value('i', 0)
# Tasks
self.num_tasks = multiprocessing.Value('i', 0)
self.tasks_added = False
# Rebalance checks
self._rebalance_last_kick = datetime.now()
self.rebalance_backoff_timer = 60 * 1
self._rebalance_mem_last_kick = datetime.now()
self.rebalance_mem_backoff_timer = 60 * 2
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
print exc_type, exc_value, traceback
self.__del__
return self
def __del__(self):
# Lock internal
self.__internalLock__.acquire()
if not self.__deleting__:
logger.debug("Bringing down mpEngine")
self.__deleting__ = True
while self.num_producers > 0: self.removeProducer(True)
while self.num_consumers > 0: self.removeConsumer(True)
logger.debug("mpEngine down")
# Release internal
self.__internalLock__.release()
def check_mpEngineStatus(self):
status_ok = True
assert(len(self.producer_pool) == self.num_producers)
assert(len(self.producer_pool) == len(self.producer_pool_exitEvent))
assert(len(self.consumer_pool) == self.num_consumers)
assert(len(self.consumer_pool) == len(self.consumer_pool_exitEvent))
# Check all the processes we believe we have are really alive
for (worker_num, worker, extra_arg_list) in self.producer_pool:
if not worker.is_alive():
logger.error("check_mpEngineStatus error, dead producer process: %s / %s" % (worker_num, worker.name))
status_ok = False
for (worker_num, worker, extra_arg_list) in self.consumer_pool:
if not worker.is_alive():
logger.error("check_mpEngineStatus error, dead consumer process: %s / %s" % (worker_num, worker.name))
status_ok = False
if self.killed_event.is_set(): status_ok = False
return status_ok
def working(self):
# Internal check
if self.killed_event.is_set() or not self.check_mpEngineStatus():
print("\n\n\n==>Tearing down mpEngine, we've been killed! out of memory? (give me 60 secs to try to shutdown cleanly)")
logger.error("Tearing down mpEngine, we've been killed!")
self.endConsumers()
time.sleep(2)
self.endProducers()
time.sleep(2)
return False
# Check if we still have work to do
if (self.get_num_tasks() != self.getProgressConsumers()): return True
else:
logger.debug("mpEngine work finished!")
# Wait until all our workers have exited
while self.num_producers > 0: self.removeProducer(True)
while self.num_consumers > 0: self.removeConsumer(True)
return False
def addTaskList(self, task_list):
if not self.tasks_added:
with self.num_tasks.get_lock():
for new_task in task_list:
self.num_tasks.value += 1
self.producer_task_queue.put(new_task)
return True
else:
logger.error("Can only add tasks once!")
return False
def getProgress(self):
return (self.num_producers, self.num_consumers, self.get_num_tasks(), self.getProgressProducers(), self.getProgressConsumers())
def get_num_tasks(self):
with self.num_tasks.get_lock():
return self.num_tasks.value
def getProgressProducers(self):
return self.producer_pool_progress.value
def getProgressConsumers(self):
return self.consumer_pool_progress.value
def getProducerCount(self):
return self.num_producers
def addProducer(self, extra_arg_list = []):
if self.num_producers < self.maxCores:
# Lock internal
self.__internalLock__.acquire()
new_worker_num = self.next_worker_num
logger.debug("Adding Producer-%d" % (new_worker_num))
self.producer_pool_exitEvent.append(multiprocessing.Event())
self.producer_pool.append((new_worker_num, self.producer_Class(
self.producer_task_queue, self.producer_results_queue, self.get_num_tasks(), self.get_num_tasks(), self.producer_pool_progress,
self.producer_pool_exitEvent[-1], self.killed_event, extra_arg_list), extra_arg_list))
self.producer_pool[-1][1].daemon = False # Remove for debugging
self.producer_pool[-1][1].start()
# Update worker count
self.num_producers += 1
# Update next worker num
self.next_worker_num += 1
# Release internal
self.__internalLock__.release()
logger.debug("Producer-%d added" % new_worker_num)
else:
logger.error("Attempted to start workers beyond the maxCores setting")
def removeProducer(self, noLock = False):
if self.num_producers > 0:
# Lock internal
if not noLock: self.__internalLock__.acquire()
# Remove last worker from worker pool
(worker_num, producer, extra_arg_list) = self.producer_pool.pop()
logger.debug("Removing Producer-%d" % worker_num)
# Remove last worker's exitFlag
producer_exitEvent = self.producer_pool_exitEvent.pop()
# Set the worker's exit event
if not producer_exitEvent.is_set():
logger.debug("Producer-%d exitEvent SET" % worker_num)
producer_exitEvent.set()
# Update producer count
self.num_producers -= 1
# Release internal
if not noLock: self.__internalLock__.release()
else:
logger.error("Attempted to remove producer from empty pool.")
def startProducers(self, num_producers, extra_arg_list = []):
logger.debug("Starting producers")
if num_producers is None:
for i in xrange(self.maxCores - 1): self.addProducer(extra_arg_list)
else:
for i in xrange(num_producers): self.addProducer(extra_arg_list)
def restartProducers(self):
logger.debug("Restarting producers")
extra_arg_list_list = []
current_num_producers = self.num_producers
# Shut them all down
for i in xrange(current_num_producers):
# Grab extra_arg_list
(worker_num, producer, extra_arg_list) = self.producer_pool[-1]
extra_arg_list_list.append(extra_arg_list)
self.removeProducer(True)
# Start them all up again
for i in xrange(current_num_producers):
self.addProducer(extra_arg_list_list.pop())
logger.debug("Restarting producers - done")
def endProducers(self):
logger.debug("Ending all producers")
for i in xrange(self.num_producers): self.removeProducer()
def getConsumerCount(self):
return self.num_consumers
def addConsumer(self, extra_arg_list = []):
if self.num_consumers < self.maxCores:
# Lock internal
self.__internalLock__.acquire()
new_worker_num = self.next_worker_num
logger.debug("Adding Consumer-%d" % (new_worker_num))
self.consumer_pool_exitEvent.append(multiprocessing.Event())
self.consumer_pool.append((new_worker_num, self.consumer_Class(
self.consumer_task_queue, self.consumer_results_queue, self.get_num_tasks(), self.producer_pool_progress, self.consumer_pool_progress,
self.consumer_pool_exitEvent[-1], self.killed_event, extra_arg_list), extra_arg_list))
self.consumer_pool[-1][1].daemon = False # Remove for debugging
self.consumer_pool[-1][1].start()
# Update consumer count
self.num_consumers += 1
# Update next worker num
self.next_worker_num += 1
# Release internal
self.__internalLock__.release()
logger.debug("Consumer-%d added" % new_worker_num)
else:
logger.error("Attempted to start workers beyond the maxCores setting")
def removeConsumer(self, noLock = True):
if self.num_consumers > 0:
# Lock internal
if not noLock: self.__internalLock__.acquire()
# Remove last worker from worker pool
(worker_num, consumer, extra_arg_list) = self.consumer_pool.pop()
logger.debug("Removing Consumer-%d" % worker_num)
# Remove last worker's exitFlag
consumer_exitEvent = self.consumer_pool_exitEvent.pop()
# Set the worker's exit event
if not consumer_exitEvent.is_set():
logger.debug("Consumer-%d exitEvent SET" % worker_num)
consumer_exitEvent.set()
            # Wait for the worker to acknowledge it has shut down:
while consumer_exitEvent.is_set():
logger.debug("Waiting for Consumer-%d to shutdown" % worker_num)
time.sleep(1)
# Update consumer count
self.num_consumers -= 1
# Release internal
if not noLock: self.__internalLock__.release()
else:
logger.error("Attempted to remove consumer from empty pool.")
def startConsumers(self, num_consumers, extra_arg_list = []):
logger.debug("Starting consumers")
if num_consumers is None:
for i in xrange(self.maxCores - 1): self.addConsumer(extra_arg_list)
else:
for i in xrange(num_consumers): self.addConsumer(extra_arg_list)
def restartConsumers(self):
logger.debug("Restarting consumers")
extra_arg_list_list = []
current_num_consumers = self.num_consumers
# Shut them all down
for i in xrange(current_num_consumers):
# Grab extra_arg_list
(worker_num, consumer, extra_arg_list) = self.consumer_pool[-1]
extra_arg_list_list.append(extra_arg_list)
self.removeConsumer(True)
# Give them time to actually shutdown
time.sleep(1)
# Start them all up again
for i in xrange(current_num_consumers):
self.addConsumer(extra_arg_list_list.pop())
logger.debug("Restarting consumers - done")
def endConsumers(self):
logger.debug("Ending all consumers")
for i in xrange(self.num_consumers): self.removeConsumer()
def grabResults(self):
results = []
try:
while True:
next_result = self.consumer_results_queue.get_nowait()
results.append(next_result)
except Queue.Empty:
pass
return results
@rate_limited(1.0 / 10.0)
def rebalance(self):
if self.governorOffFlag:
return
progProducers = self.getProgressProducers()
progConsumers = self.getProgressConsumers()
num_tasks = self.get_num_tasks()
elapsed_backoff_time = (datetime.now() - self._rebalance_last_kick).seconds
logger.debug("Starting balancing (timer: %d/%d)" % (elapsed_backoff_time, self.rebalance_backoff_timer))
# Kill producers if all tasks have been served
if num_tasks == progProducers and progProducers > 0:
self.endProducers()
return
# Restart paused production on backoff timer or if we have at least 20% memory available
if self.num_producers == 0 and ((elapsed_backoff_time > self.rebalance_backoff_timer) or psutil_phymem_usage() < 80):
logger.debug("Rebalancing, restarting production")
self.addProducer()
return
# Memory governor
# Pause production if we're over 90%
if psutil_phymem_usage() > 90:
logger.debug("Rebalancing, mem > 90%, pausing production")
self.endProducers()
self._rebalance_last_kick = datetime.now()
return
# Reduce production if we're over 75%
if psutil_phymem_usage() > 75 and self.num_producers > 1:
if (datetime.now() - self._rebalance_mem_last_kick).seconds > self.rebalance_mem_backoff_timer:
logger.debug("Rebalancing, mem > 75%")
self.removeProducer()
self._rebalance_mem_last_kick = datetime.now()
return
# Memory pressure check
if psutil_phymem_usage() > 70:
if (datetime.now() - self._rebalance_mem_last_kick).seconds > self.rebalance_mem_backoff_timer:
logger.debug("Rebalancing mem, recycling processes")
self.restartConsumers()
self.restartProducers()
gc.collect()
self._rebalance_mem_last_kick = datetime.now()
logger.debug("Rebalancing mem, recycling processes - done")
return
else:
logger.debug("Rebalance (Memory pressure check) postponed, waiting for rebalance_backoff_timer")
# We wait until tasks are moving along to start rebalancing stuff
if progProducers < (num_tasks / 10):
return
# Auto-balancing
if progProducers > progConsumers * 2:
if self.num_producers > 1:
if elapsed_backoff_time > self.rebalance_backoff_timer:
logger.debug("Rebalancing, too many producers")
self.removeProducer()
self._rebalance_last_kick = datetime.now()
return
else: logger.debug("Rebalance postponed, waiting for rebalance_backoff_timer")
elif progProducers < progConsumers * 1.20:
if num_tasks > progProducers * 1.20:
if psutil_phymem_usage() < 70 and elapsed_backoff_time > self.rebalance_backoff_timer:
logger.debug("Rebalancing")
self.addProducer()
self._rebalance_last_kick = datetime.now()
return
else: logger.debug("Rebalance (Auto-balancing) postponed, waiting for rebalance_backoff_timer")
logger.debug("Balancing done")
| 37.482143
| 150
| 0.632563
|
ce5860a9e378c6b7b32ad76416b1ded3dfc30fa1
| 6,605
|
py
|
Python
|
Contents/Libraries/Shared/ftfy/bad_codecs/sloppy.py
|
jippo015/Sub-Zero.bundle
|
734e0f7128c05c0f639e11e7dfc77daa1014064b
|
[
"MIT"
] | 1,553
|
2015-11-09T02:17:06.000Z
|
2022-03-31T20:24:52.000Z
|
Contents/Libraries/Shared/ftfy/bad_codecs/sloppy.py
|
saiterlz/Sub-Zero.bundle
|
1a0bb9c3e4be84be35d46672907783363fe5a87b
|
[
"MIT"
] | 691
|
2015-11-05T21:32:26.000Z
|
2022-03-17T10:52:45.000Z
|
Contents/Libraries/Shared/ftfy/bad_codecs/sloppy.py
|
saiterlz/Sub-Zero.bundle
|
1a0bb9c3e4be84be35d46672907783363fe5a87b
|
[
"MIT"
] | 162
|
2015-11-06T19:38:55.000Z
|
2022-03-16T02:42:41.000Z
|
# coding: utf-8
r"""
Decodes single-byte encodings, filling their "holes" in the same messy way that
everyone else does.
A single-byte encoding maps each byte to a Unicode character, except that some
bytes are left unmapped. In the commonly-used Windows-1252 encoding, for
example, bytes 0x81 and 0x8D, among others, have no meaning.
Python, wanting to preserve some sense of decorum, will handle these bytes
as errors. But Windows knows that 0x81 and 0x8D are possible bytes and they're
different from each other. It just hasn't defined what they are in terms of
Unicode.
Software that has to interoperate with Windows-1252 and Unicode -- such as all
the common Web browsers -- will pick some Unicode characters for them to map
to, and the characters they pick are the Unicode characters with the same
numbers: U+0081 and U+008D. This is the same as what Latin-1 does, and the
resulting characters tend to fall into a range of Unicode that's set aside for
obsolete Latin-1 control characters anyway.
These sloppy codecs let Python do the same thing, thus interoperating with
other software that works this way. It defines a sloppy version of many
single-byte encodings with holes. (There is no need for a sloppy version of
an encoding without holes: for example, there is no such thing as
sloppy-iso-8859-2 or sloppy-macroman.)
The following encodings will become defined:
- sloppy-windows-1250 (Central European, sort of based on ISO-8859-2)
- sloppy-windows-1251 (Cyrillic)
- sloppy-windows-1252 (Western European, based on Latin-1)
- sloppy-windows-1253 (Greek, sort of based on ISO-8859-7)
- sloppy-windows-1254 (Turkish, based on ISO-8859-9)
- sloppy-windows-1255 (Hebrew, based on ISO-8859-8)
- sloppy-windows-1256 (Arabic)
- sloppy-windows-1257 (Baltic, based on ISO-8859-13)
- sloppy-windows-1258 (Vietnamese)
- sloppy-cp874 (Thai, based on ISO-8859-11)
- sloppy-iso-8859-3 (Maltese and Esperanto, I guess)
- sloppy-iso-8859-6 (different Arabic)
- sloppy-iso-8859-7 (Greek)
- sloppy-iso-8859-8 (Hebrew)
- sloppy-iso-8859-11 (Thai)
Aliases such as "sloppy-cp1252" for "sloppy-windows-1252" will also be
defined.
Only sloppy-windows-1251 and sloppy-windows-1252 are used by the rest of ftfy;
the rest are rather uncommon.
Here are some examples, using `ftfy.explain_unicode` to illustrate how
sloppy-windows-1252 merges Windows-1252 with Latin-1:
>>> from ftfy import explain_unicode
>>> some_bytes = b'\x80\x81\x82'
>>> explain_unicode(some_bytes.decode('latin-1'))
U+0080 \x80 [Cc] <unknown>
U+0081 \x81 [Cc] <unknown>
U+0082 \x82 [Cc] <unknown>
>>> explain_unicode(some_bytes.decode('windows-1252', 'replace'))
U+20AC € [Sc] EURO SIGN
U+FFFD � [So] REPLACEMENT CHARACTER
U+201A ‚ [Ps] SINGLE LOW-9 QUOTATION MARK
>>> explain_unicode(some_bytes.decode('sloppy-windows-1252'))
U+20AC € [Sc] EURO SIGN
U+0081 \x81 [Cc] <unknown>
U+201A ‚ [Ps] SINGLE LOW-9 QUOTATION MARK
"""
from __future__ import unicode_literals
import codecs
from encodings import normalize_encoding
import sys
REPLACEMENT_CHAR = '\ufffd'
PY26 = sys.version_info[:2] == (2, 6)
def make_sloppy_codec(encoding):
"""
Take a codec name, and return a 'sloppy' version of that codec that can
encode and decode the unassigned bytes in that encoding.
Single-byte encodings in the standard library are defined using some
boilerplate classes surrounding the functions that do the actual work,
`codecs.charmap_decode` and `charmap_encode`. This function, given an
encoding name, *defines* those boilerplate classes.
"""
# Make an array of all 256 possible bytes.
all_bytes = bytearray(range(256))
# Get a list of what they would decode to in Latin-1.
sloppy_chars = list(all_bytes.decode('latin-1'))
# Get a list of what they decode to in the given encoding. Use the
# replacement character for unassigned bytes.
if PY26:
decoded_chars = all_bytes.decode(encoding, 'replace')
else:
decoded_chars = all_bytes.decode(encoding, errors='replace')
# Update the sloppy_chars list. Each byte that was successfully decoded
# gets its decoded value in the list. The unassigned bytes are left as
# they are, which gives their decoding in Latin-1.
for i, char in enumerate(decoded_chars):
if char != REPLACEMENT_CHAR:
sloppy_chars[i] = char
# For ftfy's own purposes, we're going to allow byte 1A, the "Substitute"
# control code, to encode the Unicode replacement character U+FFFD.
sloppy_chars[0x1a] = REPLACEMENT_CHAR
# Create the data structures that tell the charmap methods how to encode
# and decode in this sloppy encoding.
decoding_table = ''.join(sloppy_chars)
encoding_table = codecs.charmap_build(decoding_table)
# Now produce all the class boilerplate. Look at the Python source for
# `encodings.cp1252` for comparison; this is almost exactly the same,
# except I made it follow pep8.
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(
name='sloppy-' + encoding,
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
# Define a codec for each incomplete encoding. The resulting CODECS dictionary
# can be used by the main module of ftfy.bad_codecs.
CODECS = {}
INCOMPLETE_ENCODINGS = (
['windows-%s' % num for num in range(1250, 1259)] +
['iso-8859-%s' % num for num in (3, 6, 7, 8, 11)] +
['cp%s' % num for num in range(1250, 1259)] + ['cp874']
)
for _encoding in INCOMPLETE_ENCODINGS:
_new_name = normalize_encoding('sloppy-' + _encoding)
CODECS[_new_name] = make_sloppy_codec(_encoding)
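# Minimal usage sketch (illustration only): decode the byte string from the
# module docstring with the codec built above. CODECS keys follow
# normalize_encoding(), so 'sloppy-windows-1252' is stored as
# 'sloppy_windows_1252'; CodecInfo.decode returns a (text, length) pair.
if __name__ == '__main__':
    _demo = CODECS[normalize_encoding('sloppy-windows-1252')]
    _text, _length = _demo.decode(b'\x80\x81\x82')
    print(repr(_text))  # EURO SIGN, U+0081, SINGLE LOW-9 QUOTATION MARK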
| 40.030303
| 79
| 0.716276
|
a7e3ea2bca17911c58028f452a735367a801eccf
| 3,400
|
py
|
Python
|
po_pattern/Lib/site-packages/behave/reporter/summary.py
|
tomekwszelaki/page-object-pattern-python
|
eb0ff7a1329b88149d743f2bc4a827c984e72dc3
|
[
"MIT"
] | 1
|
2017-03-22T04:25:35.000Z
|
2017-03-22T04:25:35.000Z
|
po_pattern/Lib/site-packages/behave/reporter/summary.py
|
tomekwszelaki/page-object-pattern-python
|
eb0ff7a1329b88149d743f2bc4a827c984e72dc3
|
[
"MIT"
] | null | null | null |
po_pattern/Lib/site-packages/behave/reporter/summary.py
|
tomekwszelaki/page-object-pattern-python
|
eb0ff7a1329b88149d743f2bc4a827c984e72dc3
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
"""
Provides a summary after each test run.
"""
from __future__ import absolute_import, division
from behave.model import ScenarioOutline
from behave.reporter.base import Reporter
from behave.formatter.base import StreamOpener
import sys
# -- DISABLED: optional_steps = ('untested', 'undefined')
optional_steps = ('untested',)
def format_summary(statement_type, summary):
parts = []
for status in ('passed', 'failed', 'skipped', 'undefined', 'untested'):
if status not in summary:
continue
counts = summary[status]
if status in optional_steps and counts == 0:
# -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
continue
if not parts:
# -- FIRST ITEM: Add statement_type to counter.
label = statement_type
if counts != 1:
label += 's'
part = u'%d %s %s' % (counts, label, status)
else:
part = u'%d %s' % (counts, status)
parts.append(part)
return ', '.join(parts) + '\n'
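# Example (illustration only, not part of behave): given
#     {'passed': 2, 'failed': 1, 'skipped': 0, 'untested': 0}
# format_summary('feature', ...) returns "2 features passed, 1 failed, 0 skipped\n";
# the zero 'untested' count is dropped because 'untested' is listed in optional_steps.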
class SummaryReporter(Reporter):
show_failed_scenarios = True
output_stream_name = "stdout"
def __init__(self, config):
super(SummaryReporter, self).__init__(config)
stream = getattr(sys, self.output_stream_name, sys.stderr)
self.stream = StreamOpener.ensure_stream_with_encoder(stream)
self.feature_summary = {'passed': 0, 'failed': 0, 'skipped': 0,
'untested': 0}
self.scenario_summary = {'passed': 0, 'failed': 0, 'skipped': 0,
'untested': 0}
self.step_summary = {'passed': 0, 'failed': 0, 'skipped': 0,
'undefined': 0, 'untested': 0}
self.duration = 0.0
self.failed_scenarios = []
def feature(self, feature):
self.feature_summary[feature.status or 'skipped'] += 1
self.duration += feature.duration
for scenario in feature:
if isinstance(scenario, ScenarioOutline):
self.process_scenario_outline(scenario)
else:
self.process_scenario(scenario)
def end(self):
# -- SHOW FAILED SCENARIOS (optional):
if self.show_failed_scenarios and self.failed_scenarios:
self.stream.write("\nFailing scenarios:\n")
for scenario in self.failed_scenarios:
self.stream.write(u" %s %s\n" % (
scenario.location, scenario.name))
self.stream.write("\n")
# -- SHOW SUMMARY COUNTS:
self.stream.write(format_summary('feature', self.feature_summary))
self.stream.write(format_summary('scenario', self.scenario_summary))
self.stream.write(format_summary('step', self.step_summary))
timings = (int(self.duration / 60.0), self.duration % 60)
self.stream.write('Took %dm%02.3fs\n' % timings)
def process_scenario(self, scenario):
if scenario.status == 'failed':
self.failed_scenarios.append(scenario)
self.scenario_summary[scenario.status or 'skipped'] += 1
for step in scenario:
self.step_summary[step.status or 'skipped'] += 1
def process_scenario_outline(self, scenario_outline):
for scenario in scenario_outline.scenarios:
self.process_scenario(scenario)
| 37.362637
| 79
| 0.605882
|
ab9e83de05901d2b7c7f71332439bbba98109903
| 2,242
|
py
|
Python
|
tests/test_util.py
|
t-cas/JumpSSH
|
9f8529690064d11c9ec965a0340f220669bd7663
|
[
"MIT"
] | 82
|
2017-06-12T03:31:44.000Z
|
2022-02-22T12:15:41.000Z
|
tests/test_util.py
|
t-cas/JumpSSH
|
9f8529690064d11c9ec965a0340f220669bd7663
|
[
"MIT"
] | 225
|
2017-05-24T18:11:25.000Z
|
2022-03-26T17:00:40.000Z
|
tests/test_util.py
|
t-cas/JumpSSH
|
9f8529690064d11c9ec965a0340f220669bd7663
|
[
"MIT"
] | 27
|
2017-05-24T17:50:25.000Z
|
2022-01-23T18:07:11.000Z
|
try:
import unittest.mock as mock
except ImportError:
import mock
import string
import pytest
from jumpssh import util
mock_input = '__builtin__.raw_input' if util.PY2 else 'builtins.input'
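# Note (added for clarity): the prompt is patched by dotted path, which differs
# between interpreters -- Python 2 exposes raw_input() on __builtin__, while
# Python 3 exposes input() on builtins. Both mock.patch and monkeypatch.setattr
# accept this dotted-string target form, which the tests below rely on.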
def test_id_generator():
# basic checks on size
assert int(util.id_generator(size=1, chars=string.digits)) < 10
assert len(util.id_generator(size=5)) == 5
# basic checks on character types
assert util.id_generator(size=5).isalnum()
assert util.id_generator(size=8, chars=string.ascii_letters).isalpha()
assert util.id_generator(size=8, chars=string.digits).isdigit()
def test_yes_no_query_invalid_input_parameters():
with pytest.raises(ValueError):
util.yes_no_query('A question ?', default='invalid param value')
with pytest.raises(ValueError):
util.yes_no_query('A question ?', interrupt='invalid param value')
@pytest.mark.parametrize("answer", ['y', 'Y', 'yes', 'YES', 'Yes'])
def test_yes_no_query_nominal_case_yes(answer, monkeypatch):
monkeypatch.setattr(mock_input, lambda x: answer)
assert util.yes_no_query('A question ?') is True
@pytest.mark.parametrize("answer", ['n', 'N', 'no', 'NO', 'No'])
def test_yes_no_query_nominal_case_no(answer, monkeypatch):
monkeypatch.setattr(mock_input, lambda x: answer)
assert util.yes_no_query('A question ?') is False
@pytest.mark.parametrize("answer", [' ', ' ', '\t'])
def test_yes_no_query_empty_answer(answer, monkeypatch):
monkeypatch.setattr(mock_input, lambda x: answer)
assert util.yes_no_query('A question ?', default=True) is True
assert util.yes_no_query('A question ?', default=False) is False
def test_yes_no_query_interrupt():
with mock.patch(mock_input, side_effect=KeyboardInterrupt('Fake Ctrl-C')):
assert util.yes_no_query('A question ?', interrupt=True) is True
assert util.yes_no_query('A question ?', interrupt=False) is False
def test_yes_no_query_eof():
with mock.patch(mock_input, side_effect=EOFError('Fake EOFError')):
assert util.yes_no_query('A question ?', default=True) is True
assert util.yes_no_query('A question ?', default=False) is False
with pytest.raises(EOFError):
util.yes_no_query('A question ?')
| 35.587302
| 78
| 0.718109
|
4ab5ec12573748b8d4a63b78c2e7bc666b2ecd95
| 584
|
py
|
Python
|
stdplugins/leave.py
|
dqanshi/PornHub
|
162a7053ca7f2c0b3617b852559cfaf0502d94a7
|
[
"Apache-2.0"
] | 55
|
2019-07-13T15:57:54.000Z
|
2021-09-20T16:50:42.000Z
|
stdplugins/leave.py
|
dqanshi/PornHub
|
162a7053ca7f2c0b3617b852559cfaf0502d94a7
|
[
"Apache-2.0"
] | 3
|
2020-04-15T02:08:53.000Z
|
2020-06-06T13:45:18.000Z
|
stdplugins/leave.py
|
dqanshi/PornHub
|
162a7053ca7f2c0b3617b852559cfaf0502d94a7
|
[
"Apache-2.0"
] | 450
|
2019-07-12T13:18:41.000Z
|
2022-03-29T18:47:42.000Z
|
# For @UniBorg
"""fake leave
.fleave"""
from telethon import events
from datetime import datetime
from uniborg.util import admin_cmd
import importlib.util
import asyncio
import random
@borg.on(events.NewMessage(outgoing=True, pattern=r'^\.(f?f)l '))
async def timer_blankx(e):
txt=e.text[7:] + '\n\n`Processing....` '
j=1
k=j
for j in range(j):
await e.edit(txt + str(k))
k=k-1
await asyncio.sleep(1)
if e.pattern_match.group(1) == 'f':
        await e.edit("`Legend is leaving this chat.....!` @admin `Goodbyes aren't forever..` ")
| 12.166667
| 88
| 0.664384
|
fa5a018cf1dae94c71c7661a29cd65a45af20b30
| 10,329
|
py
|
Python
|
tests/extension/types_/ipxact_/slave_lite/test_types_ipxact_slave_lite.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
tests/extension/types_/ipxact_/slave_lite/test_types_ipxact_slave_lite.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
tests/extension/types_/ipxact_/slave_lite/test_types_ipxact_slave_lite.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import types_ipxact_slave_lite
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [32-1:0] LED;
reg [32-1:0] myaxi_awaddr;
reg [4-1:0] myaxi_awcache;
reg [3-1:0] myaxi_awprot;
reg myaxi_awvalid;
wire myaxi_awready;
reg [32-1:0] myaxi_wdata;
reg [4-1:0] myaxi_wstrb;
reg myaxi_wvalid;
wire myaxi_wready;
wire [2-1:0] myaxi_bresp;
wire myaxi_bvalid;
reg myaxi_bready;
reg [32-1:0] myaxi_araddr;
reg [4-1:0] myaxi_arcache;
reg [3-1:0] myaxi_arprot;
reg myaxi_arvalid;
wire myaxi_arready;
wire [32-1:0] myaxi_rdata;
wire [2-1:0] myaxi_rresp;
wire myaxi_rvalid;
reg myaxi_rready;
reg [32-1:0] _axi_awaddr;
wire [4-1:0] _axi_awcache;
wire [3-1:0] _axi_awprot;
reg _axi_awvalid;
wire _axi_awready;
reg [32-1:0] _axi_wdata;
reg [4-1:0] _axi_wstrb;
reg _axi_wvalid;
wire _axi_wready;
wire [2-1:0] _axi_bresp;
wire _axi_bvalid;
wire _axi_bready;
reg [32-1:0] _axi_araddr;
wire [4-1:0] _axi_arcache;
wire [3-1:0] _axi_arprot;
reg _axi_arvalid;
wire _axi_arready;
wire [32-1:0] _axi_rdata;
wire [2-1:0] _axi_rresp;
wire _axi_rvalid;
wire _axi_rready;
assign _axi_awcache = 3;
assign _axi_awprot = 0;
assign _axi_bready = 1;
assign _axi_arcache = 3;
assign _axi_arprot = 0;
wire [32-1:0] _tmp_0;
assign _tmp_0 = _axi_awaddr;
always @(*) begin
myaxi_awaddr = _tmp_0;
end
wire [4-1:0] _tmp_1;
assign _tmp_1 = _axi_awcache;
always @(*) begin
myaxi_awcache = _tmp_1;
end
wire [3-1:0] _tmp_2;
assign _tmp_2 = _axi_awprot;
always @(*) begin
myaxi_awprot = _tmp_2;
end
wire _tmp_3;
assign _tmp_3 = _axi_awvalid;
always @(*) begin
myaxi_awvalid = _tmp_3;
end
assign _axi_awready = myaxi_awready;
wire [32-1:0] _tmp_4;
assign _tmp_4 = _axi_wdata;
always @(*) begin
myaxi_wdata = _tmp_4;
end
wire [4-1:0] _tmp_5;
assign _tmp_5 = _axi_wstrb;
always @(*) begin
myaxi_wstrb = _tmp_5;
end
wire _tmp_6;
assign _tmp_6 = _axi_wvalid;
always @(*) begin
myaxi_wvalid = _tmp_6;
end
assign _axi_wready = myaxi_wready;
assign _axi_bresp = myaxi_bresp;
assign _axi_bvalid = myaxi_bvalid;
wire _tmp_7;
assign _tmp_7 = _axi_bready;
always @(*) begin
myaxi_bready = _tmp_7;
end
wire [32-1:0] _tmp_8;
assign _tmp_8 = _axi_araddr;
always @(*) begin
myaxi_araddr = _tmp_8;
end
wire [4-1:0] _tmp_9;
assign _tmp_9 = _axi_arcache;
always @(*) begin
myaxi_arcache = _tmp_9;
end
wire [3-1:0] _tmp_10;
assign _tmp_10 = _axi_arprot;
always @(*) begin
myaxi_arprot = _tmp_10;
end
wire _tmp_11;
assign _tmp_11 = _axi_arvalid;
always @(*) begin
myaxi_arvalid = _tmp_11;
end
assign _axi_arready = myaxi_arready;
assign _axi_rdata = myaxi_rdata;
assign _axi_rresp = myaxi_rresp;
assign _axi_rvalid = myaxi_rvalid;
wire _tmp_12;
assign _tmp_12 = _axi_rready;
always @(*) begin
myaxi_rready = _tmp_12;
end
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] sum;
reg __axi_cond_0_1;
reg __axi_cond_1_1;
assign _axi_rready = (fsm == 1) || (fsm == 3);
main
uut
(
.CLK(CLK),
.RST(RST),
.LED(LED),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awcache(myaxi_awcache),
.myaxi_awprot(myaxi_awprot),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_bresp(myaxi_bresp),
.myaxi_bvalid(myaxi_bvalid),
.myaxi_bready(myaxi_bready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arcache(myaxi_arcache),
.myaxi_arprot(myaxi_arprot),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rresp(myaxi_rresp),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut, CLK, RST, LED, myaxi_awaddr, myaxi_awcache, myaxi_awprot, myaxi_awvalid, myaxi_awready, myaxi_wdata, myaxi_wstrb, myaxi_wvalid, myaxi_wready, myaxi_bresp, myaxi_bvalid, myaxi_bready, myaxi_araddr, myaxi_arcache, myaxi_arprot, myaxi_arvalid, myaxi_arready, myaxi_rdata, myaxi_rresp, myaxi_rvalid, myaxi_rready, _axi_awaddr, _axi_awcache, _axi_awprot, _axi_awvalid, _axi_awready, _axi_wdata, _axi_wstrb, _axi_wvalid, _axi_wready, _axi_bresp, _axi_bvalid, _axi_bready, _axi_araddr, _axi_arcache, _axi_arprot, _axi_arvalid, _axi_arready, _axi_rdata, _axi_rresp, _axi_rvalid, _axi_rready, _tmp_0, _tmp_1, _tmp_2, _tmp_3, _tmp_4, _tmp_5, _tmp_6, _tmp_7, _tmp_8, _tmp_9, _tmp_10, _tmp_11, _tmp_12, fsm, sum, __axi_cond_0_1, __axi_cond_1_1);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
_axi_awaddr = 0;
_axi_awvalid = 0;
_axi_wdata = 0;
_axi_wstrb = 0;
_axi_wvalid = 0;
_axi_araddr = 0;
_axi_arvalid = 0;
fsm = fsm_init;
sum = 0;
__axi_cond_0_1 = 0;
__axi_cond_1_1 = 0;
#100;
RST = 1;
#100;
RST = 0;
#100000;
$finish;
end
always @(posedge CLK) begin
if(RST) begin
_axi_awaddr <= 0;
_axi_awvalid <= 0;
_axi_wdata <= 0;
_axi_wstrb <= 0;
_axi_wvalid <= 0;
_axi_araddr <= 0;
_axi_arvalid <= 0;
__axi_cond_0_1 <= 0;
__axi_cond_1_1 <= 0;
end else begin
if(__axi_cond_0_1) begin
_axi_arvalid <= 0;
end
if(__axi_cond_1_1) begin
_axi_arvalid <= 0;
end
_axi_awaddr <= 0;
_axi_awvalid <= 0;
_axi_wdata <= 0;
_axi_wstrb <= 0;
_axi_wvalid <= 0;
if((fsm == 0) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 1024;
_axi_arvalid <= 1;
end
__axi_cond_0_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
if((fsm == 2) && (_axi_arready || !_axi_arvalid)) begin
_axi_araddr <= 2048;
_axi_arvalid <= 1;
end
__axi_cond_1_1 <= 1;
if(_axi_arvalid && !_axi_arready) begin
_axi_arvalid <= _axi_arvalid;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
localparam fsm_3 = 3;
localparam fsm_4 = 4;
localparam fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
sum <= 0;
end else begin
case(fsm)
fsm_init: begin
if(_axi_arready || !_axi_arvalid) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(_axi_rready && _axi_rvalid) begin
sum <= sum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
if(_axi_arready || !_axi_arvalid) begin
fsm <= fsm_3;
end
end
fsm_3: begin
if(_axi_rready && _axi_rvalid) begin
sum <= sum + _axi_rdata;
end
if(_axi_rready && _axi_rvalid) begin
fsm <= fsm_4;
end
end
fsm_4: begin
$display("sum=%d expected_sum=%d", sum, 768);
fsm <= fsm_5;
end
endcase
end
end
endmodule
module main
(
input CLK,
input RST,
output [32-1:0] LED,
input [32-1:0] myaxi_awaddr,
input [4-1:0] myaxi_awcache,
input [3-1:0] myaxi_awprot,
input myaxi_awvalid,
output myaxi_awready,
input [32-1:0] myaxi_wdata,
input [4-1:0] myaxi_wstrb,
input myaxi_wvalid,
output myaxi_wready,
output [2-1:0] myaxi_bresp,
output reg myaxi_bvalid,
input myaxi_bready,
input [32-1:0] myaxi_araddr,
input [4-1:0] myaxi_arcache,
input [3-1:0] myaxi_arprot,
input myaxi_arvalid,
output myaxi_arready,
output reg [32-1:0] myaxi_rdata,
output [2-1:0] myaxi_rresp,
output reg myaxi_rvalid,
input myaxi_rready
);
assign myaxi_bresp = 0;
assign myaxi_rresp = 0;
assign myaxi_awready = 0;
assign myaxi_wready = 0;
reg [32-1:0] fsm;
localparam fsm_init = 0;
reg [32-1:0] _tmp_0;
reg _tmp_1;
reg _tmp_2;
assign myaxi_arready = (fsm == 0) && !_tmp_1 && _tmp_2;
reg [32-1:0] rdata;
reg _myaxi_cond_0_1;
always @(posedge CLK) begin
if(RST) begin
myaxi_bvalid <= 0;
_tmp_2 <= 0;
_tmp_0 <= 0;
_tmp_1 <= 0;
myaxi_rdata <= 0;
myaxi_rvalid <= 0;
_myaxi_cond_0_1 <= 0;
end else begin
if(_myaxi_cond_0_1) begin
myaxi_rvalid <= 0;
end
if(myaxi_bvalid && myaxi_bready) begin
myaxi_bvalid <= 0;
end
if(myaxi_wvalid && myaxi_wready) begin
myaxi_bvalid <= 1;
end
_tmp_2 <= myaxi_arvalid;
if(myaxi_arready && myaxi_arvalid) begin
_tmp_0 <= myaxi_araddr;
end
_tmp_1 <= myaxi_arready && myaxi_arvalid;
if((fsm == 1) && (myaxi_rready || !myaxi_rvalid)) begin
myaxi_rdata <= rdata;
myaxi_rvalid <= 1;
end
_myaxi_cond_0_1 <= 1;
if(myaxi_rvalid && !myaxi_rready) begin
myaxi_rvalid <= myaxi_rvalid;
end
end
end
localparam fsm_1 = 1;
localparam fsm_2 = 2;
always @(posedge CLK) begin
if(RST) begin
fsm <= fsm_init;
rdata <= 0;
end else begin
case(fsm)
fsm_init: begin
if(_tmp_1) begin
rdata <= _tmp_0 >> 2;
end
if(_tmp_1) begin
fsm <= fsm_1;
end
end
fsm_1: begin
if(myaxi_rready || !myaxi_rvalid) begin
rdata <= rdata + 1;
end
if(myaxi_rready || !myaxi_rvalid) begin
fsm <= fsm_2;
end
end
fsm_2: begin
fsm <= fsm_init;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = types_ipxact_slave_lite.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
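# Note (added for clarity): the expected Verilog above is parsed with pyverilog
# and re-emitted through ASTCodeGenerator before the comparison, so the assert
# checks the code generated by veriloggen against a normalized rendering of the
# expected text rather than against the raw string.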
| 22.953333
| 755
| 0.627069
|
5e1f67496b6de76601946fdaa2b7a83c14528251
| 3,924
|
py
|
Python
|
hangman/model.py
|
bionikspoon/tdd_python_hangman
|
f4b990b585b0b270c78221826f3e7ee3390f4ee3
|
[
"MIT"
] | 7
|
2015-12-18T08:32:06.000Z
|
2019-04-23T17:50:50.000Z
|
hangman/model.py
|
bionikspoon/tdd_python_hangman
|
f4b990b585b0b270c78221826f3e7ee3390f4ee3
|
[
"MIT"
] | 4
|
2015-12-18T07:22:58.000Z
|
2020-10-28T01:34:50.000Z
|
hangman/model.py
|
bionikspoon/tdd_python_hangman
|
f4b990b585b0b270c78221826f3e7ee3390f4ee3
|
[
"MIT"
] | 2
|
2021-08-25T04:58:51.000Z
|
2021-10-04T22:08:36.000Z
|
# coding=utf-8
"""
hangman.model
~~~~~~~~~~~~~
"""
from __future__ import absolute_import
import re
from collections import namedtuple
from .utils import WordBank, GameLost, GameWon
class Hangman(object):
"""
    The logic for managing the status of the game and raising key game-related events.
>>> from hangman.model import Hangman
>>> game = Hangman(answer='hangman')
>>> game.guess('a')
hangman(status='_A___A_', misses=[], remaining_turns=10)
>>> game.guess('n').guess('z').guess('e')
hangman(status='_AN__AN', misses=['E', 'Z'], remaining_turns=8)
>>> game.status
'_AN__AN'
>>> game.misses
['E', 'Z']
>>> game.remaining_turns
8
"""
# CLASS PROPERTIES
# -------------------------------------------------------------------
MAX_TURNS = 10
_re_answer_rules = re.compile('^[A-Z]{1,16}$')
_re_guess_rules = re.compile('^[A-Z]$')
_repr = namedtuple('hangman', ['status', 'misses', 'remaining_turns'])
# CONSTRUCTOR
# -------------------------------------------------------------------
def __init__(self, answer=None):
if not answer:
# Populate answer
answer = WordBank.get()
# Validate answer.
if not self.is_valid_answer(answer):
raise ValueError("Word must be letters A-Z")
self.answer = answer.upper()
self._misses = set()
self._hits = set()
# PUBLIC API
# -------------------------------------------------------------------
def guess(self, letter):
"""Add letter to hits or misses."""
# validate input
if not self.is_valid_guess(letter):
raise ValueError('Must be a letter A-Z')
# add to hits or misses
is_miss = letter.upper() not in self.answer
if is_miss:
self._add_miss(letter)
else:
self._add_hit(letter)
return self
# INSTANCE PROPERTIES
# -------------------------------------------------------------------
@property
def misses(self):
"""List of misses."""
return sorted(list(self._misses))
@misses.setter
def misses(self, letters):
for letter in letters:
self._add_miss(letter)
@property
def hits(self):
"""List of hits."""
return sorted(list(self._hits))
@hits.setter
def hits(self, letters):
for letter in letters:
self._add_hit(letter)
@property
def remaining_turns(self):
"""Calculate number of turns remaining."""
return self.MAX_TURNS - len(self.misses)
@property
def status(self):
"""Build a string representation of status."""
hits = self.hits # calculated property
def fill_in(letter):
"""Replace non-hits with `_`."""
return letter if letter in hits else '_'
return ''.join(fill_in(letter) for letter in self.answer)
# UTILITIES
# -------------------------------------------------------------------
def _add_miss(self, value):
"""Add a letter to misses. Check for game over."""
self._misses.add(value.upper())
if self.remaining_turns <= 0:
raise GameLost
def _add_hit(self, value):
"""Add a letter to hits. Check for game won"""
self._hits.add(value.upper())
if self._hits == set(self.answer):
raise GameWon
def is_valid_answer(self, word):
"""Validate answer. Letters only. Max:16"""
word = str(word).upper()
return not not self._re_answer_rules.search(word)
def is_valid_guess(self, letter):
"""Validate guess. Letters only. Max:1"""
letter = str(letter).upper()
return not not self._re_guess_rules.search(letter)
def __repr__(self):
return repr(self._repr(self.status, self.misses, self.remaining_turns))
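# Minimal driver sketch (illustration only; GameWon/GameLost are the events
# imported above and are raised from _add_hit/_add_miss):
#
#     game = Hangman(answer='python')
#     try:
#         for letter in 'pythons':
#             game.guess(letter)
#     except GameWon:
#         print('Won: %s' % game.status)
#     except GameLost:
#         print('Lost, the answer was %s' % game.answer)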
| 25.986755
| 90
| 0.534913
|
86e200123c1887ab9de400fc80f9d2cd5b332e5f
| 11,535
|
py
|
Python
|
utils/utils_tf.py
|
haibinzheng/NeuronFair
|
5f6affd6fb378058bb0d2a0fd0ea413d2c8bd3cf
|
[
"Apache-2.0"
] | 1
|
2022-02-10T13:39:44.000Z
|
2022-02-10T13:39:44.000Z
|
utils/utils_tf.py
|
haibinzheng/NeuronFair
|
5f6affd6fb378058bb0d2a0fd0ea413d2c8bd3cf
|
[
"Apache-2.0"
] | null | null | null |
utils/utils_tf.py
|
haibinzheng/NeuronFair
|
5f6affd6fb378058bb0d2a0fd0ea413d2c8bd3cf
|
[
"Apache-2.0"
] | null | null | null |
from distutils.version import LooseVersion
import numpy as np
import os
from six.moves import xrange
import tensorflow as tf
import time
import warnings
import math
import sys
sys.path.append('../')
from .utils import batch_indices, _ArgsWrapper
def model_loss(y, model, mean=True):
"""
Define loss of TF graph
:param y: correct labels
:param model: output of the model
:param mean: boolean indicating whether should return mean of loss
or vector of losses for each input of the batch
:return: return mean of loss if True, otherwise return vector with per
sample loss
"""
op = model.op
if op.type == "Softmax":
logits, = op.inputs
else:
logits = model
out = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
if mean:
out = tf.reduce_mean(out)
return out
def initialize_uninitialized_global_variables(sess):
"""
Only initializes the variables of a TensorFlow session that were not
already initialized.
:param sess: the TensorFlow session
:return:
"""
# List all global variables
global_vars = tf.global_variables()
# Find initialized status for all variables
is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
is_initialized = sess.run(is_var_init)
# List all variables that were not initialized previously
not_initialized_vars = [var for (var, init) in
zip(global_vars, is_initialized) if not init]
# Initialize all uninitialized variables found, if any
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
def model_train(sess, x, y, predictions, X_train, Y_train, save=False,
predictions_adv=None, init_all=True, evaluate=None,
feed=None, args=None, rng=None):
"""
Train a TF graph
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:return: True if model trained
"""
args = _ArgsWrapper(args or {})
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
# Define loss
loss = model_loss(y, predictions)
if predictions_adv is not None:
loss = (loss + model_loss(y, predictions_adv)) / 2
train_step = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_op):
train_step = train_step.minimize(loss)
with sess.as_default():
if hasattr(tf, "global_variables_initializer"):
if init_all:
tf.global_variables_initializer().run()
else:
initialize_uninitialized_global_variables(sess)
else:
warnings.warn("Update your copy of tensorflow; future versions of "
"guardai_util may drop support for this version.")
sess.run(tf.initialize_all_variables())
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
feed_dict = {x: X_train[index_shuf[start:end]],
y: Y_train[index_shuf[start:end]]}
if feed is not None:
feed_dict.update(feed)
train_step.run(feed_dict=feed_dict)
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
print("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
if save:
train_dir = os.path.join(args.train_dir)# , str(args.nb_epochs - 1)
try:
os.makedirs(train_dir)
except:
pass
save_path = os.path.join(train_dir, args.filename)
saver = tf.train.Saver()
saver.save(sess, save_path)
print("Completed model training and saved at: " +
str(save_path))
else:
print("Completed model training.")
return True
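# Usage sketch for model_train (illustration only; x, y, preds, X_train and
# Y_train are assumed to be defined by the caller):
#
#     train_params = {'nb_epochs': 10, 'learning_rate': 1e-3, 'batch_size': 128,
#                     'train_dir': '/tmp/model', 'filename': 'ckpt'}
#     model_train(sess, x, y, preds, X_train, Y_train, save=True,
#                 args=train_params, rng=np.random.RandomState(1234))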
def model_eval(sess, x, y, predictions, X_test=None, Y_test=None,
feed=None, args=None):
"""
Compute the accuracy of a TF model on some data
:param sess: TF session to use when training the graph
:param x: input placeholder
:param y: output placeholder (for labels)
:param predictions: model output predictions
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:return: a float with the accuracy value
"""
args = _ArgsWrapper(args or {})
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
# Define accuracy symbolically
if LooseVersion(tf.__version__) >= LooseVersion('1.0.0'):
correct_preds = tf.equal(tf.argmax(y, axis=-1),
tf.argmax(predictions, axis=-1))
else:
correct_preds = tf.equal(tf.argmax(y, axis=tf.rank(y) - 1),
tf.argmax(predictions,
axis=tf.rank(predictions) - 1))
# Init result var
accuracy = 0.0
with sess.as_default():
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
print("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
            # affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
feed_dict = {x: X_cur, y: Y_cur}
if feed is not None:
feed_dict.update(feed)
cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)
accuracy += cur_corr_preds[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy
def model_argmax(sess, x, predictions, samples, feed=None):
"""
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance
:return: the argmax output of predictions, i.e. the current predicted class
"""
feed_dict = {x: samples}
if feed is not None:
feed_dict.update(feed)
probabilities = sess.run(predictions, feed_dict)
if samples.shape[0] == 1:
return np.argmax(probabilities)
else:
return np.argmax(probabilities, axis=1)
def model_prediction(sess, x, predictions, samples, feed=None, batch_size=128):
"""
Compute the probability of all classes for inputs
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance
:param batch_size: the size of inputs per batch
:return: the output probability
"""
nb_classes = 2
nb_batches = int(math.ceil(float(samples.shape[0]) / batch_size))
pros_all=np.zeros(shape=(samples.shape[0],nb_classes), dtype='float32')
for batch in range(nb_batches):
start=batch*batch_size
end=(batch+1)*batch_size
if end>samples.shape[0]:
end=samples.shape[0]
feed_dict = {x: samples[start:end]}
if feed is not None:
feed_dict.update(feed)
pros=sess.run(predictions, feed_dict)
for i in range(start,end):
pros_all[i]=pros[i-start]
return pros_all
| 39.101695
| 79
| 0.627221
|
ce26a1cac48430f6638982601a4f3fb6ac4d0943
| 642
|
py
|
Python
|
my_topics/src/message_publisher.py
|
DetectiveDawg/Final-Project
|
745e0b92d8c941cd5daf7dbe25b6bd57383da2db
|
[
"Apache-2.0"
] | 1
|
2021-04-01T23:34:09.000Z
|
2021-04-01T23:34:09.000Z
|
my_topics/src/message_publisher.py
|
DetectiveDawg/Final-Project
|
745e0b92d8c941cd5daf7dbe25b6bd57383da2db
|
[
"Apache-2.0"
] | null | null | null |
my_topics/src/message_publisher.py
|
DetectiveDawg/Final-Project
|
745e0b92d8c941cd5daf7dbe25b6bd57383da2db
|
[
"Apache-2.0"
] | 26
|
2020-04-09T00:47:24.000Z
|
2021-09-21T06:22:28.000Z
|
#!/usr/bin/env python
import rospy
from rico_topics.msg import Complex # custom message type
from random import random # for random numbers!
rospy.init_node('message_publisher') # initialize node
pub = rospy.Publisher( # register topic
'complex', # topic name
Complex, # custom message type
queue_size=3 # queue size
)
rate = rospy.Rate(2) # set rate
while not rospy.is_shutdown(): # loop
msg = Complex() # declare type
msg.real = random() # assign value
msg.imaginary = random() # assign value
pub.publish(msg) # publish!
rate.sleep() # sleep to keep rate
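# Matching subscriber sketch (illustration only, not part of this node):
#
#     def callback(msg):
#         rospy.loginfo('real=%f imaginary=%f', msg.real, msg.imaginary)
#     rospy.Subscriber('complex', Complex, callback)
#     rospy.spin()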
| 29.181818
| 57
| 0.64486
|