Dataset schema (one entry per column; ⌀ marks columns that may contain nulls):
hexsha: string, length 40 | size: int64, 3 to 1.03M | ext: string, 10 classes | lang: string, 1 class (Python)
max_stars_repo_path: string, length 3 to 972 | max_stars_repo_name: string, length 6 to 130 | max_stars_repo_head_hexsha: string, length 40 to 78 | max_stars_repo_licenses: list, length 1 to 10 | max_stars_count: int64, 1 to 191k ⌀ | max_stars_repo_stars_event_min_datetime: string, length 24 ⌀ | max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 3 to 972 | max_issues_repo_name: string, length 6 to 130 | max_issues_repo_head_hexsha: string, length 40 to 78 | max_issues_repo_licenses: list, length 1 to 10 | max_issues_count: int64, 1 to 116k ⌀ | max_issues_repo_issues_event_min_datetime: string, length 24 ⌀ | max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 3 to 972 | max_forks_repo_name: string, length 6 to 130 | max_forks_repo_head_hexsha: string, length 40 to 78 | max_forks_repo_licenses: list, length 1 to 10 | max_forks_count: int64, 1 to 105k ⌀ | max_forks_repo_forks_event_min_datetime: string, length 24 ⌀ | max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 3 to 1.03M | avg_line_length: float64, 1.13 to 941k | max_line_length: int64, 2 to 941k | alphanum_fraction: float64, 0 to 1

hexsha: 4643e65b6ea35e2606b338d0ea21854bc21603a8 | size: 3,020 | ext: py | lang: Python
max_stars: djcommon/helpers.py | baskoopmans/djcommon | 0c373405985b6fa28e8d1fba74fd23ad23df7918 | ["BSD-3-Clause"] | count: 2 | 2019-03-19T05:47:40.000Z to 2019-04-21T12:15:57.000Z
max_issues: djcommon/helpers.py | baskoopmans/djcommon | 0c373405985b6fa28e8d1fba74fd23ad23df7918 | ["BSD-3-Clause"] | count: null | null to null
max_forks: djcommon/helpers.py | baskoopmans/djcommon | 0c373405985b6fa28e8d1fba74fd23ad23df7918 | ["BSD-3-Clause"] | count: null | null to null
content:
# coding: utf-8
import sys
import re
import copy
import json
import random
from importlib import import_module
from decimal import Decimal
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
# for formatting Decimal objects
TWOPLACES = Decimal(10) ** -2
def camelcase_to_underscores(string):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', str(string))
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def get_setting(name, default=None):
return getattr(settings, name, default)
def uniqify_list(seq, preserve_order=False):
    if preserve_order:
        # Order preserving
        seen = set()
        return [x for x in seq if x not in seen and not seen.add(x)]
    else:
        # Not order preserving, faster than the branch above; wrapped in list()
        # so it also works on Python 3, where dict.keys() returns a view.
        return list({}.fromkeys(seq).keys())
def random_slice_list(value, arg):
    # If more items are available than requested (or exactly one is requested),
    # sample a random subset; otherwise shuffle and return them all in random order.
    if len(value) > arg or arg == 1:
        value = random.sample(value, arg)
    else:
        random.shuffle(value)
    return value
def list_contains(items, predicate):
    """Example: list_contains(a_list, lambda x: x.n == 3)  # True if any element has .n == 3"""
    return any(predicate(x) for x in items)
def hash_recursive(mutable):
if isinstance(mutable, (set, tuple, list)):
return tuple([hash_recursive(item) for item in mutable])
elif not isinstance(mutable, dict):
return hash(mutable)
new_mutable = copy.deepcopy(mutable)
for key, value in new_mutable.items():
new_mutable[key] = hash_recursive(value)
return hash(tuple(frozenset(sorted(new_mutable.items()))))
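# Example (editor's sketch, not part of the original module): hash_recursive
# produces a stable hash for nested structures that are normally unhashable, e.g.
#   hash_recursive({'a': [1, 2], 'b': {'c': 3}})
# recurses into the list and the inner dict first, then hashes the frozenset of
# the outer dict's (key, hashed-value) items.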
def get_json_object(request):
    data = None
    if request.body:
        try:
            # request.body is bytes on Python 3: decode before the quote fix-up.
            # (json.loads dropped its encoding argument in Python 3.9.)
            data = json.loads(request.body.decode('utf-8').replace("'", '"'))
        except (ValueError, UnicodeDecodeError):
            data = None
        if data and isinstance(data, dict):
            return data
def model_field_has_changed(instance, field):
"""
Check if a given field on a model has changed
"""
if not instance.pk:
return False
try:
old_value = instance.__class__._default_manager.filter(pk=instance.pk).values(field).get()[field]
except ObjectDoesNotExist:
return False
    return getattr(instance, field) != old_value
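# Editor's sketch: typical use in a pre_save signal handler (the model field and
# the mail helper are hypothetical):
#   if model_field_has_changed(instance, 'email'):
#       send_confirmation_mail(instance)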
def touch(path):
import os, time
now = time.time()
try:
os.utime(path, (now, now))
except os.error:
raise Exception("Touching '%s' failed" % path)
def construct_object(location, **kwargs):
module, object = location.rsplit(".", 1)
return getattr(import_module(module), object)(**kwargs)
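# Editor's sketch: construct_object instantiates a class from its dotted path,
# e.g. construct_object('datetime.timedelta', days=1) == timedelta(days=1).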
def reload_urlconf():
    from importlib import reload  # reload is no longer a builtin on Python 3
    if settings.ROOT_URLCONF in sys.modules:
        reload(sys.modules[settings.ROOT_URLCONF])
    return import_module(settings.ROOT_URLCONF)

avg_line_length: 29.038462 | max_line_length: 108 | alphanum_fraction: 0.642384

hexsha: 800472d33f2b7470b2413eb8a4fdcc6365a551f1 | size: 5,978 | ext: py | lang: Python
max_stars: wheat_detection/configs/faster_rcnn/faster_dilation.py | fengyouliang/wheat_detection | d056123426a1260c29b486cbb8e44a88a0a3c5bc | ["Apache-2.0"] | count: null | null to null
max_issues: wheat_detection/configs/faster_rcnn/faster_dilation.py | fengyouliang/wheat_detection | d056123426a1260c29b486cbb8e44a88a0a3c5bc | ["Apache-2.0"] | count: null | null to null
max_forks: wheat_detection/configs/faster_rcnn/faster_dilation.py | fengyouliang/wheat_detection | d056123426a1260c29b486cbb8e44a88a0a3c5bc | ["Apache-2.0"] | count: null | null to null
content:
fp16 = dict(loss_scale=512.)
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
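        # editor's note: every stage uses dilation 2 here (mmdetection's ResNet
        # default is (1, 1, 1, 1)); this is the "dilation" variant the config's
        # filename refers to.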
dilations=(2, 2, 2, 2),
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5, class_agnostic=False), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
dataset_type = 'WheatDataset'
data_root = '/home/fengyouliang/datasets/WHD/'
ann_folder = 'ann_stratirfed'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1024, 1024), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=0,
train=dict(
type=dataset_type,
ann_file=data_root + f'{ann_folder}/train.json',
img_prefix=data_root + 'train/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + f'{ann_folder}/val.json',
img_prefix=data_root + 'train/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + f'{ann_folder}/val.json',
img_prefix=data_root + 'train/',
pipeline=test_pipeline))
evaluation = dict(interval=2, metric='bbox', interval_iter=-1)
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
total_epochs = 12
checkpoint_config = dict(interval=2)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
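# Editor's note (assuming mmdetection's standard tooling, which this file does
# not state): a config like this is typically consumed via
#   python tools/train.py wheat_detection/configs/faster_rcnn/faster_dilation.py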

avg_line_length: 31.463158 | max_line_length: 98 | alphanum_fraction: 0.556708

hexsha: ffba4b03bfed13ae9b9031bbd7c20620c683b42c | size: 39,580 | ext: py | lang: Python
max_stars: pypy/interpreter/test/test_argument.py | pymtl/pypy-pymtl3 | d2f66f87686e48aeb1eecabeaa3de1381a149f2c | ["Apache-2.0", "OpenSSL"] | count: 1 | 2021-06-02T23:02:09.000Z to 2021-06-02T23:02:09.000Z
max_issues: pypy/interpreter/test/test_argument.py | pymtl/pypy-pymtl3 | d2f66f87686e48aeb1eecabeaa3de1381a149f2c | ["Apache-2.0", "OpenSSL"] | count: 1 | 2021-03-30T18:08:41.000Z to 2021-03-30T18:08:41.000Z
max_forks: pypy/interpreter/test/test_argument.py | pymtl/pypy-pymtl3 | d2f66f87686e48aeb1eecabeaa3de1381a149f2c | ["Apache-2.0", "OpenSSL"] | count: null | null to null
content:
# -*- coding: utf-8 -*-
import py
import pytest
from pypy.interpreter.argument import (Arguments, ArgErr, ArgErrUnknownKwds,
ArgErrMultipleValues, ArgErrMissing, ArgErrTooMany, ArgErrTooManyMethod,
ArgErrPosonlyAsKwds)
from pypy.interpreter.signature import Signature
from pypy.interpreter.error import OperationError
class TestSignature(object):
def test_helpers(self):
sig = Signature(["a", "b", "c"], None, None)
assert sig.num_argnames() == 3
assert not sig.has_vararg()
assert not sig.has_kwarg()
assert sig.scope_length() == 3
assert sig.getallvarnames() == ["a", "b", "c"]
sig = Signature(["a", "b", "c"], "c", None)
assert sig.num_argnames() == 3
assert sig.has_vararg()
assert not sig.has_kwarg()
assert sig.scope_length() == 4
assert sig.getallvarnames() == ["a", "b", "c", "c"]
sig = Signature(["a", "b", "c"], None, "c")
assert sig.num_argnames() == 3
assert not sig.has_vararg()
assert sig.has_kwarg()
assert sig.scope_length() == 4
assert sig.getallvarnames() == ["a", "b", "c", "c"]
sig = Signature(["a", "b", "c"], "d", "c", ["kwonly"])
assert sig.num_argnames() == 3
assert sig.has_vararg()
assert sig.has_kwarg()
assert sig.scope_length() == 6
assert sig.getallvarnames() == ["a", "b", "c", "d", "kwonly", "c"]
def test_eq(self):
sig1 = Signature(["a", "b", "c"], "d", "c")
sig2 = Signature(["a", "b", "c"], "d", "c")
assert sig1 == sig2
def test_find_argname(self):
sig = Signature(["a", "b", "c"], None, None, ["kwonly"])
assert sig.find_argname("a") == 0
assert sig.find_argname("b") == 1
assert sig.find_argname("c") == 2
assert sig.find_argname("d") == -1
assert sig.find_argname("kwonly") == 3
def test_posonly(self):
sig = Signature(["a", "b", "c"], posonlyargnames=["x", "y", "z"])
# posonly come first
assert sig.find_argname("x") == 0
assert sig.find_argname("y") == 1
assert sig.find_argname("z") == 2
assert sig.find_argname("a") == 3
assert sig.find_argname("b") == 4
assert sig.find_argname("c") == 5
assert sig.find_argname("d") == -1
class dummy_wrapped_dict(dict):
def __nonzero__(self):
raise NotImplementedError
class kwargsdict(dict):
pass
class DummySpace(object):
class sys:
defaultencoding = 'utf-8'
def newtuple(self, items):
return tuple(items)
def is_true(self, obj):
if isinstance(obj, dummy_wrapped_dict):
return bool(dict(obj))
return bool(obj)
def fixedview(self, it):
return list(it)
def listview(self, it):
return list(it)
def unpackiterable(self, it):
return list(it)
def view_as_kwargs(self, x):
if len(x) == 0:
return [], []
return None, None
def newdict(self, kwargs=False):
if kwargs:
return kwargsdict()
return {}
def newlist(self, l=[]):
return l
def setitem(self, obj, key, value):
obj[key] = value
setitem_str = setitem
def getitem(self, obj, key):
return obj[key]
def wrap(self, obj, lgt=-1):
return obj
newtext = wrap
def text_w(self, s):
return self.utf8_w(s)
def utf8_w(self, s):
return s
def len(self, x):
return len(x)
def int_w(self, x, allow_conversion=True):
return x
def eq_w(self, x, y):
return x == y
def isinstance(self, obj, cls):
return isinstance(obj, cls)
isinstance_w = isinstance
def exception_match(self, w_type1, w_type2):
return issubclass(w_type1, w_type2)
def call_method(self, obj, name, *args):
try:
method = getattr(obj, name)
except AttributeError:
raise OperationError(AttributeError, name)
return method(*args)
def lookup_in_type(self, cls, name):
return getattr(cls, name)
def get_and_call_function(self, w_descr, w_obj, *args):
return w_descr.__get__(w_obj)(*args)
def type(self, obj):
class Type:
def getname(self, space):
return type(obj).__name__
name = type(obj).__name__
return Type()
w_TypeError = TypeError
w_AttributeError = AttributeError
w_UnicodeEncodeError = UnicodeEncodeError
w_dict = dict
w_str = str
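# Editor's note: DummySpace above is a minimal stand-in for PyPy's object space.
# It implements just enough of the wrap/unwrap, dict/list-view and
# error-matching API for Arguments and Signature to be exercised in these tests
# without a full interpreter.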
class TestArgumentsNormal(object):
def test_create(self):
space = DummySpace()
args_w = []
args = Arguments(space, args_w)
assert args.arguments_w is args_w
assert args.keywords is None
assert args.keywords_w is None
assert args.firstarg() is None
args = Arguments(space, args_w, w_stararg=["*"],
w_starstararg={"k": 1})
assert args.arguments_w == ["*"]
assert args.keywords == ["k"]
assert args.keywords_w == [1]
assert args.firstarg() == "*"
def test_prepend(self):
space = DummySpace()
args = Arguments(space, ["0"])
args1 = args.prepend("thingy")
assert args1 is not args
assert args1.arguments_w == ["thingy", "0"]
assert args1.keywords is args.keywords
assert args1.keywords_w is args.keywords_w
def test_fixedunpacked(self):
space = DummySpace()
args = Arguments(space, [], ["k"], [1])
py.test.raises(ValueError, args.fixedunpack, 1)
args = Arguments(space, ["a", "b"])
py.test.raises(ValueError, args.fixedunpack, 0)
py.test.raises(ValueError, args.fixedunpack, 1)
py.test.raises(ValueError, args.fixedunpack, 3)
py.test.raises(ValueError, args.fixedunpack, 4)
assert args.fixedunpack(2) == ['a', 'b']
def test_match0(self):
space = DummySpace()
args = Arguments(space, [])
l = []
args._match_signature(None, l, Signature([]))
assert len(l) == 0
l = [None, None]
args = Arguments(space, [])
py.test.raises(ArgErr, args._match_signature, None, l, Signature(["a"]))
args = Arguments(space, [])
py.test.raises(ArgErr, args._match_signature, None, l, Signature(["a"], "*"))
args = Arguments(space, [])
l = [None]
args._match_signature(None, l, Signature(["a"]), defaults_w=[1])
assert l == [1]
args = Arguments(space, [])
l = [None]
args._match_signature(None, l, Signature([], "*"))
assert l == [()]
args = Arguments(space, [])
l = [None]
args._match_signature(None, l, Signature([], None, "**"))
assert l == [{}]
args = Arguments(space, [])
l = [None, None]
py.test.raises(ArgErr, args._match_signature, 41, l, Signature([]))
args = Arguments(space, [])
l = [None]
args._match_signature(1, l, Signature(["a"]))
assert l == [1]
args = Arguments(space, [])
l = [None]
args._match_signature(1, l, Signature([], "*"))
assert l == [(1,)]
def test_match4(self):
space = DummySpace()
values = [4, 5, 6, 7]
for havefirstarg in [0, 1]:
for i in range(len(values)-havefirstarg):
arglist = values[havefirstarg:i+havefirstarg]
starargs = tuple(values[i+havefirstarg:])
if havefirstarg:
firstarg = values[0]
else:
firstarg = None
args = Arguments(space, arglist, w_stararg=starargs)
l = [None, None, None, None]
args._match_signature(firstarg, l, Signature(["a", "b", "c", "d"]))
assert l == [4, 5, 6, 7]
args = Arguments(space, arglist, w_stararg=starargs)
l = [None, None, None, None, None, None]
py.test.raises(ArgErr, args._match_signature, firstarg, l, Signature(["a"]))
args = Arguments(space, arglist, w_stararg=starargs)
l = [None, None, None, None, None, None]
py.test.raises(ArgErr, args._match_signature, firstarg, l, Signature(["a", "b", "c", "d", "e"]))
args = Arguments(space, arglist, w_stararg=starargs)
l = [None, None, None, None, None, None]
py.test.raises(ArgErr, args._match_signature, firstarg, l, Signature(["a", "b", "c", "d", "e"], "*"))
l = [None, None, None, None, None]
args = Arguments(space, arglist, w_stararg=starargs)
args._match_signature(firstarg, l, Signature(["a", "b", "c", "d", "e"]), defaults_w=[1])
assert l == [4, 5, 6, 7, 1]
for j in range(len(values)):
l = [None] * (j + 1)
args = Arguments(space, arglist, w_stararg=starargs)
args._match_signature(firstarg, l, Signature(["a", "b", "c", "d", "e"][:j], "*"))
assert l == values[:j] + [tuple(values[j:])]
l = [None, None, None, None, None]
args = Arguments(space, arglist, w_stararg=starargs)
args._match_signature(firstarg, l, Signature(["a", "b", "c", "d"], None, "**"))
assert l == [4, 5, 6, 7, {}]
def test_match_kwds(self):
space = DummySpace()
for i in range(3):
kwds = [("c", 3)]
kwds_w = dict(kwds[:i])
keywords = kwds_w.keys()
keywords_w = kwds_w.values()
w_kwds = dummy_wrapped_dict(kwds[i:])
if i == 2:
w_kwds = None
assert len(keywords) == len(keywords_w)
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None]
args._match_signature(None, l, Signature(["a", "b", "c"]), defaults_w=[4])
assert l == [1, 2, 3]
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
args._match_signature(None, l, Signature(["a", "b", "b1", "c"]), defaults_w=[4, 5])
assert l == [1, 2, 4, 3]
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
args._match_signature(None, l, Signature(["a", "b", "c", "d"]), defaults_w=[4, 5])
assert l == [1, 2, 3, 5]
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
py.test.raises(ArgErr, args._match_signature, None, l,
Signature(["c", "b", "a", "d"]), defaults_w=[4, 5])
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
py.test.raises(ArgErr, args._match_signature, None, l,
Signature(["a", "b", "c1", "d"]), defaults_w=[4, 5])
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None]
args._match_signature(None, l, Signature(["a", "b"], None, "**"))
assert l == [1, 2, {'c': 3}]
def test_match_kwds2(self):
space = DummySpace()
kwds = [("c", 3), ('d', 4)]
for i in range(4):
kwds_w = dict(kwds[:i])
keywords = kwds_w.keys()
keywords_w = kwds_w.values()
w_kwds = dummy_wrapped_dict(kwds[i:])
if i == 3:
w_kwds = None
args = Arguments(space, [1, 2], keywords, keywords_w, w_starstararg=w_kwds)
l = [None, None, None, None]
args._match_signature(None, l, Signature(["a", "b", "c"], None, "**"))
assert l == [1, 2, 3, {'d': 4}]
def test_match_kwds_creates_kwdict(self):
space = DummySpace()
kwds = [("c", 3), ('d', 4)]
for i in range(4):
kwds_w = dict(kwds[:i])
keywords = kwds_w.keys()
keywords_w = kwds_w.values()
w_kwds = dummy_wrapped_dict(kwds[i:])
if i == 3:
w_kwds = None
args = Arguments(space, [1, 2], keywords, keywords_w, w_starstararg=w_kwds)
l = [None, None, None, None]
args._match_signature(None, l, Signature(["a", "b", "c"], None, "**"))
assert l == [1, 2, 3, {'d': 4}]
assert isinstance(l[-1], kwargsdict)
def test_duplicate_kwds(self):
space = DummySpace()
with pytest.raises(OperationError) as excinfo:
Arguments(space, [], ["a"], [1], w_starstararg={"a": 2}, fnname_parens="foo()")
assert excinfo.value.w_type is TypeError
assert excinfo.value.get_w_value(space) == "foo() got multiple values for keyword argument 'a'"
def test_starstararg_wrong_type(self):
space = DummySpace()
with pytest.raises(OperationError) as excinfo:
Arguments(space, [], ["a"], [1], w_starstararg="hello", fnname_parens="bar()")
assert excinfo.value.w_type is TypeError
assert excinfo.value.get_w_value(space) == "bar() argument after ** must be a mapping, not str"
def test_unwrap_error(self):
space = DummySpace()
valuedummy = object()
def utf8_w(w):
if w is None:
raise OperationError(TypeError, None)
if w is valuedummy:
raise OperationError(ValueError, None)
return bytes(w, 'utf-8')
space.utf8_w = utf8_w
space.text_w = utf8_w
with py.test.raises(OperationError) as excinfo:
Arguments(space, [], ["a"], [1], w_starstararg={None: 1}, fnname_parens="f1()")
assert excinfo.value.w_type is TypeError
assert excinfo.value._w_value is None
with py.test.raises(OperationError) as excinfo:
Arguments(space, [], ["a"], [1], w_starstararg={valuedummy: 1}, fnname_parens="f2()")
assert excinfo.value.w_type is ValueError
assert excinfo.value._w_value is None
def test_blindargs(self):
space = DummySpace()
kwds = [("a", 3), ('b', 4)]
for i in range(4):
kwds_w = dict(kwds[:i])
keywords = kwds_w.keys()
keywords_w = kwds_w.values()
w_kwds = dict(kwds[i:])
if i == 3:
w_kwds = None
args = Arguments(space, [1, 2], keywords[:], keywords_w[:],
w_starstararg=w_kwds)
l = [None, None, None]
args._match_signature(None, l, Signature(["a", "b"], None, "**"), blindargs=2)
assert l == [1, 2, {'a':3, 'b': 4}]
args = Arguments(space, [1, 2], keywords[:], keywords_w[:],
w_starstararg=w_kwds)
l = [None, None, None]
py.test.raises(ArgErrUnknownKwds, args._match_signature, None, l,
Signature(["a", "b"]), blindargs=2)
def test_args_parsing(self):
space = DummySpace()
args = Arguments(space, [])
calls = []
def _match_signature(w_firstarg, scope_w, signature,
defaults_w=None, w_kw_defs=None, blindargs=0):
defaults_w = [] if defaults_w is None else defaults_w
calls.append((w_firstarg, scope_w, signature.argnames, signature.has_vararg(),
signature.has_kwarg(), defaults_w, w_kw_defs, blindargs))
args._match_signature = _match_signature
scope_w = args.parse_obj(None, "foo", Signature(["a", "b"], None, None))
assert len(calls) == 1
assert calls[0] == (None, [None, None], ["a", "b"], False, False,
[], None, 0)
assert calls[0][1] is scope_w
calls = []
scope_w = args.parse_obj(None, "foo", Signature(["a", "b"], "args", None),
blindargs=1)
assert len(calls) == 1
assert calls[0] == (None, [None, None, None], ["a", "b"], True, False,
[], None, 1)
calls = []
scope_w = args.parse_obj(None, "foo", Signature(["a", "b"], "args", "kw"),
defaults_w=['x', 'y'])
assert len(calls) == 1
assert calls[0] == (None, [None, None, None, None], ["a", "b"],
True, True,
["x", "y"], None, 0)
calls = []
scope_w = args.parse_obj("obj", "foo", Signature(["a", "b"], "args", "kw"),
defaults_w=['x', 'y'], blindargs=1)
assert len(calls) == 1
assert calls[0] == ("obj", [None, None, None, None], ["a", "b"],
True, True,
["x", "y"], None, 1)
class FakeArgErr(ArgErr):
def getmsg(self):
return "msg"
def _match_signature(*args):
raise FakeArgErr()
args._match_signature = _match_signature
with pytest.raises(OperationError) as excinfo:
args.parse_obj("obj", "foo",
Signature(["a", "b"], None, None))
assert excinfo.value.w_type is TypeError
assert excinfo.value.get_w_value(space) == "foo() msg"
def test_args_parsing_into_scope(self):
space = DummySpace()
args = Arguments(space, [])
calls = []
def _match_signature(w_firstarg, scope_w, signature,
defaults_w=None, w_kw_defs=None, blindargs=0):
defaults_w = [] if defaults_w is None else defaults_w
calls.append((w_firstarg, scope_w, signature.argnames, signature.has_vararg(),
signature.has_kwarg(), defaults_w, w_kw_defs, blindargs))
args._match_signature = _match_signature
scope_w = [None, None]
args.parse_into_scope(None, scope_w, "foo", Signature(["a", "b"], None, None))
assert len(calls) == 1
assert calls[0] == (None, scope_w, ["a", "b"], False, False,
[], None, 0)
assert calls[0][1] is scope_w
calls = []
scope_w = [None, None, None, None]
args.parse_into_scope(None, scope_w, "foo", Signature(["a", "b"], "args", "kw"),
defaults_w=['x', 'y'])
assert len(calls) == 1
assert calls[0] == (None, scope_w, ["a", "b"],
True, True,
["x", "y"], None, 0)
calls = []
scope_w = [None, None, None, None]
args.parse_into_scope("obj", scope_w, "foo", Signature(["a", "b"],
"args", "kw"),
defaults_w=['x', 'y'])
assert len(calls) == 1
assert calls[0] == ("obj", scope_w, ["a", "b"],
True, True,
["x", "y"], None, 0)
class FakeArgErr(ArgErr):
def getmsg(self):
return "msg"
def _match_signature(*args):
raise FakeArgErr()
args._match_signature = _match_signature
with pytest.raises(OperationError) as excinfo:
args.parse_into_scope("obj", [None, None], "foo",
Signature(["a", "b"], None, None))
assert excinfo.value.w_type is TypeError
assert excinfo.value.get_w_value(space) == "foo() msg"
def test_topacked_frompacked(self):
space = DummySpace()
args = Arguments(space, [1], ['a', 'b'], [2, 3])
w_args, w_kwds = args.topacked()
assert w_args == (1,)
assert w_kwds == {'a': 2, 'b': 3}
args1 = Arguments.frompacked(space, w_args, w_kwds)
assert args.arguments_w == [1]
assert set(args.keywords) == set(['a', 'b'])
assert args.keywords_w[args.keywords.index('a')] == 2
assert args.keywords_w[args.keywords.index('b')] == 3
args = Arguments(space, [1])
w_args, w_kwds = args.topacked()
assert w_args == (1, )
assert not w_kwds
def test_argument_unicode(self):
space = DummySpace()
w_starstar = space.wrap({u'abc': 5})
args = Arguments(space, [], w_starstararg=w_starstar)
l = [None]
args._match_signature(None, l, Signature(['abc']))
assert len(l) == 1
assert l[0] == space.wrap(5)
def test_starstarargs_special(self):
class kwargs(object):
def __init__(self, k, v):
self.k = k
self.v = v
class MyDummySpace(DummySpace):
def view_as_kwargs(self, kw):
if isinstance(kw, kwargs):
return kw.k, kw.v
return None, None
space = MyDummySpace()
for i in range(3):
kwds = [("c", 3)]
kwds_w = dict(kwds[:i])
keywords = kwds_w.keys()
keywords_w = kwds_w.values()
rest = dict(kwds[i:])
w_kwds = kwargs(rest.keys(), rest.values())
if i == 2:
w_kwds = None
assert len(keywords) == len(keywords_w)
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None]
args._match_signature(None, l, Signature(["a", "b", "c"]), defaults_w=[4])
assert l == [1, 2, 3]
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
args._match_signature(None, l, Signature(["a", "b", "b1", "c"]), defaults_w=[4, 5])
assert l == [1, 2, 4, 3]
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
args._match_signature(None, l, Signature(["a", "b", "c", "d"]), defaults_w=[4, 5])
assert l == [1, 2, 3, 5]
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
py.test.raises(ArgErr, args._match_signature, None, l,
Signature(["c", "b", "a", "d"]), defaults_w=[4, 5])
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None, None]
py.test.raises(ArgErr, args._match_signature, None, l,
Signature(["a", "b", "c1", "d"]), defaults_w=[4, 5])
args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
l = [None, None, None]
args._match_signature(None, l, Signature(["a", "b"], None, "**"))
assert l == [1, 2, {'c': 3}]
with pytest.raises(OperationError) as excinfo:
Arguments(space, [], ["a"],
[1], w_starstararg=kwargs(["a"], [2]))
assert excinfo.value.w_type is TypeError
assert excinfo.value.get_w_value(space) == "got multiple values for keyword argument 'a'"
with pytest.raises(OperationError) as excinfo:
Arguments(space, [], ["a"],
[1], w_starstararg=kwargs(["a"], [2]), fnname_parens="foo()")
assert excinfo.value.w_type is TypeError
assert excinfo.value.get_w_value(space) == "foo() got multiple values for keyword argument 'a'"
def test_posonly(self):
space = DummySpace()
sig = Signature(["a", "b", "c"], posonlyargnames=["x", "y", "z"])
args = Arguments(space, [1, 2, 3, 4, 5, 6])
l = [None] * 6
args._match_signature(None, l, sig)
assert l == [1, 2, 3, 4, 5, 6]
args = Arguments(space, [1, 2, 3, 4, 5], ["c"], [6])
l = [None] * 6
args._match_signature(None, l, sig)
assert l == [1, 2, 3, 4, 5, 6]
class TestErrorHandling(object):
def test_missing_args(self):
err = ArgErrMissing(['a'], True)
s = err.getmsg()
assert s == "missing 1 required positional argument: 'a'"
err = ArgErrMissing(['a', 'b'], True)
s = err.getmsg()
assert s == "missing 2 required positional arguments: 'a' and 'b'"
err = ArgErrMissing(['a', 'b', 'c'], True)
s = err.getmsg()
assert s == "missing 3 required positional arguments: 'a', 'b', and 'c'"
err = ArgErrMissing(['a'], False)
s = err.getmsg()
assert s == "missing 1 required keyword-only argument: 'a'"
def test_too_many(self):
sig0 = Signature([], None, None)
err = ArgErrTooMany(sig0, 0, 1, 0)
s = err.getmsg()
assert s == "takes 0 positional arguments but 1 was given"
err = ArgErrTooMany(sig0, 0, 2, 0)
s = err.getmsg()
assert s == "takes 0 positional arguments but 2 were given"
sig1 = Signature(['a'], None, None)
err = ArgErrTooMany(sig1, 0, 2, 0)
s = err.getmsg()
assert s == "takes 1 positional argument but 2 were given"
sig2 = Signature(['a', 'b'], None, None)
err = ArgErrTooMany(sig2, 0, 3, 0)
s = err.getmsg()
assert s == "takes 2 positional arguments but 3 were given"
err = ArgErrTooMany(sig2, 1, 3, 0)
s = err.getmsg()
assert s == "takes from 1 to 2 positional arguments but 3 were given"
err = ArgErrTooMany(sig0, 0, 1, 1)
s = err.getmsg()
assert s == "takes 0 positional arguments but 1 positional argument (and 1 keyword-only argument) were given"
err = ArgErrTooMany(sig0, 0, 2, 1)
s = err.getmsg()
assert s == "takes 0 positional arguments but 2 positional arguments (and 1 keyword-only argument) were given"
err = ArgErrTooMany(sig0, 0, 1, 2)
s = err.getmsg()
assert s == "takes 0 positional arguments but 1 positional argument (and 2 keyword-only arguments) were given"
def test_too_many_method(self):
sig0 = Signature([], None, None)
err = ArgErrTooManyMethod(sig0, 0, 1, 0)
s = err.getmsg()
assert s == "takes 0 positional arguments but 1 was given. Did you forget 'self' in the function definition?"
err = ArgErrTooManyMethod(sig0, 0, 2, 0)
s = err.getmsg()
assert s == "takes 0 positional arguments but 2 were given"
sig1 = Signature(['self'], None, None)
err = ArgErrTooManyMethod(sig1, 0, 2, 0)
s = err.getmsg()
assert s == "takes 1 positional argument but 2 were given"
sig1 = Signature(['a'], None, None)
err = ArgErrTooManyMethod(sig1, 0, 2, 0)
s = err.getmsg()
assert s == "takes 1 positional argument but 2 were given. Did you forget 'self' in the function definition?"
sig2 = Signature(['a', 'b'], None, None)
err = ArgErrTooManyMethod(sig2, 0, 3, 0)
s = err.getmsg()
assert s == "takes 2 positional arguments but 3 were given. Did you forget 'self' in the function definition?"
err = ArgErrTooManyMethod(sig2, 1, 3, 0)
s = err.getmsg()
assert s == "takes from 1 to 2 positional arguments but 3 were given. Did you forget 'self' in the function definition?"
err = ArgErrTooManyMethod(sig0, 0, 1, 1)
s = err.getmsg()
assert s == "takes 0 positional arguments but 1 positional argument (and 1 keyword-only argument) were given. Did you forget 'self' in the function definition?"
err = ArgErrTooManyMethod(sig0, 0, 2, 1)
s = err.getmsg()
assert s == "takes 0 positional arguments but 2 positional arguments (and 1 keyword-only argument) were given"
err = ArgErrTooManyMethod(sig0, 0, 1, 2)
s = err.getmsg()
assert s == "takes 0 positional arguments but 1 positional argument (and 2 keyword-only arguments) were given. Did you forget 'self' in the function definition?"
def test_bad_type_for_star(self):
space = self.space
with pytest.raises(OperationError) as excinfo:
Arguments(space, [], w_stararg=space.wrap(42), fnname_parens="f1()")
msg = space.text_w(excinfo.value.get_w_value(space))
assert msg == "f1() argument after * must be an iterable, not int"
with pytest.raises(OperationError) as excinfo:
Arguments(space, [], w_starstararg=space.wrap(42), fnname_parens="f2()")
msg = space.text_w(excinfo.value.get_w_value(space))
assert msg == "f2() argument after ** must be a mapping, not int"
def test_dont_count_default_arguments(self):
space = self.space
msg = space.unwrap(space.appexec([], """():
def f1(*, c): pass
try:
f1(4)
except TypeError as e:
return str(e)
"""))
assert msg == 'f1() takes 0 positional arguments but 1 was given'
#
msg = space.unwrap(space.appexec([], """():
def f1(*, c=8): pass
try:
f1(4)
except TypeError as e:
return str(e)
"""))
assert msg == 'f1() takes 0 positional arguments but 1 was given'
#
msg = space.unwrap(space.appexec([], """():
def f1(a, b, *, c): pass
try:
f1(4, 5, 6)
except TypeError as e:
return str(e)
"""))
assert msg == 'f1() takes 2 positional arguments but 3 were given'
#
msg = space.unwrap(space.appexec([], """():
def f1(*, c): pass
try:
f1(6, c=7)
except TypeError as e:
return str(e)
"""))
assert msg == 'f1() takes 0 positional arguments but 1 positional argument (and 1 keyword-only argument) were given'
#
msg = space.unwrap(space.appexec([], """():
def f1(*, c, d=8, e=9): pass
try:
f1(6, 2, c=7, d=8)
except TypeError as e:
return str(e)
"""))
assert msg == 'f1() takes 0 positional arguments but 2 positional arguments (and 2 keyword-only arguments) were given'
#
msg = space.unwrap(space.appexec([], """():
def f1(*, c, d=8, e=9, **kwds): pass
try:
f1(6, 2, c=7, d=8, morestuff=9)
except TypeError as e:
return str(e)
"""))
assert msg == 'f1() takes 0 positional arguments but 2 positional arguments (and 2 keyword-only arguments) were given'
def test_unknown_keywords(self):
space = DummySpace()
err = ArgErrUnknownKwds(space, 1, ['a', 'b'], [0], None)
s = err.getmsg()
assert s == "got an unexpected keyword argument 'b'"
err = ArgErrUnknownKwds(space, 1, ['a', 'b'], [1], None)
s = err.getmsg()
assert s == "got an unexpected keyword argument 'a'"
err = ArgErrUnknownKwds(space, 2, ['a', 'b', 'c'],
[0], None)
s = err.getmsg()
assert s == "got 2 unexpected keyword arguments"
def test_unknown_unicode_keyword(self):
class DummySpaceUnicode(DummySpace):
class sys:
defaultencoding = 'utf-8'
space = DummySpaceUnicode()
err = ArgErrUnknownKwds(space, 1, ['a', None, 'b', 'c'],
[0, 3, 2],
[unichr(0x1234), u'b', u'c'])
s = err.getmsg()
assert s == "got an unexpected keyword argument '%s'" % unichr(0x1234).encode('utf-8')
def test_multiple_values(self):
err = ArgErrMultipleValues('bla')
s = err.getmsg()
assert s == "got multiple values for argument 'bla'"
def test_posonly_error(self):
space = DummySpace()
sig = Signature([], posonlyargnames=["x", "y", "z"])
with pytest.raises(ArgErrPosonlyAsKwds) as info:
args = Arguments(space, [1, 2, 3, 4, 5], ["x"], [6])
l = [None] * 6
args._match_signature(None, l, sig)
assert info.value.getmsg() == "got an unexpected keyword argument 'x'"
class AppTestArgument:
def test_error_message(self):
exc = raises(TypeError, (lambda a, b=2: 0), b=3)
assert str(exc.value) == "<lambda>() missing 1 required positional argument: 'a'"
exc = raises(TypeError, (lambda: 0), b=3)
assert str(exc.value) == "<lambda>() got an unexpected keyword argument 'b'"
exc = raises(TypeError, (lambda a, b: 0), 1, 2, 3, a=1)
assert str(exc.value) == "<lambda>() got multiple values for argument 'a'"
exc = raises(TypeError, (lambda a, b=1: 0), 1, 2, 3, a=1)
assert str(exc.value) == "<lambda>() got multiple values for argument 'a'"
exc = raises(TypeError, (lambda a, **kw: 0), 1, 2, 3)
assert str(exc.value) == "<lambda>() takes 1 positional argument but 3 were given"
exc = raises(TypeError, (lambda a, b=1, **kw: 0), 1, 2, 3)
assert str(exc.value) == "<lambda>() takes from 1 to 2 positional arguments but 3 were given"
exc = raises(TypeError, (lambda a, b, c=3, **kw: 0), 1)
assert str(exc.value) == "<lambda>() missing 1 required positional argument: 'b'"
exc = raises(TypeError, (lambda a, b, **kw: 0), 1)
assert str(exc.value) == "<lambda>() missing 1 required positional argument: 'b'"
exc = raises(TypeError, (lambda a, b, c=3, **kw: 0), a=1)
assert str(exc.value) == "<lambda>() missing 1 required positional argument: 'b'"
exc = raises(TypeError, (lambda a, b, **kw: 0), a=1)
assert str(exc.value) == "<lambda>() missing 1 required positional argument: 'b'"
exc = raises(TypeError, '(lambda *, a: 0)()')
assert str(exc.value) == "<lambda>() missing 1 required keyword-only argument: 'a'"
exc = raises(TypeError, '(lambda *, a=1, b: 0)(a=1)')
assert str(exc.value) == "<lambda>() missing 1 required keyword-only argument: 'b'"
exc = raises(TypeError, '(lambda *, kw: 0)(1, kw=3)')
assert str(exc.value) == "<lambda>() takes 0 positional arguments but 1 positional argument (and 1 keyword-only argument) were given"
@pytest.mark.pypy_only
def test_error_message_method(self):
class A(object):
def f0():
pass
def f1(a):
pass
exc = raises(TypeError, lambda : A().f0())
assert exc.value.args[0] == "f0() takes 0 positional arguments but 1 was given. Did you forget 'self' in the function definition?"
exc = raises(TypeError, lambda : A().f1(1))
assert exc.value.args[0] == "f1() takes 1 positional argument but 2 were given. Did you forget 'self' in the function definition?"
def f0():
pass
exc = raises(TypeError, f0, 1)
# does not contain the warning about missing self
assert exc.value.args[0] == "f0() takes 0 positional arguments but 1 was given"
def test_error_message_module_function(self):
import operator # use countOf because it's defined at applevel
exc = raises(TypeError, lambda : operator.countOf(1, 2, 3))
# does not contain the warning
# 'Did you forget 'self' in the function definition?'
assert 'self' not in str(exc.value)
@pytest.mark.pypy_only
def test_error_message_bound_method(self):
class A(object):
def f0():
pass
def f1(a):
pass
m0 = A().f0
exc = raises(TypeError, lambda : m0())
assert exc.value.args[0] == "f0() takes 0 positional arguments but 1 was given. Did you forget 'self' in the function definition?"
m1 = A().f1
exc = raises(TypeError, lambda : m1(1))
assert exc.value.args[0] == "f1() takes 1 positional argument but 2 were given. Did you forget 'self' in the function definition?"
def test_unicode_keywords(self):
def f(**kwargs):
assert kwargs["美"] == 42
f(**{"美" : 42})
#
def f(x): pass
e = raises(TypeError, "f(**{'ü' : 19})")
assert e.value.args[0] == "f() got an unexpected keyword argument 'ü'"
def test_starstarargs_dict_subclass(self):
def f(**kwargs):
return kwargs
class DictSubclass(dict):
def __iter__(self):
yield 'x'
# CPython, as an optimization, looks directly into dict internals when
# passing one via **kwargs.
        x = DictSubclass()
assert f(**x) == {}
x['a'] = 1
assert f(**x) == {'a': 1}
def test_starstarargs_module_dict(self):
def f(**kwargs):
return kwargs
assert f(**globals()) == globals()
def test_cpython_issue4806(self):
def broken():
raise TypeError("myerror")
def g(*args):
pass
try:
g(*(broken() for i in range(1)))
except TypeError as e:
assert str(e) == "myerror"
else:
assert False, "Expected TypeError"
def test_call_iter_dont_eat_typeerror(self):
# same as test_cpython_issue4806, not only for generators
# (only for 3.x, on CPython 2.7 this case still eats the
# TypeError and replaces it with "argument after * ...")
class X:
def __iter__(self):
raise TypeError("myerror")
def f():
pass
e = raises(TypeError, "f(*42)")
assert str(e.value).endswith(
"f() argument after * must be an iterable, not int")
e = raises(TypeError, "f(*X())")
assert str(e.value) == "myerror"
def test_keyword_arg_after_keywords_dict(self):
"""
def f(x, y):
return (x, y)
assert f(**{'x': 5}, y=6) == (5, 6)
"""
def test_error_message_kwargs(self):
def f(x, y):
pass
e = raises(TypeError, "f(y=2, **{3: 5}, x=6)")
assert "f() keywords must be strings" in str(e.value)
e = raises(TypeError, "f(y=2, **{'x': 5}, x=6)")
# CPython figures out the name here, by peeking around in the stack in
# BUILD_MAP_UNPACK_WITH_CALL. we don't, too messy
assert "got multiple values for keyword argument 'x'" in str(e.value)
def test_dict_subclass_with_weird_getitem(self):
# issue 2435: bug-to-bug compatibility with cpython. for a subclass of
# dict, just ignore the __getitem__ and behave like ext_do_call in ceval.c
# which just uses the underlying dict
class d(dict):
def __getitem__(self, key):
return key
for key in ["foo", u"foo"]:
q = d()
q[key] = "bar"
def test(**kwargs):
return kwargs
assert test(**q) == {"foo": "bar"}
def test_issue2996_1(self): """
class Class:
def method(*args, a_parameter=None, **kwargs):
pass
Class().method(**{'a_parameter': 4})
"""
def test_issue2996_2(self): """
class Foo:
def methhh(*args, offset=42):
return args, offset
foo = Foo()
assert foo.methhh(**{}) == ((foo,), 42)
"""

avg_line_length: 40.141988 | max_line_length: 169 | alphanum_fraction: 0.537494

hexsha: d02b913a2717ccb207fe58f2771c4f8c0680c4c5 | size: 893 | ext: py | lang: Python
max_stars: src/data_wash.py | topologyYDM/dectree_QSO | 3008ad7eb8ac9ba13b7182cb063aa41af05818fe | ["MIT"] | count: 3 | 2019-03-05T14:02:21.000Z to 2019-03-30T02:13:40.000Z
max_issues: src/data_wash.py | topologyYDM/dectree_QSO | 3008ad7eb8ac9ba13b7182cb063aa41af05818fe | ["MIT"] | count: 15 | 2019-03-25T02:17:50.000Z to 2019-04-12T03:41:29.000Z
max_forks: src/data_wash.py | topologyYDM/dectree_QSO | 3008ad7eb8ac9ba13b7182cb063aa41af05818fe | ["MIT"] | count: 1 | 2019-03-03T08:13:12.000Z to 2019-03-03T08:13:12.000Z
content:
# -*- coding: utf-8 -*-
import sys
import numpy as np
filename = sys.argv[1]
colors = ['r', 'i', 'u', 'z', 'g']
with open('./data/light_curve/' + filename) as f:
lines = f.readlines()
atbss = []
atbs_r = []
atbs_i = []
atbs_u = []
atbs_z = []
atbs_g = []
atbss.append(atbs_r)
atbss.append(atbs_i)
atbss.append(atbs_u)
atbss.append(atbs_z)
atbss.append(atbs_g)
count = 0
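# Each group of five consecutive input lines corresponds to the r, i, u, z, g
# bands in order; `count` tracks the position within the current group of five.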
for line in lines:
    atb = []
line = line.split()
try:
atb.append(float(line[0]))
atb.append(float(line[2]))
atb.append(float(line[3]))
    except (IndexError, ValueError):  # skip lines that fail to parse
count = count + 1
if count == 5:
count = 0
continue
atbss[count].append(atb)
count = count + 1
if count == 5:
count = 0
for j in range(0, 5):
color = colors[j]
with open(filename + str(color) + '.dat', 'w+') as ff:
for i in range(len(atbss[j])):
            print(atbss[j][i][0], atbss[j][i][1], atbss[j][i][2], file=ff)

avg_line_length: 15.135593 | max_line_length: 61 | alphanum_fraction: 0.578947

hexsha: b4f3a5541c00e8b284013b5335700d9588d8e985 | size: 4,156 | ext: py | lang: Python
max_stars: phuber/network.py | dmizr/phuber | 3b70eadd9bd1420047ada743ff5604eda48d63ac | ["MIT"] | count: 12 | 2020-12-17T14:54:03.000Z to 2021-12-13T21:30:13.000Z
max_issues: phuber/network.py | dmizr/phuber | 3b70eadd9bd1420047ada743ff5604eda48d63ac | ["MIT"] | count: 5 | 2020-12-30T21:18:05.000Z to 2021-04-16T21:27:35.000Z
max_forks: phuber/network.py | dmizr/phuber | 3b70eadd9bd1420047ada743ff5604eda48d63ac | ["MIT"] | count: 7 | 2021-04-15T02:13:26.000Z to 2021-12-01T21:20:59.000Z
content:
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
import torchvision
from torchvision.models.resnet import BasicBlock, Bottleneck
from phuber.utils import truncated_normal
class LeNet(nn.Module):
"""LeNet-5 from `"Gradient-Based Learning Applied To Document Recognition"
<http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`_
"""
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.relu = nn.ReLU()
self.avgpool = nn.AvgPool2d(kernel_size=2)
self._init_weights()
# ref: https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778
def _init_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
# truncated normal distribution with std 0.1 (truncate > 2 x std)
# https://www.tensorflow.org/api_docs/python/tf/random/truncated_normal
weights = truncated_normal(list(m.weight.shape), threshold=0.1 * 2)
weights = torch.from_numpy(weights)
m.weight.data.copy_(weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.avgpool(self.relu(self.conv1(x)))
x = self.avgpool(self.relu(self.conv2(x)))
x = torch.flatten(x, start_dim=1)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
class ResNet(torchvision.models.ResNet):
"""Modifies `torchvision's ResNet implementation
<https://pytorch.org/docs/stable/_modules/torchvision/models/resnet.html>`_
to make it suitable for CIFAR 10/100.
Removes or replaces some down-sampling layers to increase the size of the feature
maps, in order to make it suitable for classification tasks on datasets with smaller
images such as CIFAR 10/100.
This network architecture is similar to the one used in
`"Improved Regularization of Convolutional Neural Networks with Cutout"
<https://arxiv.org/pdf/1708.04552.pdf>`_
(code `here <https://github.com/uoguelph-mlrg/Cutout>`_) and in the popular
`pytorch-cifar repository <https://github.com/kuangliu/pytorch-cifar>`_.
"""
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 100,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__(
block,
layers,
num_classes,
zero_init_residual,
groups,
width_per_group,
replace_stride_with_dilation,
norm_layer,
)
# CIFAR: kernel_size 7 -> 3, stride 2 -> 1, padding 3->1
self.conv1_planes = 64
self.conv1 = nn.Conv2d(
3, self.conv1_planes, kernel_size=3, stride=1, padding=1, bias=False
)
# Remove maxpool layer from forward by changing it into an identity layer
self.maxpool = nn.Identity()
def resnet18(**kwargs: Any) -> ResNet:
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition"
<https://arxiv.org/pdf/1512.03385.pdf>`_, modified for CIFAR-10/100 images.
Args:
**kwargs: Keyword arguments, notably num_classes for the number of classes
"""
return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet50(**kwargs: Any) -> ResNet:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition"
<https://arxiv.org/pdf/1512.03385.pdf>`_ modified for CIFAR-10/100 images.
Args:
**kwargs: Keyword arguments, notably num_classes for the number of classes
"""
return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
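# Editor's sketch (not in the original file): a quick shape check on a
# CIFAR-sized input. With the 3x3 stride-1 conv1 and the Identity maxpool above,
# a 32x32 image passes through without the early down-sampling of stock ResNet.
if __name__ == "__main__":
    model = resnet18(num_classes=10)
    out = model(torch.randn(2, 3, 32, 32))  # CIFAR images are 3x32x32
    assert out.shape == (2, 10)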

avg_line_length: 36.13913 | max_line_length: 88 | alphanum_fraction: 0.637392

hexsha: 0b4adb3fcb4db6aa0ef40651928c16aa0ead3898 | size: 1,142 | ext: py | lang: Python
max_stars: test/make_test_suite_json.py | tzlaine/yaml | 958716b79697d4501f57d10eca8d3024047f5e2d | ["BSL-1.0"] | count: null | null to null
max_issues: test/make_test_suite_json.py | tzlaine/yaml | 958716b79697d4501f57d10eca8d3024047f5e2d | ["BSL-1.0"] | count: 2 | 2019-10-31T02:01:21.000Z to 2019-10-31T02:13:51.000Z
max_forks: test/make_test_suite_json.py | tzlaine/yaml | 958716b79697d4501f57d10eca8d3024047f5e2d | ["BSL-1.0"] | count: null | null to null
content:
#!/usr/bin/env python
import argparse
import os
import shutil
import sys
parser = argparse.ArgumentParser(description='Create an index of the .json test files from JSONTestSuite.')
parser.add_argument('--json-test-suite-path', '-y', type=str, required=True,
help='the path to JSONTestSuite')
parser.add_argument('--output-dir', '-o', type=str, required=True,
help='the directory into which to write the json file index')
args = parser.parse_args()
in_path = os.path.join(args.json_test_suite_path, 'test_parsing')
all_files = sorted([f for f in os.listdir(in_path) if os.path.isfile(os.path.join(in_path, f))])
json_files = [f for f in all_files if f.endswith('.json')]
# Index only the .json files (json_files was previously computed but unused):
# by JSONTestSuite convention, n_*.json inputs must be rejected by the parser;
# everything else goes into the main index.
tests = [f for f in json_files if not f.startswith('n_')]
error_tests = [f for f in json_files if f.startswith('n_')]
with open(os.path.join(args.output_dir, 'json_index.cmake'), 'w') as index_file:
    index_file.write('set(json_file_index\n\n')
    index_file.write('\n'.join(tests) + '\n')
    index_file.write('\n)\n')
    index_file.write('set(error_json_file_index\n\n')
    index_file.write('\n'.join(error_tests) + '\n')
    index_file.write('\n)\n')
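# Editor's sketch of the generated json_index.cmake (file names follow the
# JSONTestSuite convention: y_* must parse, n_* must fail, i_* is free):
#   set(json_file_index
#
#   i_number_huge_exp.json
#   y_array_empty.json
#   )
#   set(error_json_file_index
#
#   n_array_extra_comma.json
#   )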

avg_line_length: 38.066667 | max_line_length: 107 | alphanum_fraction: 0.707531

hexsha: b6f0eabc8f665c3e230038934f1b11c9a2a45ed6 | size: 11,251 | ext: py | lang: Python
max_stars: Code/src/models/optim/DSAD_trainer.py | antoine-spahr/X-ray-Anomaly-Detection | 850b6195d6290a50eee865b4d5a66f5db5260e8f | ["MIT"] | count: 2 | 2020-10-12T08:25:13.000Z to 2021-08-16T08:43:43.000Z
max_issues: Code/src/models/optim/DSAD_trainer.py | antoine-spahr/X-ray-Anomaly-Detection | 850b6195d6290a50eee865b4d5a66f5db5260e8f | ["MIT"] | count: null | null to null
max_forks: Code/src/models/optim/DSAD_trainer.py | antoine-spahr/X-ray-Anomaly-Detection | 850b6195d6290a50eee865b4d5a66f5db5260e8f | ["MIT"] | count: 1 | 2020-06-17T07:40:17.000Z to 2020-06-17T07:40:17.000Z
content:
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.metrics import roc_auc_score
from sklearn.manifold import TSNE
from src.models.optim.CustomLosses import DeepSADLoss
from src.utils.utils import print_progessbar
class DSAD_trainer:
"""
Trainer for the DSAD.
"""
def __init__(self, c, eta, lr=1e-4, n_epoch=150, lr_milestone=(), batch_size=64,
weight_decay=1e-6, device='cuda', n_job_dataloader=0, print_batch_progress=False):
"""
Constructor of the DeepSAD trainer.
----------
INPUT
|---- c (torch.Tensor) the hypersphere center.
|---- eta (float) the deep SAD parameter weighting the importance of
        |      unknown/known samples in learning.
|---- lr (float) the learning rate.
|---- n_epoch (int) the number of epoch.
|---- lr_milestone (tuple) the lr update steps.
|---- batch_size (int) the batch_size to use.
|---- weight_decay (float) the weight_decay for the Adam optimizer.
|---- device (str) the device to work on ('cpu' or 'cuda').
        |---- n_job_dataloader (int) number of workers for the dataloader.
        |---- print_batch_progress (bool) whether to display the batch
        |      progress bar.
OUTPUT
|---- None
"""
# learning parameters
self.lr = lr
self.n_epoch = n_epoch
self.lr_milestone = lr_milestone
self.batch_size = batch_size
self.weight_decay = weight_decay
self.device = device
self.n_job_dataloader = n_job_dataloader
self.print_batch_progress = print_batch_progress
# DeepSAD parameters
self.c = torch.tensor(c, device=self.device) if c is not None else None
self.eta = eta
# Optimization parameters
self.eps = 1e-6
# Results
self.train_time = None
self.train_loss = None
self.eval_auc = None
self.eval_time = None
self.eval_scores = None
def train(self, dataset, net, valid_dataset=None):
"""
Train the DeepSAD network on the provided dataset.
----------
INPUT
|---- dataset (torch.utils.data.Dataset) the dataset on which the
| network is trained. It must return an image and
        |      semi-supervised labels.
|---- net (nn.Module) The DeepSAD to train.
|---- valid_dataset (torch.utils.data.Dataset) the dataset on which
| to validate the network at each epoch. Not validated if
| not provided.
OUTPUT
|---- net (nn.Module) The trained DeepSAD.
"""
logger = logging.getLogger()
# make the train dataloader
train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
shuffle=True, num_workers=self.n_job_dataloader)
# put net to device
net = net.to(self.device)
# initialize hypersphere center
if self.c is None:
logger.info(' Initializing the hypersphere center.')
self.c = self.initialize_hypersphere_center(train_loader, net)
            logger.info(' Center successfully initialized.')
# define loss criterion
loss_fn = DeepSADLoss(self.c, self.eta, eps=self.eps)
# define optimizer
optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# define scheduler
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestone, gamma=0.1)
# Start training
logger.info('Start Training the DeepSAD.')
start_time = time.time()
epoch_loss_list = []
n_batch = len(train_loader)
for epoch in range(self.n_epoch):
net.train()
epoch_loss = 0.0
epoch_start_time = time.time()
for b, data in enumerate(train_loader):
                # get input and semi-supervised labels
input, _, mask, semi_label, _ = data
# put them to device
input = input.to(self.device).float().requires_grad_(True)
mask = mask.to(self.device)
semi_label = semi_label.to(self.device)
# mask input
input = input * mask
# zero the network's gradients
optimizer.zero_grad()
# optimize by backpropagation
_, embed = net(input)
loss = loss_fn(embed, semi_label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
if self.print_batch_progress:
print_progessbar(b, n_batch, Name='\t\t Train Batch', Size=40, erase=True)
# validate if required
valid_auc = ''
if valid_dataset:
auc = self.evaluate(net, valid_dataset, return_auc=True, print_to_logger=False, save_tSNE=False)
valid_auc = f' Valid AUC {auc:.3%} |'
# log the epoch statistics
logger.info(f'----| Epoch: {epoch + 1:03}/{self.n_epoch:03} '
f'| Train Time: {time.time() - epoch_start_time:.3f} [s] '
f'| Train Loss: {epoch_loss / n_batch:.6f} |' + valid_auc)
epoch_loss_list.append([epoch+1, epoch_loss/n_batch])
# update scheduler
scheduler.step()
if epoch + 1 in self.lr_milestone:
logger.info(f'---- LR Scheduler : new learning rate {scheduler.get_lr()[0]:g}')
# End training
self.train_loss = epoch_loss_list
self.train_time = time.time() - start_time
logger.info(f'---- Finished Training DSAD in {self.train_time:.3f} [s]')
return net
def evaluate(self, net, dataset, return_auc=False, print_to_logger=True, save_tSNE=True):
"""
Evaluate the DSAD network on the provided dataset.
----------
INPUT
|---- net (nn.Module) The DeepSAD network to validate.
|---- dataset (torch.utils.data.Dataset) the dataset on which the
| network is evaluated.
|---- return_auc (bool) whether to return the computed auc or not.
|---- print_to_logger (bool) whether to print in the logger.
|---- save_tSNE (bool) whether to save a 2D t-SNE representation of
        |      the embedded data points.
OUTPUT
        |---- auc (float) the computed AUC, returned only if return_auc is True.
"""
if print_to_logger:
logger = logging.getLogger()
# make dataloader
loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
shuffle=True, num_workers=self.n_job_dataloader)
# put net on device
net = net.to(self.device)
# Evaluating
if print_to_logger:
logger.info('Start Evaluating the DSAD.')
start_time = time.time()
idx_label_score = []
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data on device
input, label, mask, semi_label, idx = data
input = input.to(self.device).float()
label = label.to(self.device)
mask = mask.to(self.device)
semi_label = semi_label.to(self.device)
idx = idx.to(self.device)
# mask input
input = input * mask
# Embed input and compute anomaly score
_, embed = net(input)
score = torch.norm(self.c - embed, p=2, dim=1)
# append idx, scores, label and embeding
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
label.cpu().data.numpy().tolist(),
score.cpu().data.numpy().tolist(),
embed.cpu().data.numpy().tolist()))
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\t Evaluation Batch', Size=40, erase=True)
# compute AUCs
index, label, score, embed = zip(*idx_label_score)
label, score = np.array(label), np.array(score)
auc = roc_auc_score(label, score)
if save_tSNE:
embed = np.array(embed)
embed = TSNE(n_components=2).fit_transform(embed)
idx_label_score = list(zip(index, label.tolist(), score.tolist(), embed.tolist()))
self.eval_time = time.time() - start_time
self.eval_scores = idx_label_score
self.eval_auc = auc
if print_to_logger:
logger.info(f'Evaluation Time : {self.eval_time}')
logger.info(f'Evaluation AUC : {self.eval_auc:.3%}')
logger.info('Finished Evaluating the DSAD.')
if return_auc:
return auc
def initialize_hypersphere_center(self, loader, net, eps=0.1):
"""
Initialize the hypersphere center as the mean output of the network over
one forward pass.
----------
INPUT
|---- loader (torch.utils.data.DataLoader) the loader of the data.
|---- net (nn.Module) the DeepSAD network. The output must be a vector
| embedding of the input.
|---- eps (float) the epsilon representing the minimum value of the
| component of the center.
OUTPUT
|---- c (torch.Tensor) the initialized center.
"""
n_sample = 0
net.eval()
with torch.no_grad():
            # get the embedding dimension with one forward pass of one batch
sample = next(iter(loader))[0].float()
embed_dim = net(sample.to(self.device))[1].shape[1]
# initialize c
c = torch.zeros(embed_dim, device=self.device)
# get the output of all samples and accumulate them
for b, data in enumerate(loader):
input, _, mask, semi_label, _ = data
input = input.to(self.device).float()
mask = mask.to(self.device)
semi_label = semi_label.to(self.device)
                # mask input and keep only the normal samples
input = (input * mask)[semi_label != -1]
_, embed = net(input)
n_sample += embed.shape[0]
c += torch.sum(embed, dim=0)
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\t Center Initialization Batch', Size=40, erase=True)
# take the mean of accumulated c
c /= n_sample
        # clamp components of c lying within eps of zero so the center cannot be trivially matched to zero
c[(torch.abs(c) < eps) & (c < 0)] = -eps
c[(torch.abs(c) < eps) & (c > 0)] = eps
return c
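    # Numeric sketch of the eps clamp above (illustrative values): with
    # eps = 0.1, a center component of 0.03 is pushed up to 0.1 and one of
    # -0.03 down to -0.1, so no component of c sits close enough to zero for
    # the network to trivially map embeddings onto it.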
| 39.202091
| 114
| 0.552218
|
3b519040719ec0654c62e6fdba9805c035ca15e6
| 3,733
|
py
|
Python
|
floodsystem/datafetcher.py
|
PamposhMam/silver-pancakce
|
bf67429289ece1c710f32f8f21d691867c8d0a7b
|
[
"MIT"
] | null | null | null |
floodsystem/datafetcher.py
|
PamposhMam/silver-pancakce
|
bf67429289ece1c710f32f8f21d691867c8d0a7b
|
[
"MIT"
] | null | null | null |
floodsystem/datafetcher.py
|
PamposhMam/silver-pancakce
|
bf67429289ece1c710f32f8f21d691867c8d0a7b
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module provides functionality for retrieving real-time and
latest time history level data
"""
import datetime
import json
import os
import dateutil.parser
import requests
def fetch(url):
"""Fetch data from url and return fetched JSON object"""
r = requests.get(url)
data = r.json()
return data
def dump(data, filename):
    """Save JSON object to file"""
    with open(filename, 'w') as f:
        json.dump(data, f)
def load(filename):
    """Load JSON object from file"""
    with open(filename, 'r') as f:
        return json.load(f)
def fetch_station_data(use_cache=True):
"""Fetch data from Environment agency for all active river level
monitoring stations via a REST API and return retrieved data as a
JSON object.
Fetched data is dumped to a cache file so on subsequent call it can
optionally be retrieved from the cache file. This is faster than
retrieval over the Internet and avoids excessive calls to the
Environment Agency service.
"""
# URL for retrieving data for active stations with river level
# monitoring (see
# http://environment.data.gov.uk/flood-monitoring/doc/reference)
url = "http://environment.data.gov.uk/flood-monitoring/id/stations?status=Active¶meter=level&qualifier=Stage&_view=full" # noqa
sub_dir = 'cache'
try:
os.makedirs(sub_dir)
except FileExistsError:
pass
cache_file = os.path.join(sub_dir, 'station_data.json')
# Attempt to load station data from file, otherwise fetch over
# Internet
if use_cache:
try:
# Attempt to load from file
data = load(cache_file)
except FileNotFoundError:
# If load from file fails, fetch and dump to file
data = fetch(url)
dump(data, cache_file)
else:
# Fetch and dump to file
data = fetch(url)
dump(data, cache_file)
return data
def fetch_latest_water_level_data(use_cache=False):
"""Fetch latest levels from all 'measures'. Returns JSON object"""
# URL for retrieving data
url = "http://environment.data.gov.uk/flood-monitoring/id/measures?parameter=level&qualifier=Stage&qualifier=level" # noqa
sub_dir = 'cache'
try:
os.makedirs(sub_dir)
except FileExistsError:
pass
cache_file = os.path.join(sub_dir, 'level_data.json')
# Attempt to load level data from file, otherwise fetch over
# Internet
if use_cache:
try:
# Attempt to load from file
data = load(cache_file)
except FileNotFoundError:
data = fetch(url)
dump(data, cache_file)
else:
data = fetch(url)
dump(data, cache_file)
return data
def fetch_measure_levels(measure_id, dt):
"""Fetch measure levels from latest reading and going back a period
dt. Return list of dates and a list of values.
"""
# Current time (UTC)
now = datetime.datetime.utcnow()
# Start time for data
start = now - dt
# Construct URL for fetching data
url_base = measure_id
url_options = "/readings/?_sorted&since=" + start.isoformat() + 'Z'
url = url_base + url_options
# Fetch data
data = fetch(url)
# Extract dates and levels
dates, levels = [], []
    for measure in data['items']:
        # Convert date-time string to a datetime object
        d = dateutil.parser.parse(measure['dateTime'])
        # Append data, skipping readings that carry no level value so that
        # the dates and levels lists stay aligned
        try:
            levels.append(measure['value'])
            dates.append(d)
        except KeyError:
            continue
return dates, levels
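# Usage sketch (not part of the original module; the measure URL below is
# illustrative and truncated):
#   import datetime
#   dt = datetime.timedelta(days=2)
#   dates, levels = fetch_measure_levels(
#       "http://environment.data.gov.uk/flood-monitoring/id/measures/...", dt)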
| 25.744828
| 136
| 0.643182
|
e09baca08a9e032a8d6795ee8e247fce9c0e35af
| 32,520
|
py
|
Python
|
VideoShow.py
|
leavin296/fresheng
|
617ededc456e765e42c711336e23eb0931466163
|
[
"MIT"
] | null | null | null |
VideoShow.py
|
leavin296/fresheng
|
617ededc456e765e42c711336e23eb0931466163
|
[
"MIT"
] | null | null | null |
VideoShow.py
|
leavin296/fresheng
|
617ededc456e765e42c711336e23eb0931466163
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'VideoShow.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QDir, QUrl, Qt, QThread, QSizeF, QSize, QRectF
from PyQt5.QtWidgets import QWidget, QFileDialog, QSlider, QGraphicsScene, QGraphicsView
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent, QMediaObject
from PyQt5.QtMultimediaWidgets import QVideoWidget, QGraphicsVideoItem
from win32api import GetSystemMetrics
from Slider import *
import pysrt
import time
class VideoShow(QtWidgets.QWidget):
def __init__(self, parent = None):
super().__init__()
self.parent = parent
self.setupUi(self)
self.start()
#self.turnOnOrOffButton(False)
def start(self):
self.toolButton.setEnabled(False)
self.listStartTime = []
self.indexNextSub = 0
self.isEnableSub = False
self.isEnableSub2 = False
self.isOpenedVideo = False
self.loadedPath = ['','',''] # video, sub1, sub2
self.isEngOrVieOrTwiceSubLabel = 3
self.buttonCtrlPressed = False
self.skipTime = 500
    # Disable focus so that arrow-key button events can be caught
def setChildrenFocusPolicy (self, policy):
def recursiveSetChildFocusPolicy (parentQWidget):
for childQWidget in parentQWidget.findChildren(QWidget):
childQWidget.setFocusPolicy(policy)
recursiveSetChildFocusPolicy(childQWidget)
recursiveSetChildFocusPolicy(self)
def keyReleaseEvent(self, event):
if event.key() == Qt.Key_Control:
self.buttonCtrlPressed = False
def keyPressEvent(self, event):
#print(self.buttonCtrlPressed)
self.notificationVideo.setText('')
if event.key() == Qt.Key_Control:
self.buttonCtrlPressed = True
#print(self.isFullScreen())
if event.key() == Qt.Key_Escape and self.isFullScreen():
self.fullScreen()
if event.key() == Qt.Key_Enter:
if self.frameSlider.isHidden():
self.frameSlider.show()
self.frameButton.show()
else:
self.frameSlider.hide()
self.frameButton.hide()
if event.key() == Qt.Key_Space:
self.play()
if event.key() == Qt.Key_Right:
self.forwardVideo()
if event.key() == Qt.Key_Left:
self.backwardVideo()
if event.key() == Qt.Key_Comma and not self.buttonCtrlPressed:
self.backwardSub()
if event.key() == Qt.Key_Period and not self.buttonCtrlPressed:
self.forwardSub()
if event.key() == Qt.Key_E:
self.isEngOrVieOrTwiceSubLabel = 1
self.setLabelVieo(1)
if event.key() == Qt.Key_V:
self.isEngOrVieOrTwiceSubLabel = 2
self.setLabelVieo(2)
if event.key() == Qt.Key_T:
self.isEngOrVieOrTwiceSubLabel = 3
self.setLabelVieo(3)
        if event.key() == Qt.Key_S and not self.buttonCtrlPressed:  # keyReleaseEvent is also used to track the Ctrl key state
if self.labelVideo.isVisible():
self.labelVideo.setVisible(False)
else:
self.labelVideo.setVisible(True)
if event.key() == Qt.Key_F:
self.fullScreen()
modifiers = QtWidgets.QApplication.keyboardModifiers()
if modifiers == QtCore.Qt.ControlModifier and event.key() == Qt.Key_Comma:
print('skip -%d'%self.skipTime)
self.skipSub(-1)
if modifiers == QtCore.Qt.ControlModifier and event.key() == Qt.Key_Period:
print('skip +%d'%self.skipTime)
self.skipSub(1)
if modifiers == QtCore.Qt.ControlModifier and event.key() == Qt.Key_S:
self.saveCoupleFromButtonEvent()
# mouseMouve Event
# def mouseMoveEvent(self, event):
# if self.frameSlider.isHidden():
# self.frameSlider.show()
# print('show frameslider')
# QThread.sleep(3)
# self.frameSlider.hide()
# print('hide frameslider')
def setLabelVieo(self, typeof):
if self.labelVideo.text() != '':
if self.indexNextSub != 0 or self.indexNextSub != len(self.listStartTime) - 1:
text = self.listStartTime[self.indexNextSub-1][0].text()
else:
text = self.listStartTime[self.indexNextSub][0].text()
if typeof == 1 and self.isEnableSub:
self.labelVideo.setText(text.split('\n')[0])
elif typeof ==2 and self.isEnableSub2:
self.labelVideo.setText(text.split('\n')[1])
elif self.isEnableSub and self.isEnableSub2:
self.labelVideo.setText(text)
def turnOnOrOffSubVideoLabel(self):
if self.labelVideo.isVisible():
self.labelVideo.setVisible(False)
else:
self.labelVideo.setVisible(True)
def forwardVideo(self):
pos = self.mediaPlayer.position()
if self.isEnableSub:
if self.indexNextSub >=1:
self.indexNextSub-=1
self.grabIndexCurrent(pos + 10000, len(self.listStartTime)-1, 1)
self.mediaPlayer.setPosition(pos+ 10000)
def backwardVideo(self):
pos = self.mediaPlayer.position()
if self.isEnableSub:
if self.indexNextSub < len(self.listStartTime)-1:
self.indexNextSub+=1
if pos > self.maxTimeVideo:
pos = self.maxTimeVideo
self.grabIndexCurrent(pos - 10000, 0, -1)
self.mediaPlayer.setPosition(pos- 10000)
def forwardSub(self):
if self.isEnableSub:
self.mediaPlayer.setPosition(self.listStartTime[self.indexNextSub][2])
self.setStatusScrollArea()
def backwardSub(self):
if self.isEnableSub and self.indexNextSub >= 2:
self.indexNextSub -=2
self.mediaPlayer.setPosition(self.listStartTime[self.indexNextSub][2])
self.setStatusScrollArea()
def skipSub(self, direction):
# if self.isEnableSub:
# self.setStatusScrollArea()
for item in self.listStartTime:
item[2]+=self.skipTime*direction
item[3]+=self.skipTime*direction
self.notificationVideo.setText('skip %d'%(direction*self.skipTime))
def calculateTime(self, subRipTime):
time = str(subRipTime).replace(',', ':')
time = time.split(':')
time = list(map(lambda x: int(x), time))
time = (time[0]*3600 + time[1]*60 + time[2])*1000 + time[3]
return time
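    # Worked example: a SubRip time of '00:01:02,500' splits into
    # [0, 1, 2, 500] and yields (0*3600 + 1*60 + 2)*1000 + 500 = 62500 ms.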
def saveCoupleFromButtonEvent(self):
if self.isEnableSub and self.isEnableSub2:
checkbox = None
if self.indexNextSub !=0:
checkbox = self.listStartTime[self.indexNextSub-1][0]
else:
checkbox = self.listStartTime[self.indexNextSub][0]
if checkbox.isChecked():
checkbox.setChecked(False)
else:
checkbox.setChecked(True)
self.saveCouple(checkbox)
def saveCouple(self, checkbox):
if self.isEnableSub and not self.actionOpenSub2.isEnabled():
text = checkbox.text()
if text.find('\n') == -1:
checkbox.setEnabled(False)
QtWidgets.QMessageBox.information(None, 'WARNING', "Can't save with single sentence")
return
couple = text.split('\n')
print(checkbox.isChecked())
if not checkbox.isChecked():
self.parent.parent.DictDB.delete('ENG', couple[0])
QtWidgets.QMessageBox.information(None, 'Notification', 'Couple was deleted !')
else:
result1 = self.parent.parent.DictDB.selectRowIDByEngOrVie('ENG', couple[0])
result2 = self.parent.parent.DictDB.selectRowIDByEngOrVie('VIE', couple[1])
if len(result1) == 0 and len(result2) == 0:
self.parent.parent.DictDB.insert(couple[0], couple[1], 0)
QtWidgets.QMessageBox.information(None, 'Notification', 'Couple was inserted !')
else:
QtWidgets.QMessageBox.information(None, 'WARNING', 'Couple already exist in revision')
self.buttonCtrlPressed = False
def eventSubCheckBox(self, checkbox):
def event():
self.saveCouple(checkbox)
return event
def eventSubButton(self, time, index):
        # time is given in milliseconds
def event():
self.mediaPlayer.setPosition(time)
self.indexNextSub = index
self.setStatusScrollArea()
return event
def enableCheckBoxsOrButtonScroll(self, index):
if self.isEnableSub:
for item in self.listStartTime:
item[index].setEnabled(True)
def loadSubToScroll(self, fileName):
subs = pysrt.open(fileName)
for index, value in enumerate(subs):
horizontalLayoutScroll = QtWidgets.QHBoxLayout()
checkbox = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
checkbox.setObjectName('checkbox%s'%(index))
text = value.text.replace('\n', ' ').replace("\"", "''")
checkbox.setText(text.replace('\xa0', ' '))
checkbox.setFixedHeight(50)
checkbox.setFont(QtGui.QFont('Times New Roman', 10))
checkbox.clicked.connect(self.eventSubCheckBox(checkbox))
checkbox.setFocusPolicy(Qt.NoFocus)
button = QtWidgets.QToolButton(self.scrollAreaWidgetContents)
button.setObjectName('button%s'%(index))
button.setFixedHeight(20)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("assets/access.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
button.setIcon(icon)
button.setIconSize(QtCore.QSize(32, 32))
button.setFocusPolicy(Qt.NoFocus)
checkbox.setEnabled(False)
if not self.isOpenedVideo:
button.setEnabled(False)
startTime = self.calculateTime(value.start)
endTime = self.calculateTime(value.end)
button.clicked.connect(self.eventSubButton(startTime, index))
horizontalLayoutScroll.addWidget(checkbox,0)
horizontalLayoutScroll.addWidget(button,10)
horizontalLayoutScroll.setContentsMargins(0, 0, 50, 0) # left, top, right, bottom
self.verticalScroll.addLayout(horizontalLayoutScroll)
self.listStartTime.append([checkbox, button, startTime, endTime, value.start])
def LoadOtherSub(self, fileName):
subs = pysrt.open(fileName)
if subs[5].start == self.listStartTime[5][2] and subs[10].end == self.listStartTime[10][3]:
for index, item in enumerate(self.listStartTime):
while subs[0].start < item[4]:
del subs[0]
if subs[0].start == item[4]:
text = subs[0].text.replace('\n', ' ').replace('\xa0', ' ').replace("\"", "''")
item[0].setText(item[0].text()+'\n'+ text)
del subs[0]
self.actionOpenSub2.setEnabled(False)
self.enableCheckBoxsOrButtonScroll(0)
self.isEnableSub2 = True
self.loadedPath[2] = fileName
else:
QtWidgets.QMessageBox.information(None, 'WARNING', 'Please choose correctly sub compatible with engsub')
def clearLayout(self, layout):
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
self.clearLayout(item.layout())
def closeEvent(self, event):
reply = QtWidgets.QMessageBox.question(self, 'Message',
"Are you sure to quit?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
event.accept()
self.mediaPlayer.stop()
else:
event.ignore()
def reloadVideoAndSub(self):
self.clearLayout(self.verticalScroll)
#self.mediaPlayer.setMedia(QMediaPlayer.NoMedia)
#self.mediaPlayer.destroyed()
QMediaPlayer.stop(self.mediaPlayer)
# init
self.start()
self.scrollArea.hide()
self.actionOpenSub1.setEnabled(True)
self.actionOpenSub2.setEnabled(False)
print(len(self.listStartTime))
    def turnOnOrOffButton(self, turn):
        buttons = [self.toolButton, self.toolButton_3, self.toolButton_4]
        # map() is lazy in Python 3, so iterate explicitly to apply the change
        for button in buttons:
            button.setEnabled(turn)
def openVid(self, fileName):
self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(fileName)))
self.toolButton.setEnabled(True)
self.enableCheckBoxsOrButtonScroll(1)
self.isOpenedVideo = True
self.loadedPath[0] = fileName
#self.turnOnOrOffButton(True)
self.play()
def openEngSub(self, fileName):
self.loadSubToScroll(fileName)
self.scrollArea.show()
self.isEnableSub = True
self.actionOpenSub2.setEnabled(True)
self.actionOpenSub1.setEnabled(False)
self.loadedPath[1] = fileName
def openFile(self, title):
def open_file():
choose = -1
dialog = QtWidgets.QFileDialog()
extension = ''
if title == 'Open Video':
extension = 'Videos (*.mkv *.mp4 *.mpg)'
choose = 1
elif title == 'Open Eng Sub':
extension = 'SRT (*.srt)'
choose = 2
elif title == 'Open Viewed Video':
pass
else:
extension = 'SRT (*.srt)'
choose = 3
#dialog.setDefaultSuffix(".srt")
fileName, _ = dialog.getOpenFileName(None, title, QDir.homePath(), extension)
name = fileName.lower()
            if choose == 2:  # convention: the subtitle type is encoded at the end of the filename
if name[len(name)-7:len(name)-4] != 'eng':
QtWidgets.QMessageBox.information(None, 'WARNING', 'Please choose correctly sub with format *eng.srt')
return
elif choose == 3:
if name[len(name)-7:len(name)-4] != 'vie':
QtWidgets.QMessageBox.information(None, 'WARNING', 'Please choose correctly sub with format *vie.srt')
return
if fileName != '':
#self.loadSubToScroll()
if title == 'Open Video':
self.openVid(fileName)
elif title == 'Open Eng Sub':
self.openEngSub(fileName)
else:
self.LoadOtherSub(fileName)
loaded = True
for p in self.loadedPath:
if p == '':
loaded = False
if loaded:
with open('history.txt', 'w') as file:
file.writelines(','.join(self.loadedPath))
return open_file
def openViewedVideo(self):
with open('history.txt', 'r') as file:
paths = file.readlines()[0].split(',')
self.openVid(paths[0])
self.openEngSub(paths[1])
self.LoadOtherSub(paths[2])
def play(self):
icon = QtGui.QIcon()
if self.mediaPlayer.state() == QMediaPlayer.PlayingState:
self.mediaPlayer.pause()
icon.addPixmap(QtGui.QPixmap("assets/pause.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon)
else:
icon.addPixmap(QtGui.QPixmap("assets/play.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon)
self.mediaPlayer.play()
def scrollToSub(self, curHeight, maxHeight):
        # use cross-multiplication to compute the scroll position ratio
self.maxButtonPos = self.listStartTime[-1][1].y()
pos = int(float(curHeight/self.maxButtonPos)*maxHeight)
#print(pos, curHeight, maxHeight)
self.scrollArea.verticalScrollBar().setValue(pos)
    # this event handler receives a position argument by default
def setTextCheckBox(self, checkbox, color, size, italic_):
checkbox.setStyleSheet('color: %s'%(color))
checkbox.setFont(QtGui.QFont('Times New Roman', size, italic = italic_))
def setStatusScrollArea(self):
timeSub = self.listStartTime[self.indexNextSub]
self.setTextCheckBox(timeSub[0], 'green', 15, True)
if self.oldIndexSub != 0:
self.setTextCheckBox(self.listStartTime[self.oldIndexSub][0], 'black', 10, False)
self.scrollToSub(timeSub[1].y(), self.scrollArea.verticalScrollBar().maximum())
text = timeSub[0].text()
if self.isEngOrVieOrTwiceSubLabel == 1:
self.labelVideo.setText(text.split('\n')[0])
elif self.isEngOrVieOrTwiceSubLabel == 2:
self.labelVideo.setText(text.split('\n')[1])
else:
self.labelVideo.setText(text)
self.oldIndexSub = self.indexNextSub
if self.indexNextSub < len(self.listStartTime)-1:
self.indexNextSub +=1
def durationChanged(self, duration):
self.positionSlider.setRange(0, duration)
self.maxTimeVideo = duration
self.labelDurationTime.setText(self.formatTimeToHMS(duration))
def positionChanged(self, position):
self.positionSlider.setValue(position)
self.labelCurTime.setText(self.formatTimeToHMS(position))
if self.isEnableSub:
start = self.listStartTime[self.indexNextSub][2]
end = self.listStartTime[self.indexNextSub][3]
if position >= start and position <= end:
self.setStatusScrollArea()
def grabIndexCurrent(self, position, stopIndex, step):
indexCur = self.indexNextSub
for i in range(indexCur, stopIndex, step):
start = self.listStartTime[i][2]
end = self.listStartTime[i][3]
if position >= start and position <= end:
self.indexNextSub = i
break
if step == 1 and position >= end and position <= self.listStartTime[i+1][2]:
self.indexNextSub = i +1
break
elif step == -1 and position <= start and position >= self.listStartTime[i-1][2]:
self.indexNextSub = i
break
if indexCur == self.indexNextSub:
if step == 1:
self.indexNextSub = len(self.listStartTime) -1
else:
self.indexNextSub = 1
#print(self.indexNextSub)
def formatTime(self, time):
if len(time) == 1:
return '0' + time
return time
def formatTimeToHMS(self, time):
time = time/1000
hour = int(time/3600)
minute = int((time-3600*hour)/60)
second = int(time-3600*hour - minute*60)
return '%s:%s:%s'%(self.formatTime(str(hour)), self.formatTime(str(minute)), self.formatTime(str(second)))
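    # Worked example: time = 3725000 ms -> 3725 s -> hour = 1, minute = 2,
    # second = 5, formatted as '01:02:05'.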
    # sliderMoved supplies a position argument, used here to seek the video
def sliderMoved(self, position):
self.mediaPlayer.setPosition(position)
def setSCrollbar(self, number):
def setScroll():
#self.scrollArea.scrollContentsBy(0, number)
#self.scrollAreaWidgetContents.scroll(number)
vbar = self.scrollArea.verticalScrollBar()
#print(vbar.maximum())
vbar.setValue(number)
return setScroll
def fullScreen(self):
icon = QtGui.QIcon()
if self.isFullScreen():
icon.addPixmap(QtGui.QPixmap("assets/fullscreen.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonFullScreen.setIcon(icon)
self.verticalLayout.setContentsMargins(9,9,9,9)
if self.isEnableSub:
self.scrollArea.show()
self.menubar.show()
self.frameSlider.show()
self.frameButton.show()
self.showNormal()
self.videoItem.setSize(QSizeF(600, 400))
#self.graphicsView.fitInView(QRectF(-400, -400, 400, 400), Qt.KeepAspectRatio)
#self.graphicsView.setMaximumSize(QSize(600, 400))
#self.graphicsView.setAlignment(Qt.AlignCenter)
self.labelVideo.hide()
else:
icon.addPixmap(QtGui.QPixmap("assets/minimize.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonFullScreen.setIcon(icon)
self.verticalLayout.setContentsMargins(0,0,0,0)
if self.isEnableSub:
self.scrollArea.hide()
self.menubar.hide()
self.frameSlider.hide()
self.frameButton.hide()
self.showFullScreen()
self.videoItem.setSize(QSizeF(self.width()-1, self.height()-2))
self.labelVideo.show()
def setupUi(self, Form):
self.sizeMonitor = [GetSystemMetrics(0), GetSystemMetrics(1)]
self.setMouseTracking(True)
self.form = Form
Form.setObjectName("Form")
Form.resize(631, 406)
self.oldHighLine = None
self.oldIndexSub = 0
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.mediaPlayer.positionChanged.connect(self.positionChanged)  # track video position changes
        self.mediaPlayer.durationChanged.connect(self.durationChanged)  # track the video duration
        # This function runs only once!
        # Create the video position slider
#self.positionSlider = QSlider(Qt.Horizontal)
self.positionSlider = Slider(Qt.Horizontal)
        # Qt.Horizontal is passed straight to the base class; Slider does not
        # define its own __init__, so it inherits the behaviour directly
        # rather than merely referencing the horizontal orientation class
self.positionSlider.parent = self
self.positionSlider.setRange(0, 0)
self.positionSlider.sliderMoved.connect(self.sliderMoved)
# set event notify 500 millisecond
QMediaObject.setNotifyInterval(self.mediaPlayer, 500)
# Create Menubar
self.menubar = QtWidgets.QMenuBar(Form)
self.menubar.setFixedHeight(25)
self.menuOpen = QtWidgets.QMenu(self.menubar)
self.menuOpen.setObjectName('menuOpen')
self.menuOption = QtWidgets.QMenu(self.menubar)
self.menuOption.setObjectName('menuOption')
self.actionReload = QtWidgets.QAction(Form)
self.actionReload.setObjectName('actionReload')
self.actionReload.triggered.connect(self.reloadVideoAndSub)
self.menuOption.addAction(self.actionReload)
self.actionOpenVideo = QtWidgets.QAction(Form)
self.actionOpenVideo.setObjectName('openvideo')
self.actionOpenSub1 = QtWidgets.QAction(Form)
self.actionOpenSub1.setObjectName('openSub1')
self.actionOpenSub1.triggered.connect(self.openFile('Open Eng Sub'))
self.actionOpenSub2 = QtWidgets.QAction(Form)
self.actionOpenSub2.setObjectName('openSub2')
self.actionOpenSub2.triggered.connect(self.openFile('Open Vie Sub'))
self.actionOpenViewedVideo = QtWidgets.QAction(Form)
self.actionOpenViewedVideo.triggered.connect(self.openViewedVideo)
self.menuOpen.addAction(self.actionOpenVideo)
self.menuOpen.addAction(self.actionOpenSub1)
self.menuOpen.addAction(self.actionOpenSub2)
self.menuOpen.addAction(self.actionOpenViewedVideo)
self.actionOpenVideo.triggered.connect(self.openFile('Open Video'))
self.menubar.addAction(self.menuOpen.menuAction())
self.menubar.addAction(self.menuOption.menuAction())
self.actionOpenSub2.setEnabled(False)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
# create gridlayout contain scrollarea
# self.gridLayout = QtWidgets.QGridLayout(Form)
# self.gridLayout.setObjectName("gridLayout")
# create scrollarea
self.scrollArea = QtWidgets.QScrollArea(Form)
self.scrollArea.setObjectName("scrollArea")
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setMinimumSize(300,400)
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 430, 319))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
# create verticalscroll
        # note that the parent is scrollAreaWidgetContents
self.verticalScroll = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
# self.gridLayout.addWidget(self.scrollArea)
self.verticalLayout.addWidget(self.menubar, 0)
scene = QGraphicsScene(self)
        self.graphicsView = QGraphicsView(scene)  # a QGraphicsView is needed so a label can be overlaid on the video
self.videoItem = QGraphicsVideoItem()
self.videoItem.setSize(QSizeF(600,400))
#self.graphicsView.setStyleSheet("background-color:black;")
        # use a widget-specific palette so labelVideo is not affected when drawn on top
p = self.graphicsView.palette()
p.setColor(self.graphicsView.backgroundRole(), Qt.black)
self.graphicsView.setPalette(p)
scene.addItem(self.videoItem)
self.horizontalLayout.addWidget(self.graphicsView)
self.mediaPlayer.setVideoOutput(self.videoItem)
# add label for videowidget represent subtitle
self.labelVideo = QtWidgets.QLabel(self.graphicsView)
self.labelVideo.setObjectName('labelVideo')
self.labelVideo.setText('')
self.labelVideo.setStyleSheet("QLabel {font-size: 20px; opacity:1; color:white}")
self.labelVideo.setFixedWidth(500)
self.labelVideo.setFixedHeight(200)
self.labelVideo.setAlignment(Qt.AlignCenter)
self.labelVideo.setWordWrap(True)
self.labelVideo.move(int(self.sizeMonitor[0]/2-200), int(self.sizeMonitor[1]*5/7))
#print(self.labelVideo.x(), self.labelVideo.y())
#self.labelVideo.raise_()
#self.videoWidget.raise_()
self.notificationVideo = QtWidgets.QLabel(self.graphicsView)
self.notificationVideo.setObjectName('notificationVideo')
self.notificationVideo.setText('')
self.notificationVideo.setStyleSheet("QLabel {font-size: 20px; opacity:1; color:white}")
self.notificationVideo.setFixedWidth(500)
self.notificationVideo.setFixedHeight(200)
self.notificationVideo.setAlignment(Qt.AlignCenter)
self.notificationVideo.setWordWrap(True)
self.notificationVideo.move(int(self.sizeMonitor[0]/2+200), int(self.sizeMonitor[1]*2/7))
self.horizontalLayout.addWidget(self.scrollArea)
self.verticalLayout.addLayout(self.horizontalLayout,10)
        # give this widget 100% stretch so it fills the whole layout area;
        # the other widgets get 0% stretch
# create layoutSlider
self.horizontalLayoutSlider = QtWidgets.QHBoxLayout()
self.horizontalLayoutSlider.setObjectName("horizontalLayoutSlider")
self.labelCurTime = QtWidgets.QLabel(Form)
self.labelCurTime.setObjectName('labelCurTime')
self.labelCurTime.setText('00:00')
self.labelDurationTime = QtWidgets.QLabel(Form)
self.labelDurationTime.setText('NaN')
self.labelDurationTime.setObjectName('labelDurationTime')
self.horizontalLayoutSlider.addWidget(self.labelCurTime)
self.horizontalLayoutSlider.addWidget(self.positionSlider)
self.horizontalLayoutSlider.addWidget(self.labelDurationTime)
#self.verticalLayout.addLayout(self.horizontalLayoutSlider,0)
        # a layout cannot be hidden, so wrap it in a QFrame (a widget subclass) and hide that instead
self.frameSlider = QtWidgets.QFrame()
self.frameSlider.setLayout(self.horizontalLayoutSlider)
self.verticalLayout.addWidget(self.frameSlider, 0)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
#self.verticalLayout.addLayout(self.horizontalLayout_2)
self.toolButton = QtWidgets.QToolButton(Form)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("assets/play.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon)
self.toolButton.setIconSize(QtCore.QSize(32, 32))
self.toolButton.setObjectName("toolButton")
# Event play
self.toolButton.clicked.connect(self.play)
self.horizontalLayout_2.addWidget(self.toolButton)
self.toolButton_3 = QtWidgets.QToolButton(Form)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("assets/previous.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_3.setIcon(icon1)
self.toolButton_3.setIconSize(QtCore.QSize(32, 32))
self.toolButton_3.setObjectName("toolButton_3")
self.toolButton_3.clicked.connect(self.backwardVideo)
self.horizontalLayout_2.addWidget(self.toolButton_3)
self.toolButton_4 = QtWidgets.QToolButton(Form)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("assets/next.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_4.setIcon(icon2)
self.toolButton_4.setIconSize(QtCore.QSize(32, 32))
self.toolButton_4.setObjectName("toolButton_4")
self.toolButton_4.clicked.connect(self.forwardVideo)
self.horizontalLayout_2.addWidget(self.toolButton_4)
#self.verticalLayout.addLayout(self.horizontalLayout_2,0)
self.toolButtonFullScreen = QtWidgets.QToolButton(Form)
self.toolButtonFullScreen.setObjectName('toolButtonFullScreen')
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap('assets/fullscreen.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButtonFullScreen.setIcon(icon3)
self.toolButtonFullScreen.setIconSize(QtCore.QSize(32,32))
self.horizontalLayout_2.addWidget(self.toolButtonFullScreen)
self.toolButtonFullScreen.clicked.connect(self.fullScreen)
# self.toolButton_3.clicked.connect(self.setSCrollbar(30))
# self.toolButton_4.clicked.connect(self.setSCrollbar(90))
self.frameButton = QtWidgets.QFrame()
self.frameButton.setLayout(self.horizontalLayout_2)
self.verticalLayout.addWidget(self.frameButton, 0)
# turn on mousemove tracking for videowidget !!!
#self.videoWidget.setMouseTracking(True)
        # disable focus so arrow-key button events are received
self.setChildrenFocusPolicy(Qt.NoFocus)
self.scrollArea.setFocusPolicy(Qt.NoFocus)
        # setContentsMargins applies to the top-level verticalLayout, not to Form
#self.verticalLayout.setContentsMargins(0,0,0,0)
self.scrollArea.hide()
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Study Film"))
self.toolButton.setText(_translate("Form", "..."))
self.toolButton_3.setText(_translate("Form", "..."))
self.toolButton_4.setText(_translate("Form", "..."))
self.menuOpen.setTitle(_translate("Form", "Open"))
self.menuOption.setTitle(_translate("Form", "Option"))
self.actionReload.setText(_translate("Form", 'Reload'))
self.actionOpenVideo.setText(_translate("Form", 'Open video'))
self.actionOpenSub1.setText(_translate("Form", 'Open Eng Sub'))
self.actionOpenSub2.setText(_translate("Form", 'Open Vie Sub'))
self.actionOpenViewedVideo.setText(_translate("Form", 'Open Viewed Video'))
| 42.621232
| 122
| 0.627706
|
ff964278948e0e40221aa349538a8b6cacc0b451
| 10,100
|
py
|
Python
|
selfdrive/controls/lib/driver_monitor.py
|
symuhammad/openpilot
|
96072fe69bc10f6ac5a92e2353e1ca0e649694dc
|
[
"MIT"
] | 8
|
2020-02-15T09:20:58.000Z
|
2020-08-26T11:06:06.000Z
|
selfdrive/controls/lib/driver_monitor.py
|
symuhammad/openpilot
|
96072fe69bc10f6ac5a92e2353e1ca0e649694dc
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/driver_monitor.py
|
symuhammad/openpilot
|
96072fe69bc10f6ac5a92e2353e1ca0e649694dc
|
[
"MIT"
] | 35
|
2019-09-30T15:35:06.000Z
|
2020-09-27T06:51:12.000Z
|
import numpy as np
from common.realtime import DT_CTRL, DT_DMON
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
_AWARENESS_TIME = 300.  # 5 minute limit; without the driver touching the steering wheel the car enters a terminal status
_AWARENESS_PRE_TIME_TILL_TERMINAL = 25. # a first alert is issued 25s before expiration
_AWARENESS_PROMPT_TIME_TILL_TERMINAL = 15.  # a second alert is issued 15s before the car starts decelerating
_DISTRACTED_TIME = 15.
_DISTRACTED_PRE_TIME_TILL_TERMINAL = 8.
_DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
_FACE_THRESHOLD = 0.4
_EYE_THRESHOLD = 0.6
_BLINK_THRESHOLD = 0.5 # 0.225
_BLINK_THRESHOLD_SLACK = 0.65
_BLINK_THRESHOLD_STRICT = 0.5
_PITCH_WEIGHT = 1.35 # 1.5 # pitch matters a lot more
_METRIC_THRESHOLD = 0.4
_METRIC_THRESHOLD_SLACK = 0.55
_METRIC_THRESHOLD_STRICT = 0.4
_PITCH_POS_ALLOWANCE = 0.04 # 0.08 # rad, to not be too sensitive on positive pitch
_PITCH_NATURAL_OFFSET = 0.12 # 0.1 # people don't seem to look straight when they drive relaxed, rather a bit up
_YAW_NATURAL_OFFSET = 0.08 # people don't seem to look straight when they drive relaxed, rather a bit to the right (center of car)
_DISTRACTED_FILTER_TS = 0.25 # 0.6Hz
_POSE_CALIB_MIN_SPEED = 13 # 30 mph
_POSE_OFFSET_MIN_COUNT = 600 # valid data counts before calibration completes, 1 seg is 600 counts
_POSE_OFFSET_MAX_COUNT = 3600 # stop deweighting new data after 6 min, aka "short term memory"
_RECOVERY_FACTOR_MAX = 5. # relative to minus step change
_RECOVERY_FACTOR_MIN = 1.25 # relative to minus step change
MAX_TERMINAL_ALERTS = 3 # not allowed to engage after 3 terminal alerts
MAX_TERMINAL_DURATION = 3000 # 30s
# model output refers to center of cropped image, so need to apply the x displacement offset
RESIZED_FOCAL = 320.0
H, W, FULL_W = 320, 160, 426
class DistractedType():
NOT_DISTRACTED = 0
BAD_POSE = 1
BAD_BLINK = 2
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib):
# the output of these angles are in device frame
# so from driver's perspective, pitch is up and yaw is right
pitch_net = angles_desc[0]
yaw_net = angles_desc[1]
roll_net = angles_desc[2]
face_pixel_position = ((pos_desc[0] + .5)*W - W + FULL_W, (pos_desc[1]+.5)*H)
yaw_focal_angle = np.arctan2(face_pixel_position[0] - FULL_W//2, RESIZED_FOCAL)
pitch_focal_angle = np.arctan2(face_pixel_position[1] - H//2, RESIZED_FOCAL)
roll = roll_net
pitch = pitch_net + pitch_focal_angle
yaw = -yaw_net + yaw_focal_angle
# no calib for roll
pitch -= rpy_calib[1]
yaw -= rpy_calib[2]
return np.array([roll, pitch, yaw])
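# Worked example of the focal-angle correction (illustrative numbers): a face
# whose pixel x-position equals FULL_W//2 = 213 gives
# yaw_focal_angle = arctan2(0, 320) = 0, while a face at the right image edge
# (x = FULL_W = 426) gives arctan2(213, 320) ~= 0.587 rad (~33.6 deg).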
class DriverPose():
def __init__(self):
self.yaw = 0.
self.pitch = 0.
self.roll = 0.
self.pitch_offseter = RunningStatFilter(max_trackable=_POSE_OFFSET_MAX_COUNT)
self.yaw_offseter = RunningStatFilter(max_trackable=_POSE_OFFSET_MAX_COUNT)
self.cfactor = 1.
class DriverBlink():
def __init__(self):
self.left_blink = 0.
self.right_blink = 0.
self.cfactor = 1.
class DriverStatus():
def __init__(self):
self.pose = DriverPose()
self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT
self.blink = DriverBlink()
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
self.driver_distracted = False
self.driver_distraction_filter = FirstOrderFilter(0., _DISTRACTED_FILTER_TS, DT_DMON)
self.face_detected = False
self.terminal_alert_cnt = 0
self.terminal_time = 0
self.step_change = 0.
self.active_monitoring_mode = True
self.threshold_prompt = _DISTRACTED_PROMPT_TIME_TILL_TERMINAL / _DISTRACTED_TIME
self.is_rhd_region = False
self.is_rhd_region_checked = False
self._set_timers(active_monitoring=True)
def _set_timers(self, active_monitoring):
if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
if active_monitoring:
self.step_change = DT_CTRL / _DISTRACTED_TIME
else:
self.step_change = 0.
return # no exploit after orange alert
elif self.awareness <= 0.:
return
if active_monitoring:
# when falling back from passive mode to active mode, reset awareness to avoid false alert
if not self.active_monitoring_mode:
self.awareness_passive = self.awareness
self.awareness = self.awareness_active
self.threshold_pre = _DISTRACTED_PRE_TIME_TILL_TERMINAL / _DISTRACTED_TIME
self.threshold_prompt = _DISTRACTED_PROMPT_TIME_TILL_TERMINAL / _DISTRACTED_TIME
self.step_change = DT_CTRL / _DISTRACTED_TIME
self.active_monitoring_mode = True
else:
if self.active_monitoring_mode:
self.awareness_active = self.awareness
self.awareness = self.awareness_passive
self.threshold_pre = _AWARENESS_PRE_TIME_TILL_TERMINAL / _AWARENESS_TIME
self.threshold_prompt = _AWARENESS_PROMPT_TIME_TILL_TERMINAL / _AWARENESS_TIME
self.step_change = DT_CTRL / _AWARENESS_TIME
self.active_monitoring_mode = False
def _is_driver_distracted(self, pose, blink):
if not self.pose_calibrated:
pitch_error = pose.pitch - _PITCH_NATURAL_OFFSET
yaw_error = pose.yaw - _YAW_NATURAL_OFFSET
# add positive pitch allowance
if pitch_error > 0.:
pitch_error = max(pitch_error - _PITCH_POS_ALLOWANCE, 0.)
else:
pitch_error = pose.pitch - self.pose.pitch_offseter.filtered_stat.mean()
yaw_error = pose.yaw - self.pose.yaw_offseter.filtered_stat.mean()
pitch_error *= _PITCH_WEIGHT
pose_metric = np.sqrt(yaw_error**2 + pitch_error**2)
if pose_metric > _METRIC_THRESHOLD*pose.cfactor:
return DistractedType.BAD_POSE
elif (blink.left_blink + blink.right_blink)*0.5 > _BLINK_THRESHOLD*blink.cfactor:
return DistractedType.BAD_BLINK
else:
return DistractedType.NOT_DISTRACTED
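  # Numeric sketch (calibrated case, cfactor = 1): pitch_error = 0.2 and
  # yaw_error = 0.1 give sqrt(0.1**2 + (1.35*0.2)**2) ~= 0.29 < 0.4, i.e. not
  # distracted; raising pitch_error to 0.3 gives ~0.42 > 0.4 -> BAD_POSE.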
def set_policy(self, model_data):
ep = min(model_data.meta.engagedProb, 0.8) / 0.8
self.pose.cfactor = np.interp(ep, [0, 0.5, 1], [_METRIC_THRESHOLD_STRICT, _METRIC_THRESHOLD, _METRIC_THRESHOLD_SLACK])/_METRIC_THRESHOLD
self.blink.cfactor = np.interp(ep, [0, 0.5, 1], [_BLINK_THRESHOLD_STRICT, _BLINK_THRESHOLD, _BLINK_THRESHOLD_SLACK])/_BLINK_THRESHOLD
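  # Numeric sketch: engagedProb = 0.4 gives ep = 0.5, so pose.cfactor =
  # _METRIC_THRESHOLD / _METRIC_THRESHOLD = 1.0 (nominal threshold), while
  # engagedProb >= 0.8 gives ep = 1.0 and pose.cfactor = 0.55 / 0.4 = 1.375,
  # i.e. the pose threshold is relaxed while openpilot is likely engaged.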
def get_pose(self, driver_monitoring, cal_rpy, car_speed, op_engaged):
# 10 Hz
if len(driver_monitoring.faceOrientation) == 0 or len(driver_monitoring.facePosition) == 0:
return
self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_monitoring.faceOrientation, driver_monitoring.facePosition, cal_rpy)
self.blink.left_blink = driver_monitoring.leftBlinkProb * (driver_monitoring.leftEyeProb>_EYE_THRESHOLD)
self.blink.right_blink = driver_monitoring.rightBlinkProb * (driver_monitoring.rightEyeProb>_EYE_THRESHOLD)
self.face_detected = driver_monitoring.faceProb > _FACE_THRESHOLD and \
abs(driver_monitoring.facePosition[0]) <= 0.4 and abs(driver_monitoring.facePosition[1]) <= 0.45 and \
not self.is_rhd_region
self.driver_distracted = self._is_driver_distracted(self.pose, self.blink)>0
# first order filters
self.driver_distraction_filter.update(self.driver_distracted)
# update offseter
# only update when driver is actively driving the car above a certain speed
if self.face_detected and car_speed>_POSE_CALIB_MIN_SPEED and (not op_engaged or not self.driver_distracted):
self.pose.pitch_offseter.push_and_update(self.pose.pitch)
self.pose.yaw_offseter.push_and_update(self.pose.yaw)
self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > _POSE_OFFSET_MIN_COUNT
self._set_timers(self.face_detected)
def update(self, events, driver_engaged, ctrl_active, standstill):
if (driver_engaged and self.awareness > 0) or not ctrl_active:
# reset only when on disengagement if red reached
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
return events
driver_attentive = self.driver_distraction_filter.x < 0.37
awareness_prev = self.awareness
if (driver_attentive and self.face_detected and self.awareness > 0):
# only restore awareness when paying attention and alert is not red
self.awareness = min(self.awareness + ((_RECOVERY_FACTOR_MAX-_RECOVERY_FACTOR_MIN)*(1.-self.awareness)+_RECOVERY_FACTOR_MIN)*self.step_change, 1.)
if self.awareness == 1.:
self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
# don't display alert banner when awareness is recovering and has cleared orange
if self.awareness > self.threshold_prompt:
return events
# should always be counting if distracted unless at standstill and reaching orange
if (not self.face_detected or (self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected)) and \
not (standstill and self.awareness - self.step_change <= self.threshold_prompt):
self.awareness = max(self.awareness - self.step_change, -0.1)
alert = None
if self.awareness <= 0.:
# terminal red alert: disengagement required
alert = 'driverDistracted' if self.active_monitoring_mode else 'driverUnresponsive'
self.terminal_time += 1
if awareness_prev > 0.:
self.terminal_alert_cnt += 1
elif self.awareness <= self.threshold_prompt:
# prompt orange alert
alert = 'promptDriverDistracted' if self.active_monitoring_mode else 'promptDriverUnresponsive'
elif self.awareness <= self.threshold_pre:
# pre green alert
alert = 'preDriverDistracted' if self.active_monitoring_mode else 'preDriverUnresponsive'
if alert is not None:
events.append(create_event(alert, [ET.WARNING]))
return events
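# Numeric sketch of the awareness countdown (assuming openpilot's DT_CTRL of
# 0.01 s): in active monitoring step_change = 0.01 / 15 ~= 6.7e-4 per control
# step, so a continuously distracted driver drops from awareness 1.0 to the
# prompt threshold 6/15 = 0.4 in about 900 steps (~9 s) and reaches the
# terminal alert at 0.0 after ~15 s in total.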
| 43.347639
| 154
| 0.742475
|
9b7f2ff43eadaa853a717e8df904e5db85bd06db
| 18
|
py
|
Python
|
virtbs/__init__.py
|
afazekas/speedling
|
bd9d93ef16f3d702f146812fb6e8fef51426a378
|
[
"Apache-2.0"
] | 1
|
2019-04-10T18:20:09.000Z
|
2019-04-10T18:20:09.000Z
|
virtbs/__init__.py
|
afazekas/speedling
|
bd9d93ef16f3d702f146812fb6e8fef51426a378
|
[
"Apache-2.0"
] | 1
|
2019-07-12T12:08:19.000Z
|
2019-07-12T12:08:19.000Z
|
virtbs/__init__.py
|
afazekas/speedling
|
bd9d93ef16f3d702f146812fb6e8fef51426a378
|
[
"Apache-2.0"
] | 1
|
2019-02-26T16:37:10.000Z
|
2019-02-26T16:37:10.000Z
|
# Not a package yet
| 9
| 17
| 0.722222
|
60e419418114e3162868c60069a5c30527b6d81e
| 2,559
|
py
|
Python
|
jupyterlab_widgets/setup.py
|
casperdcl/ipywidgets
|
f27cdc6dcdb838174f8280cd27c1c1013cadb0e7
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterlab_widgets/setup.py
|
casperdcl/ipywidgets
|
f27cdc6dcdb838174f8280cd27c1c1013cadb0e7
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterlab_widgets/setup.py
|
casperdcl/ipywidgets
|
f27cdc6dcdb838174f8280cd27c1c1013cadb0e7
|
[
"BSD-3-Clause"
] | null | null | null |
"""
jupyterlab_widgets setup
"""
import os
from jupyter_packaging import (
create_cmdclass, install_npm, ensure_targets,
combine_commands, ensure_python, get_version,
skip_if_exists
)
import setuptools
HERE = os.path.abspath(os.path.dirname(__file__))
# The name of the project
name = "jupyterlab_widgets"
# Ensure a valid python version
ensure_python(">=3.6")
# Get our version
version = get_version(os.path.join(name, "_version.py"))
lab_path = os.path.join(HERE, name, "labextension")
# Representative files that should exist after a successful build
jstargets = [
os.path.join(lab_path, "package.json"),
]
package_data_spec = {
name: [
"*"
]
}
labext_name = "@jupyter-widgets/jupyterlab-manager"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, lab_path, "**"),
("share/jupyter/labextensions/%s" % labext_name, HERE, "install.json"),
]
cmdclass = create_cmdclass(
"jsdeps",
package_data_spec=package_data_spec,
data_files_spec=data_files_spec
)
# if the static assets already exist, do not invoke npm so we can make a wheel
# from the sdist package, since the npm build really only works from this
# repo.
js_command = combine_commands(
install_npm(HERE, build_cmd="build:prod", npm=["jlpm"]),
ensure_targets(jstargets),
)
is_repo = os.path.exists(os.path.join(HERE, os.pardir, ".git"))
if is_repo:
cmdclass["jsdeps"] = js_command
else:
cmdclass["jsdeps"] = skip_if_exists(jstargets, js_command)
with open("README.md", "r") as fh:
long_description = fh.read()
setup_args = dict(
name=name,
version=version,
url="https://github.com/jupyter-widgets/ipywidgets",
author="Jupyter Development Team",
description="A JupyterLab extension.",
    long_description=long_description,
long_description_content_type="text/markdown",
cmdclass=cmdclass,
packages=setuptools.find_packages(),
install_requires=[],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
license="BSD-3-Clause",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Framework :: Jupyter",
],
)
if __name__ == "__main__":
setuptools.setup(**setup_args)
| 26.112245
| 78
| 0.687378
|
262672fb2b6b8df99067578c99cdba141e07a38d
| 6,506
|
py
|
Python
|
CrySPY/EA/ea_append.py
|
ruoitrau86/CrySPY
|
84cc0c663a6ecc73ff89d0d2893424ecf3faae50
|
[
"MIT"
] | null | null | null |
CrySPY/EA/ea_append.py
|
ruoitrau86/CrySPY
|
84cc0c663a6ecc73ff89d0d2893424ecf3faae50
|
[
"MIT"
] | null | null | null |
CrySPY/EA/ea_append.py
|
ruoitrau86/CrySPY
|
84cc0c663a6ecc73ff89d0d2893424ecf3faae50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import ConfigParser
import os
import pandas as pd
from .. import utility
from ..gen_struc.EA.select_parents import Select_parents
from ..gen_struc.EA.crossover import Crossover
from ..gen_struc.EA.permutation import Permutation
from ..gen_struc.EA.strain import Strain
from ..gen_struc.EA.ea_generation import EA_generation
from ..gen_struc.random import rndgen
from ..IO import out_results
from ..IO import pkl_data
from ..IO import read_input as rin
def append_struc(stat, init_struc_data, opt_struc_data, rslt_data):
# ---------- append structures by EA
print('\n# ---------- Append structures by EA')
with open('cryspy.out', 'a') as fout:
fout.write('\n# ---------- Append structures by EA\n')
# ---------- fitness
fitness = dict(zip(rslt_data['Struc_ID'].values, rslt_data['Energy'].values))
# ---------- instantiate Seclect_parents class
print('# ------ select parents')
sp = Select_parents(opt_struc_data, fitness, None, None, rin.fit_reverse, rin.n_fittest)
if rin.slct_func == 'TNM':
sp.set_tournament(t_size=rin.t_size)
else:
sp.set_roulette(a=rin.a_rlt, b=rin.b_rlt)
# ---------- generate offspring by EA
print('# ------ Generate structures')
eagen = EA_generation(sp=sp, symprec=rin.symprec, id_start=rin.tot_struc, init_pos_path='./data/init_POSCARS')
# ------ instantiate Crossover class
if rin.n_crsov > 0:
co = Crossover(rin.atype, rin.nat, rin.mindist, rin.crs_lat, rin.crs_func,
rin.nat_diff_tole, rin.maxcnt_ea)
eagen.gen_crossover(rin.n_crsov, co=co) # crossover
with open('cryspy.out', 'a') as fout:
fout.write('{} structures by crossover\n'.format(rin.n_crsov))
# ------ instantiate Permutation class
if rin.n_perm > 0:
pm = Permutation(rin.atype, rin.mindist, rin.ntimes, rin.maxcnt_ea)
eagen.gen_permutation(rin.n_perm, pm=pm) # permutation
with open('cryspy.out', 'a') as fout:
fout.write('{} structures by permutation\n'.format(rin.n_perm))
# ------ instantiate Strain class
if rin.n_strain > 0:
st = Strain(rin.atype, rin.mindist, rin.sigma_st, rin.maxcnt_ea)
eagen.gen_strain(rin.n_strain, st=st) # strain
with open('cryspy.out', 'a') as fout:
fout.write('{} structures by strain\n'.format(rin.n_strain))
# ------ update init_struc_data
init_struc_data.update(eagen.offspring)
# ---------- random generation
if rin.n_rand > 0:
if rin.spgnum == 0:
tmp_struc_data = rndgen.rndgen_wo_spg(
rin.n_rand, rin.natot, rin.atype, rin.nat, eagen.cID,
rin.minlen, rin.maxlen, rin.dangle, rin.mindist,
rin.maxcnt, rin.symprec, '../data/init_POSCARS')
# ------ update init_struc_data
init_struc_data.update(tmp_struc_data)
else:
fwpath = utility.check_fwpath()
tmp_struc_data = rndgen.rndgen_spg(
rin.n_rand, rin.natot, rin.atype, rin.nat, rin.spgnum, eagen.cID,
rin.minlen, rin.maxlen, rin.dangle, rin.mindist,
rin.maxcnt, rin.symprec, '../data/init_POSCARS', fwpath)
# ------ update init_struc_data
init_struc_data.update(tmp_struc_data)
with open('cryspy.out', 'a') as fout:
fout.write('{} structures by random\n'.format(rin.n_rand))
# ---------- save init_struc_data
pkl_data.save_init_struc(init_struc_data)
# ---------- load or init ea_data
if os.path.isfile('./data/pkl_data/EA_data.pkl'):
_, _, ea_info, ea_origin = pkl_data.load_ea_data()
else:
# ------ initialize
# -- ea_info
ea_info = pd.DataFrame(columns=['Gen', 'Population',
'Crossover', 'Permutation', 'Strain',
'Random', 'Elite',
'crs_func', 'crs_lat', 'slct_func'])
ea_info.iloc[:, 0:7] = ea_info.iloc[:, 0:7].astype(int)
# -- ea_origin
ea_origin = pd.DataFrame(columns=['Gen', 'Struc_ID', 'Operation', 'Parent'])
ea_origin.iloc[:, 0:2] = ea_origin.iloc[:, 0:2].astype(int)
# ---------- ea_info
tmp_info = pd.Series([rin.tot_struc, rin.n_pop, rin.n_crsov, rin.n_perm, rin.n_strain, rin.n_rand, 0,
rin.crs_func, rin.crs_lat, rin.slct_func], index=ea_info.columns)
ea_info = ea_info.append(tmp_info, ignore_index=True)
# ------ out ea_info
out_results.out_ea_info(ea_info)
# ---------- ea_origin
# ------ EA operation part
for cID in range(rin.tot_struc, rin.tot_struc + rin.n_pop - rin.n_rand):
tmp_origin = pd.Series([rin.tot_struc, cID, eagen.operation[cID], eagen.parents[cID]], index=ea_origin.columns)
ea_origin = ea_origin.append(tmp_origin, ignore_index=True)
# ------ random part
for cID in range(rin.tot_struc + rin.n_pop - rin.n_rand, rin.tot_struc + rin.n_pop):
tmp_origin = pd.Series([rin.tot_struc, cID, 'random', None], index=ea_origin.columns)
ea_origin = ea_origin.append(tmp_origin, ignore_index=True)
    # ------ out ea_origin
out_results.out_ea_origin(ea_origin)
# ---------- save ea_data
ea_data = (None, None, ea_info, ea_origin)
pkl_data.save_ea_data(ea_data)
# ---------- change variables in cryspy.in
config = ConfigParser.ConfigParser()
config.read('cryspy.in')
print('# -- changed cryspy.in')
# ------ tot_struc
config.set('basic', 'tot_struc', '{}'.format(rin.tot_struc + rin.n_pop))
print('Changed the value of tot_struc in cryspy.in from {} to {}'.format(
rin.tot_struc, rin.tot_struc + rin.n_pop))
# ------ append_struc_ea
config.set('option', 'append_struc_ea', '{}'.format(False))
print('Changed the value of append_struc_ea in cryspy.in from {} to {}'.format(
True, False))
# ------ write
with open('cryspy.in', 'w') as f:
config.write(f)
# ---------- status
stat.set('input', 'tot_struc', '{}'.format(rin.tot_struc + rin.n_pop))
stat.set('input', 'append_struc_ea', '{}'.format(False))
with open('cryspy.stat', 'w') as fstat:
stat.write(fstat)
# ---------- return
return init_struc_data
| 43.373333
| 119
| 0.601752
|
65a069438b85cb2b0b130e8c8947b706aac5f4dc
| 424
|
py
|
Python
|
python/algorithms/binary_search.py
|
minhajul/learning
|
7fdccee7ff7624e69ba3198531be90f448b77837
|
[
"MIT"
] | 1
|
2018-06-09T08:47:13.000Z
|
2018-06-09T08:47:13.000Z
|
python/algorithms/binary_search.py
|
minhajul/learning
|
7fdccee7ff7624e69ba3198531be90f448b77837
|
[
"MIT"
] | 12
|
2018-06-08T20:02:43.000Z
|
2018-09-25T17:16:05.000Z
|
python/algorithms/binary_search.py
|
minhajul/learning
|
7fdccee7ff7624e69ba3198531be90f448b77837
|
[
"MIT"
] | null | null | null |
def binary_search(list_items, x):
    # search over indices; the original initialised `left` with the first
    # element's value instead of index 0
    left = 0
    right = len(list_items) - 1
    while left <= right:
        mid = (left + right) // 2  # integer midpoint, avoids round() surprises
        if x == list_items[mid]:
            return mid
        elif x > list_items[mid]:
            left = mid + 1
        else:
            right = mid - 1
    return -1
result = binary_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3)
print(result)
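# Trace of the call above with the index-based bounds: left=0, right=9 ->
# mid=4 (value 5 > 3) -> right=3 -> mid=1 (value 2 < 3) -> left=2 -> mid=2
# (value 3 == 3) -> returns index 2, so the script prints 2.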
| 20.190476
| 58
| 0.509434
|
c90dc36e66fee3dcb300a00430345871b4dd01fa
| 2,688
|
py
|
Python
|
hello/views.py
|
1tjcaron/learnmath
|
311229e518bc52d834b30a961803c5fdc7851568
|
[
"MIT"
] | null | null | null |
hello/views.py
|
1tjcaron/learnmath
|
311229e518bc52d834b30a961803c5fdc7851568
|
[
"MIT"
] | null | null | null |
hello/views.py
|
1tjcaron/learnmath
|
311229e518bc52d834b30a961803c5fdc7851568
|
[
"MIT"
] | null | null | null |
import datetime
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from . import forms
def hello(request):
    return render(request, 'hello.html', {"status1": "Unstarted"})
def hi2(request):
    return render(request, 'hello.html', {"status1": "Done"})
def pass_quiz_percentage(request):
return HttpResponse('Great Job! Go <a href="/1"> back home</a>')
def quiz_percentage(request, problem_number):
problems = {
1: (10, 20, 60),
2: (10, 30, 60),
3: (10, 15, 100),
4: (15, 30, 100),
5: (0, 100, 20),
}
# book_instance = get_object_or_404(BookInstance, pk=pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = forms.DefinedPercForm(*problems.get(problem_number, (0, 0, 0)), request.POST)
# Check if the form is valid:
if form.is_valid():
if problem_number < max(problems.keys()):
return HttpResponseRedirect(f"/quiz/percentage/{problem_number + 1}")
else:
return HttpResponseRedirect(f"/quiz/percentage/success")
# If this is a GET (or any other method) create the default form.
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
form = forms.DefinedPercForm(*problems.get(problem_number, (0, 0, 0)), initial={'renewal_date': proposed_renewal_date})
context = {
'form': form,
'problem': f"Problem ({problem_number}/5)"
# 'book_instance': book_instance,
}
return render(request, 'quiz_zero_perc.html', context)
def quiz_number_line(request):
# book_instance = get_object_or_404(BookInstance, pk=pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request (binding):
form = forms.NumberLineForm(request.POST)
# Check if the form is valid:
if form.is_valid():
return HttpResponseRedirect("/" )
# If this is a GET (or any other method) create the default form.
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
form = forms.NumberLineForm(initial={'renewal_date': proposed_renewal_date})
context = {
'form': form,
# 'book_instance': book_instance,
}
return render(request, 'quiz_number_line.html', context)
| 32.385542
| 127
| 0.651786
|
04922411299b7200f2711b08256924151e3c9d00
| 6,669
|
py
|
Python
|
src/ircthread.py
|
fallingknife/electrum-uno-server
|
ecae2ea51a00806269f080546c42db9452ef34b5
|
[
"MIT"
] | null | null | null |
src/ircthread.py
|
fallingknife/electrum-uno-server
|
ecae2ea51a00806269f080546c42db9452ef34b5
|
[
"MIT"
] | null | null | null |
src/ircthread.py
|
fallingknife/electrum-uno-server
|
ecae2ea51a00806269f080546c42db9452ef34b5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
def __init__(self, processor, config):
threading.Thread.__init__(self)
self.processor = processor
self.daemon = True
options = dict(config.items('server'))
self.stratum_tcp_port = options.get('stratum_tcp_port')
self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
self.irc_bind_ip = options.get('irc_bind_ip')
self.host = options.get('host')
self.report_host = options.get('report_host')
self.nick = options.get('irc_nick')
if self.report_stratum_tcp_port:
self.stratum_tcp_port = self.report_stratum_tcp_port
if self.report_stratum_tcp_ssl_port:
self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
if self.report_host:
self.host = self.report_host
if not self.nick:
self.nick = Hash(self.host)[:5].encode("hex")
self.pruning = True
self.pruning_limit = config.get('leveldb', 'pruning_limit')
self.nick = 'E_' + self.nick
self.password = None
self.who_queue = Queue.Queue()
def getname(self):
s = 'v' + VERSION + ' '
if self.pruning:
s += 'p' + self.pruning_limit + ' '
def add_port(letter, number):
DEFAULT_PORTS = {'t':'50005', 's':'50006'}
if not number: return ''
if DEFAULT_PORTS[letter] == number:
return letter + ' '
else:
return letter + number + ' '
s += add_port('t',self.stratum_tcp_port)
s += add_port('s',self.stratum_tcp_ssl_port)
return s
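    # Example (illustrative values): with VERSION = '1.0', pruning_limit =
    # '100', stratum_tcp_port = '50005' (the default) and
    # stratum_tcp_ssl_port = '50010', getname() returns 'v1.0 p100 t s50010 '.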
def start(self, queue):
self.queue = queue
threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("##electrum-uno")
def on_join(self, connection, event):
m = re.match("(E_.*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("(E_.*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("(E_.*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
        except socket.error:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith("E_"):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
except Queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
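            # the sleep above (plus the get() timeout) throttles WHO requests
            # to roughly one per second so the bot does not flood the server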
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
| 37.256983
| 147
| 0.624232
|
903b4b6357e887c9e3bc4d8b4d3f00f158204858
| 1,714
|
py
|
Python
|
components/gpio_control/GPIODevices/led.py
|
drocx/RPi-Jukebox-RFID
|
1e211c2f4571a86d97747fe9094a34931de8b7c1
|
[
"MIT"
] | null | null | null |
components/gpio_control/GPIODevices/led.py
|
drocx/RPi-Jukebox-RFID
|
1e211c2f4571a86d97747fe9094a34931de8b7c1
|
[
"MIT"
] | null | null | null |
components/gpio_control/GPIODevices/led.py
|
drocx/RPi-Jukebox-RFID
|
1e211c2f4571a86d97747fe9094a34931de8b7c1
|
[
"MIT"
] | 1
|
2019-10-06T16:33:52.000Z
|
2019-10-06T16:33:52.000Z
|
import logging
import time
import mpd
from RPi import GPIO
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
class LED:
def __init__(self, pin, initial_value=True, name='LED'):
self.pin = pin
        self.name = name
        logger.debug('initialize {}(pin={}) to {}'.format(
            self.name, self.pin, 'on' if initial_value else 'off'))
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, initial_value)
def on(self):
logger.debug('Set Output of {}(pin={}) to on'.format(self.name, self.pin))
GPIO.output(self.pin, GPIO.HIGH)
def off(self):
logger.debug('Set Output of {}(pin={}) to off'.format(self.name, self.pin))
GPIO.output(self.pin, GPIO.LOW)
def status(self):
return GPIO.input(self.pin)
class MPDStatusLED(LED):
logger = logging.getLogger("MPDStatusLED")
def __init__(self, pin, host='localhost', port=6600, name='MPDStatusLED'):
super(MPDStatusLED, self).__init__(pin, initial_value=False, name=name)
self.mpc = mpd.MPDClient()
self.host = host
self.port = port
        self.logger.info('Waiting for MPD Connection on {}:{}'.format(
            self.host, self.port))
while not self.has_mpd_connection():
self.logger.debug('No MPD Connection yet established')
time.sleep(1)
        self.logger.info('Connection to MPD server on host {}:{} established'.format(
            self.host, self.port))
self.on()
def has_mpd_connection(self):
self.mpc.disconnect()
try:
self.mpc.connect(self.host, self.port)
self.mpc.ping()
self.mpc.disconnect()
return True
        except (mpd.ConnectionError, OSError):
            # python-mpd2 raises its own mpd.ConnectionError, which is not a
            # subclass of the builtin OSError, so catch both explicitly
return False
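# A minimal usage sketch (the BCM pin numbers here are assumptions for
# illustration only):
#
#   status_led = MPDStatusLED(pin=23)  # blocks until MPD answers, then lights up
#   plain_led = LED(pin=24, initial_value=False, name='BusyLED')
#   plain_led.on()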
| 29.050847
| 106
| 0.616103
|
43184a88cc4cdcaccfdde3323f6cb86579a5b114
| 21,044
|
py
|
Python
|
designate/central/rpcapi.py
|
mrlesmithjr/designate
|
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
|
[
"Apache-2.0"
] | 145
|
2015-01-02T09:35:53.000Z
|
2021-12-14T17:03:53.000Z
|
designate/central/rpcapi.py
|
mrlesmithjr/designate
|
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
|
[
"Apache-2.0"
] | 6
|
2015-03-15T00:22:27.000Z
|
2019-12-16T09:37:38.000Z
|
designate/central/rpcapi.py
|
mrlesmithjr/designate
|
bff3d5f6e31fe595a77143ec4ac779c187bf72a8
|
[
"Apache-2.0"
] | 109
|
2015-01-13T16:47:34.000Z
|
2021-03-15T13:18:48.000Z
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from designate.loggingutils import rpc_logging
from designate import rpc
LOG = logging.getLogger(__name__)
CENTRAL_API = None
def reset():
global CENTRAL_API
CENTRAL_API = None
@rpc_logging(LOG, 'central')
class CentralAPI(object):
"""
Client side of the central RPC API.
API version history:
1.0 - Initial version
1.1 - Add new finder methods
1.2 - Add get_tenant and get_tenants
1.3 - Add get_absolute_limits
2.0 - Renamed most get_resources to find_resources
2.1 - Add quota methods
3.0 - RecordSet Changes
3.1 - Add floating ip ptr methods
3.2 - TLD Api changes
3.3 - Add methods for blacklisted domains
4.0 - Create methods now accept designate objects
4.1 - Add methods for server pools
4.2 - Add methods for pool manager integration
4.3 - Added Zone Transfer Methods
5.0 - Remove dead server code
5.1 - Add xfr_zone
5.2 - Add Zone Import methods
5.3 - Add Zone Export method
5.4 - Add asynchronous Zone Export methods
5.5 - Add deleted zone purging task
5.6 - Changed 'purge_zones' function args
6.0 - Renamed domains to zones
6.1 - Add ServiceStatus methods
6.2 - Changed 'find_recordsets' method args
"""
RPC_API_VERSION = '6.2'
# This allows us to mark some methods as not logged.
    # This can be for a few reasons - some methods may not actually call over
    # RPC, or may be so noisy that logging them is not useful.
# This should be an array of strings that match the function names
LOGGING_BLACKLIST = ['update_service_status']
def __init__(self, topic=None):
self.topic = topic if topic else cfg.CONF['service:central'].topic
target = messaging.Target(topic=self.topic,
version=self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='6.2')
@classmethod
def get_instance(cls):
"""
        The rpc.get_client() call made during API object initialization
        raises an assertion error if designate.rpc.TRANSPORT has not been
        set up by rpc.init() beforehand.
        This fixes that by creating the rpcapi on demand.
"""
global CENTRAL_API
if not CENTRAL_API:
CENTRAL_API = cls()
return CENTRAL_API
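    # A minimal usage sketch (the `context` object is an assumption here; in
    # practice it is an oslo/designate request context):
    #
    #   central = CentralAPI.get_instance()
    #   zone = central.get_zone(context, zone_id)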
# Misc Methods
def get_absolute_limits(self, context):
return self.client.call(context, 'get_absolute_limits')
# Quota Methods
def get_quotas(self, context, tenant_id):
return self.client.call(context, 'get_quotas', tenant_id=tenant_id)
def get_quota(self, context, tenant_id, resource):
return self.client.call(context, 'get_quota', tenant_id=tenant_id,
resource=resource)
def set_quota(self, context, tenant_id, resource, hard_limit):
return self.client.call(context, 'set_quota', tenant_id=tenant_id,
resource=resource, hard_limit=hard_limit)
def reset_quotas(self, context, tenant_id):
return self.client.call(context, 'reset_quotas', tenant_id=tenant_id)
# TSIG Key Methods
def create_tsigkey(self, context, tsigkey):
return self.client.call(context, 'create_tsigkey', tsigkey=tsigkey)
def find_tsigkeys(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
return self.client.call(context, 'find_tsigkeys', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tsigkey(self, context, tsigkey_id):
return self.client.call(context, 'get_tsigkey', tsigkey_id=tsigkey_id)
def update_tsigkey(self, context, tsigkey):
return self.client.call(context, 'update_tsigkey', tsigkey=tsigkey)
def delete_tsigkey(self, context, tsigkey_id):
return self.client.call(context, 'delete_tsigkey',
tsigkey_id=tsigkey_id)
# Tenant Methods
def find_tenants(self, context):
return self.client.call(context, 'find_tenants')
def get_tenant(self, context, tenant_id):
return self.client.call(context, 'get_tenant', tenant_id=tenant_id)
def count_tenants(self, context):
return self.client.call(context, 'count_tenants')
# Zone Methods
def create_zone(self, context, zone):
return self.client.call(context, 'create_zone', zone=zone)
def get_zone(self, context, zone_id):
return self.client.call(context, 'get_zone', zone_id=zone_id)
def get_zone_ns_records(self, context, zone_id):
return self.client.call(context, 'get_zone_ns_records',
zone_id=zone_id)
def find_zones(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
return self.client.call(context, 'find_zones', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_zone(self, context, criterion=None):
return self.client.call(context, 'find_zone', criterion=criterion)
def update_zone(self, context, zone, increment_serial=True):
return self.client.call(context, 'update_zone', zone=zone,
increment_serial=increment_serial)
def delete_zone(self, context, zone_id):
return self.client.call(context, 'delete_zone', zone_id=zone_id)
def purge_zones(self, context, criterion, limit=None):
return self.client.call(context, 'purge_zones',
criterion=criterion, limit=limit)
def count_zones(self, context, criterion=None):
return self.client.call(context, 'count_zones', criterion=criterion)
def touch_zone(self, context, zone_id):
return self.client.call(context, 'touch_zone', zone_id=zone_id)
# TLD Methods
def create_tld(self, context, tld):
return self.client.call(context, 'create_tld', tld=tld)
def find_tlds(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
return self.client.call(context, 'find_tlds', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_tld(self, context, tld_id):
return self.client.call(context, 'get_tld', tld_id=tld_id)
def update_tld(self, context, tld):
return self.client.call(context, 'update_tld', tld=tld)
def delete_tld(self, context, tld_id):
return self.client.call(context, 'delete_tld', tld_id=tld_id)
# RecordSet Methods
def create_recordset(self, context, zone_id, recordset):
return self.client.call(context, 'create_recordset',
zone_id=zone_id, recordset=recordset)
def get_recordset(self, context, zone_id, recordset_id):
return self.client.call(context, 'get_recordset', zone_id=zone_id,
recordset_id=recordset_id)
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None, force_index=False):
return self.client.call(context, 'find_recordsets',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir, force_index=force_index)
def find_recordset(self, context, criterion=None):
return self.client.call(context, 'find_recordset', criterion=criterion)
def export_zone(self, context, zone_id):
return self.client.call(context, 'export_zone', zone_id=zone_id)
def update_recordset(self, context, recordset, increment_serial=True):
return self.client.call(context, 'update_recordset',
recordset=recordset,
increment_serial=increment_serial)
def delete_recordset(self, context, zone_id, recordset_id,
increment_serial=True):
return self.client.call(context, 'delete_recordset',
zone_id=zone_id,
recordset_id=recordset_id,
increment_serial=increment_serial)
def count_recordsets(self, context, criterion=None):
return self.client.call(context, 'count_recordsets',
criterion=criterion)
# Record Methods
def create_record(self, context, zone_id, recordset_id, record,
increment_serial=True):
return self.client.call(context, 'create_record',
zone_id=zone_id,
recordset_id=recordset_id,
record=record,
increment_serial=increment_serial)
def get_record(self, context, zone_id, recordset_id, record_id):
return self.client.call(context, 'get_record',
zone_id=zone_id,
recordset_id=recordset_id,
record_id=record_id)
def find_records(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
return self.client.call(context, 'find_records', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_record(self, context, criterion=None):
return self.client.call(context, 'find_record', criterion=criterion)
def update_record(self, context, record, increment_serial=True):
return self.client.call(context, 'update_record',
record=record,
increment_serial=increment_serial)
def delete_record(self, context, zone_id, recordset_id, record_id,
increment_serial=True):
return self.client.call(context, 'delete_record',
zone_id=zone_id,
recordset_id=recordset_id,
record_id=record_id,
increment_serial=increment_serial)
def count_records(self, context, criterion=None):
return self.client.call(context, 'count_records', criterion=criterion)
# Misc. Report combining counts for tenants, zones and records
def count_report(self, context, criterion=None):
return self.client.call(context, 'count_report', criterion=criterion)
# Sync Methods
def sync_zones(self, context):
return self.client.call(context, 'sync_zones')
def sync_zone(self, context, zone_id):
return self.client.call(context, 'sync_zone', zone_id=zone_id)
def sync_record(self, context, zone_id, recordset_id, record_id):
return self.client.call(context, 'sync_record',
zone_id=zone_id,
recordset_id=recordset_id,
record_id=record_id)
def list_floatingips(self, context):
return self.client.call(context, 'list_floatingips')
def get_floatingip(self, context, region, floatingip_id):
return self.client.call(context, 'get_floatingip', region=region,
floatingip_id=floatingip_id)
def update_floatingip(self, context, region, floatingip_id, values):
return self.client.call(context, 'update_floatingip', region=region,
floatingip_id=floatingip_id, values=values)
# Blacklisted Zone Methods
def create_blacklist(self, context, blacklist):
return self.client.call(context, 'create_blacklist',
blacklist=blacklist)
def get_blacklist(self, context, blacklist_id):
return self.client.call(context, 'get_blacklist',
blacklist_id=blacklist_id)
def find_blacklists(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
return self.client.call(
context, 'find_blacklists', criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_blacklist(self, context, criterion):
return self.client.call(context, 'find_blacklist', criterion=criterion)
def update_blacklist(self, context, blacklist):
return self.client.call(context, 'update_blacklist',
blacklist=blacklist)
def delete_blacklist(self, context, blacklist_id):
return self.client.call(context, 'delete_blacklist',
blacklist_id=blacklist_id)
# Pool Server Methods
def create_pool(self, context, pool):
return self.client.call(context, 'create_pool', pool=pool)
def find_pools(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
return self.client.call(context, 'find_pools', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_pool(self, context, criterion=None):
return self.client.call(context, 'find_pool', criterion=criterion)
def get_pool(self, context, pool_id):
return self.client.call(context, 'get_pool', pool_id=pool_id)
def update_pool(self, context, pool):
return self.client.call(context, 'update_pool', pool=pool)
def delete_pool(self, context, pool_id):
return self.client.call(context, 'delete_pool', pool_id=pool_id)
# Pool Manager Integration Methods
def update_status(self, context, zone_id, status, serial):
self.client.cast(context, 'update_status', zone_id=zone_id,
status=status, serial=serial)
# Zone Ownership Transfers
def create_zone_transfer_request(self, context, zone_transfer_request):
return self.client.call(
context, 'create_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def get_zone_transfer_request(self, context, zone_transfer_request_id):
return self.client.call(
context,
'get_zone_transfer_request',
zone_transfer_request_id=zone_transfer_request_id)
def find_zone_transfer_requests(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self.client.call(
context, 'find_zone_transfer_requests', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_zone_transfer_request(self, context, zone_transfer_request):
return self.client.call(
context, 'find_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def update_zone_transfer_request(self, context, zone_transfer_request):
return self.client.call(
context, 'update_zone_transfer_request',
zone_transfer_request=zone_transfer_request)
def delete_zone_transfer_request(self, context, zone_transfer_request_id):
return self.client.call(
context,
'delete_zone_transfer_request',
zone_transfer_request_id=zone_transfer_request_id)
def create_zone_transfer_accept(self, context, zone_transfer_accept):
return self.client.call(
context, 'create_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def get_zone_transfer_accept(self, context, zone_transfer_accept_id):
return self.client.call(
context,
'get_zone_transfer_accept',
zone_transfer_accept_id=zone_transfer_accept_id)
def find_zone_transfer_accepts(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self.client.call(
context, 'find_zone_transfer_accepts', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def find_zone_transfer_accept(self, context, zone_transfer_accept):
return self.client.call(
context, 'find_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def update_zone_transfer_accept(self, context, zone_transfer_accept):
return self.client.call(
context, 'update_zone_transfer_accept',
zone_transfer_accept=zone_transfer_accept)
def delete_zone_transfer_accept(self, context, zone_transfer_accept_id):
return self.client.call(
context,
'delete_zone_transfer_accept',
zone_transfer_accept_id=zone_transfer_accept_id)
def xfr_zone(self, context, zone_id):
return self.client.call(context, 'xfr_zone', zone_id=zone_id)
# Zone Import Methods
def create_zone_import(self, context, request_body):
return self.client.call(context, 'create_zone_import',
request_body=request_body)
def find_zone_imports(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self.client.call(context, 'find_zone_imports',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_zone_import(self, context, zone_import_id):
return self.client.call(context, 'get_zone_import',
zone_import_id=zone_import_id)
def update_zone_import(self, context, zone_import):
return self.client.call(context, 'update_zone_import',
zone_import=zone_import)
def delete_zone_import(self, context, zone_import_id):
return self.client.call(context, 'delete_zone_import',
zone_import_id=zone_import_id)
# Zone Export Methods
def create_zone_export(self, context, zone_id):
return self.client.call(context, 'create_zone_export',
zone_id=zone_id)
def find_zone_exports(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self.client.call(context, 'find_zone_exports',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def get_zone_export(self, context, zone_export_id):
return self.client.call(context, 'get_zone_export',
zone_export_id=zone_export_id)
def update_zone_export(self, context, zone_export):
return self.client.call(context, 'update_zone_export',
zone_export=zone_export)
def delete_zone_export(self, context, zone_export_id):
return self.client.call(context, 'delete_zone_export',
zone_export_id=zone_export_id)
def find_service_status(self, context, criterion=None):
return self.client.call(context, 'find_service_status',
criterion=criterion)
def find_service_statuses(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self.client.call(context, 'find_service_statuses',
criterion=criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def update_service_status(self, context, service_status):
self.client.cast(context, 'update_service_status',
service_status=service_status)
| 42.85947
| 79
| 0.636096
|
2315ddaaa6c246368e901870ef40f87018c0d644
| 7,605
|
py
|
Python
|
models/nn/convnet.py
|
gdikov/MNIST_Challenge
|
56834aeeaeefc440cb1d9882c95b73b84fe20edf
|
[
"MIT"
] | 1
|
2017-02-15T12:19:46.000Z
|
2017-02-15T12:19:46.000Z
|
models/nn/convnet.py
|
gdikov/MNIST-challenge
|
56834aeeaeefc440cb1d9882c95b73b84fe20edf
|
[
"MIT"
] | 1
|
2017-02-07T12:51:17.000Z
|
2017-02-07T12:51:17.000Z
|
models/nn/convnet.py
|
gdikov/MNIST_Challenge
|
56834aeeaeefc440cb1d9882c95b73b84fe20edf
|
[
"MIT"
] | 1
|
2019-01-22T17:06:03.000Z
|
2019-01-22T17:06:03.000Z
|
from models.model import AbstractModel
from models.nn.layers import *
from numerics.softmax import softmax
import config as cfg
import os
import cPickle
import numpy as np
# from utils.vizualiser import plot_filters
class ConvolutionalNeuralNetwork(AbstractModel):
def __init__(self, convolution_mode='scipy'):
super(ConvolutionalNeuralNetwork, self).__init__('ConvNet')
self.batch_size = cfg.batch_size
if convolution_mode in ['scipy', 'naive']:
self.conv_mode = convolution_mode
else:
            raise ValueError(
                "convolution_mode must be 'scipy' or 'naive', got {!r}".format(convolution_mode))
self._build_network()
self.train_history = {'train_loss': [],
'val_acc': []}
def _build_network(self):
"""
Build a modified version of LeNet
:return:
"""
inp_layer = Input()
filter_size = 5
conv1 = Conv(incoming=inp_layer,
conv_params={'stride': 1, 'pad': (filter_size - 1) / 2, 'filter_size': filter_size},
num_filters=20,
conv_mode=self.conv_mode)
relu1 = ReLU(incoming=conv1)
pool1 = Pool(incoming=relu1,
pool_params={'pool_height': 2, 'pool_width': 2, 'stride': 2})
conv2 = Conv(incoming=pool1,
conv_params={'stride': 1, 'pad': (filter_size - 1) / 2, 'filter_size': filter_size},
num_filters=50,
conv_mode=self.conv_mode)
relu2 = ReLU(incoming=conv2)
pool2 = Pool(incoming=relu2, pool_params={'pool_height': 2, 'pool_width': 2, 'stride': 2})
linear1 = Linear(incoming=pool2, num_units=500)
lrelu1 = ReLU(incoming=linear1)
dropout1 = Dropout(incoming=lrelu1, p=0.5)
out_layer = Linear(incoming=dropout1, num_units=10)
self.layers = (inp_layer,
conv1, relu1, pool1,
conv2, relu2, pool2,
linear1, lrelu1,
dropout1,
out_layer)
def save_trainable_params(self):
path_to_params = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained')
if not os.path.exists(path_to_params):
os.makedirs(path_to_params)
for layer_id, layer in enumerate(self.layers):
if layer.params is not None:
with open(os.path.join(path_to_params, 'layer_{0}.npy'.format(layer_id)), 'wb') as f:
cPickle.dump(layer.params, f)
def load_trainable_params(self):
path_to_params = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pretrained')
        if not os.path.exists(path_to_params):
            print("Path to pre-computed parameters not found at: {}".format(path_to_params))
            raise IOError("no pre-computed parameters at {}".format(path_to_params))
for layer_id, layer in enumerate(self.layers):
if layer.params is not None:
print("\tLoading pre-computed parameters for layer {0}".format(layer_id))
with open(os.path.join(path_to_params, 'layer_{0}.npy'.format(layer_id)), 'rb') as f:
layer.params = cPickle.load(f)
def _compute_forward_pass(self, inp_data_batch, mode):
out_data_batch = self.layers[0].forward(inp_data_batch)
for layer_id in xrange(1, len(self.layers)):
out_data_batch = self.layers[layer_id].forward(out_data_batch, mode=mode)
return out_data_batch
def _compute_backward_pass(self, end_derivatives):
# update the last layer manually
upstream_derivatives = self.layers[-1].backward(end_derivatives)
self.layers[-1].update_trainable_params()
for layer_id in xrange(len(self.layers)-2, 0, -1):
upstream_derivatives = self.layers[layer_id].backward(upstream_derivatives)
self.layers[layer_id].update_trainable_params()
return upstream_derivatives
def _compute_loss(self, scores, targets):
num_train = scores.shape[0]
probabilities = softmax(scores)
loss = -np.sum(np.log(probabilities[np.arange(num_train), targets])) / num_train
probabilities[np.arange(num_train), targets] -= 1
dsoftmax = probabilities / num_train
return loss, dsoftmax
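    # For reference: with p = softmax(scores) and one-hot targets y, the
    # cross-entropy gradient w.r.t. the scores is (p - y) / num_train, which
    # is exactly what the in-place subtraction above computes.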
def _batch_idx(self, data_size, shuffle=True):
if shuffle:
# maybe this is unnecessary because they are already shuffled
# but it doesn't harm much to do it again
shuffled_order = np.random.permutation(np.arange(data_size))
else:
shuffled_order = np.arange(data_size)
for x in np.array_split(shuffled_order, data_size // self.batch_size):
yield x
def fit(self, train_data, **kwargs):
        self.data = train_data
        # flatten the images into row vectors of 28*28 elements
num_samples, dim_x, dim_y = self.data['x_train'].shape
self.data['x_train'] = self.data['x_train'].reshape(num_samples, dim_x * dim_y)
num_epochs = kwargs.get('num_epochs', 100)
best_val_acc = 0.0
for i in xrange(num_epochs):
epoch_losses = []
for idx in self._batch_idx(num_samples):
scores = self._compute_forward_pass(self.data['x_train'][idx], mode='train')
loss, dscores = self._compute_loss(scores, self.data['y_train'][idx])
self._compute_backward_pass(dscores)
self.train_history['train_loss'].append(loss)
epoch_losses.append(loss)
print("\t\tMinibatch train loss: {}".format(loss))
# validate
val_predictions = self.predict(self.data['x_val'])
val_acc = np.sum(val_predictions == self.data['y_val']) / float(val_predictions.shape[0]) * 100.
self.train_history['val_acc'].append(val_acc)
if val_acc > best_val_acc:
print("\t\tSaving weights")
self.save_trainable_params()
best_val_acc = val_acc
print("\t\tEpoch: {0}, mean loss: {1}, validation accuracy: {2}".format(i, np.mean(epoch_losses), val_acc))
def predict(self, new_data, **kwargs):
        # flatten the images into row vectors of 28*28 elements
num_samples, dim_x, dim_y = new_data.shape
new_data = new_data.reshape(num_samples, dim_x * dim_y)
scores_all = []
for i, idx in enumerate(self._batch_idx(num_samples, shuffle=False)):
scores = self._compute_forward_pass(new_data[idx], mode='test')
scores_all.append(scores)
scores_all = np.concatenate(scores_all)
return np.argmax(scores_all, axis=1)
if __name__ == "__main__":
from utils.data_utils import load_MNIST
data_train, data_test = load_MNIST()
model = ConvolutionalNeuralNetwork()
model.load_trainable_params()
# plot_filters(model.layers[1].params['W'], plot_shape=(2,10), channel=1)
# model.fit(data, num_epochs=100)
predictions = model.predict(data_train['x_val'])
    val_acc = np.sum(predictions == data_train['y_val']) / float(predictions.shape[0]) * 100.
    print("Validation accuracy: {0}".format(val_acc))
#
    # misclassified_idx = predictions != data['y_val'][:100]
# from utils.vizualiser import plot_digits
# #
    # plot_digits(data['x_val'][:100][misclassified_idx][:64], predictions[misclassified_idx][:64], plot_shape=(8, 8))
| 40.238095
| 119
| 0.620644
|
37af50eba96b794ed6534cb2f8db62996566cf05
| 1,241
|
py
|
Python
|
algospot/lec9_DP/[cutz]blockgame.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | 3
|
2019-11-26T14:31:01.000Z
|
2020-01-10T18:19:46.000Z
|
algospot/lec9_DP/[cutz]blockgame.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
algospot/lec9_DP/[cutz]blockgame.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
import sys
move = []
def cell(y, x):
return 1 << (y*5 + x)
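# each of the 25 board squares maps to one bit of an integer bitmask,
# e.g. cell(0, 0) == 1 and cell(1, 2) == 1 << (1*5 + 2) == 128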
def precalc():
for y in range(4):
for x in range(4):
cells = []
for dy in range(2):
for dx in range(2):
cells.append(cell(y+dy, x+dx))
square = cells[0] + cells[1] + cells[2] + cells[3]
for i in range(4):
move.append(square - cells[i])
for i in range(5):
for j in range(4):
move.append(cell(i, j) + cell(i, j+1))
move.append(cell(j, i) + cell(j+1, i))
def play(board):
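    # a position is winning for the player to move iff some legal placement
    # leaves the opponent in a losing position (standard game-tree recursion,
    # memoized on the board bitmask)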
if cache.get(board, -1) != -1:
return cache[board]
ret = 0
for i in range(len(move)):
if move[i] & board == 0:
if play(board | move[i]) == 0:
ret = 1
break
cache[board] = ret
return ret
#rl = lambda : sys.stdin.readline()
rl = input
C = int(rl())
precalc()
for _ in range(C):
board = 0
cache = {}
for i in range(5):
row = rl()
for j in range(5):
if row[j] == '#':
board += cell(i, j)
result = play(board)
    print('WINNING' if result else 'LOSING')
| 22.563636
| 62
| 0.438356
|
ee11e06723f1648a5717bda7666d7446edee7f89
| 328
|
py
|
Python
|
Python Tutorial Beginner/16_Read_file.py
|
PaulPan00/donkey_wrapper
|
a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
[
"MIT"
] | 6
|
2021-03-26T01:42:31.000Z
|
2021-04-11T16:17:42.000Z
|
Python Tutorial Beginner/16_Read_file.py
|
packetsss/Python
|
a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
[
"MIT"
] | null | null | null |
Python Tutorial Beginner/16_Read_file.py
|
packetsss/Python
|
a03cf0f42f65625fbce792b06c98acd153c5d6c8
|
[
"MIT"
] | 7
|
2021-04-06T06:55:22.000Z
|
2021-05-03T11:26:38.000Z
|
# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited
file = open("Ex.txt", "r")
# print(file.readable())
# print(file.read())
print(file.readlines()[0])
# for employee in file.readlines():
# print(employee)
file.close()
file = open("Ex2.txt", "w")
file.write("\nWhy am I sad?")
file.close()
| 15.619048
| 35
| 0.658537
|
3d77336188d792ba2f715e760aebf2476fdcca02
| 270
|
py
|
Python
|
tests/test_crv/crv_test_main.py
|
jonpovey/cocotb-coverage
|
8a8826a68af261a4195f9c315a59d8eb913bb24e
|
[
"BSD-2-Clause"
] | 70
|
2018-11-28T12:06:18.000Z
|
2022-02-23T20:36:12.000Z
|
tests/test_crv/crv_test_main.py
|
jonpovey/cocotb-coverage
|
8a8826a68af261a4195f9c315a59d8eb913bb24e
|
[
"BSD-2-Clause"
] | 58
|
2018-12-19T12:08:53.000Z
|
2022-01-27T07:45:36.000Z
|
tests/test_crv/crv_test_main.py
|
jonpovey/cocotb-coverage
|
8a8826a68af261a4195f9c315a59d8eb913bb24e
|
[
"BSD-2-Clause"
] | 16
|
2018-12-26T22:59:48.000Z
|
2022-03-21T06:21:47.000Z
|
import cocotb
import pytest
import crv_test
from cocotb.triggers import Timer
from cocotb.result import TestFailure
@cocotb.test()
def test_crv(dut):
exitcode = pytest.main()
if exitcode != pytest.ExitCode.OK:
raise TestFailure()
yield Timer(1000)
| 19.285714
| 38
| 0.733333
|
4535fac78da8dbf8a5d0934e7f0b901c40b6e12c
| 7,119
|
py
|
Python
|
ogr/factory.py
|
pawelkopka/ogr
|
a88e9bbc76fe1476477165de496ab458e310d5ee
|
[
"MIT"
] | null | null | null |
ogr/factory.py
|
pawelkopka/ogr
|
a88e9bbc76fe1476477165de496ab458e310d5ee
|
[
"MIT"
] | 5
|
2021-04-08T22:01:19.000Z
|
2022-02-10T12:21:29.000Z
|
ogr/factory.py
|
pawelkopka/ogr
|
a88e9bbc76fe1476477165de496ab458e310d5ee
|
[
"MIT"
] | 1
|
2020-01-25T14:40:32.000Z
|
2020-01-25T14:40:32.000Z
|
# MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
from typing import Dict, Type, Optional, Set, Iterable
from ogr.abstract import GitService, GitProject
from ogr.exceptions import OgrException
from ogr.parsing import parse_git_repo
_SERVICE_MAPPING: Dict[str, Type[GitService]] = {}
def use_for_service(service: str, _func=None):
"""
Class decorator that adds the class to the service mapping.
When the project url contains the `service` as a substring,
this implementation will be used to initialize the project.
    When using this decorator, be sure that your class actually gets imported
    (add the import to ogr/__init__.py) so that the registration runs.
Usage:
@use_for_service("github.com")
class GithubService(BaseGitService):
pass
@use_for_service("pagure.io")
@use_for_service("src.fedoraproject.org")
class PagureService(BaseGitService):
pass
:param service: str (url of the service)
"""
def decorator_cover(func):
@functools.wraps(func)
def covered_func(kls: Type[GitService]):
_SERVICE_MAPPING[service] = kls
return kls
return covered_func
return decorator_cover(_func)
def get_project(
    url: str,
service_mapping_update: Dict[str, Type[GitService]] = None,
custom_instances: Iterable[GitService] = None,
**kwargs,
) -> GitProject:
"""
Return the project for the given url.
:param url: str (url of the project, e.g. "https://github.com/packit-service/ogr")
:param service_mapping_update: custom mapping from service url (str) to service class
:param custom_instances: list of instances that will be used when creating a project instance
:param kwargs: arguments forwarded to __init__ of the matching service
:return: GitProject using the matching implementation
"""
kls = get_service_class(url=url, service_mapping_update=service_mapping_update)
if custom_instances:
for service_inst in custom_instances:
if isinstance(service_inst, kls) and service_inst.instance_url in url:
service = service_inst
break
else:
raise OgrException(
f"Instance of type {kls.__name__} "
f"matching instance url '{url}' was not provided."
)
else:
repo_url = parse_git_repo(potential_url=url)
service = kls(instance_url=repo_url.get_instance_url(), **kwargs)
project = service.get_project_from_url(url=url)
return project
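# A minimal usage sketch (the token keyword argument is illustrative only;
# any kwargs are forwarded to the matching service's __init__):
#
#   project = get_project("https://github.com/packit-service/ogr", token="...")
#   # -> a GitProject backed by the implementation registered for github.com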
def get_service_class_or_none(
url: str, service_mapping_update: Dict[str, Type[GitService]] = None
) -> Optional[Type[GitService]]:
"""
Get the matching service class from the url.
:param url: str (url of the project, e.g. "https://github.com/packit-service/ogr")
:param service_mapping_update: custom mapping from service url (str) to service class
:return: Matched class (subclass of GitService) or None
"""
mapping = {}
mapping.update(_SERVICE_MAPPING)
if service_mapping_update:
mapping.update(service_mapping_update)
for service, service_kls in mapping.items():
if service in url:
return service_kls
return None
def get_service_class(
url: str, service_mapping_update: Dict[str, Type[GitService]] = None
) -> Type[GitService]:
"""
Get the matching service class from the url.
:param url: str (url of the project, e.g. "https://github.com/packit-service/ogr")
:param service_mapping_update: custom mapping from service url (str) to service class
:return: Matched class (subclass of GitService)
"""
service_kls = get_service_class_or_none(
url=url, service_mapping_update=service_mapping_update
)
if service_kls:
return service_kls
raise OgrException("No matching service was found.")
def get_instances_from_dict(instances: dict) -> Set[GitService]:
"""
Load the service instances from the dictionary in the following form:
key = hostname, url or name that can be mapped to the service-type
value = dictionary with arguments used when creating a new instance of the service
(passed to the `__init__` method)
e.g.:
{
"github.com": {"token": "abcd"},
"pagure": {
"token": "abcd",
"instance_url": "https://src.fedoraproject.org",
},
},
=> {
GithubService(token="abcd"),
PagureService(token="abcd", instance_url="https://src.fedoraproject.org")
}
When the mapping key->service-type is not recognised, you can add a `type` key to the dictionary
and specify the type of the instance.
(It can be either name, hostname or url. The used mapping is same as for key->service-type.)
The provided `key` is used as an `instance_url` and passed to the `__init__` method as well.
e.g.:
{
"https://my.gtlb": {"token": "abcd", "type": "gitlab"},
},
=> {GitlabService(token="abcd", instance_url="https://my.gtlb")}
:param instances: mapping from service name/url/hostname to attributes for the service creation
:return: set of the service instances
"""
services = set()
for key, value in instances.items():
service_kls = get_service_class_or_none(url=key)
if not service_kls:
if "type" not in value:
raise OgrException(
f"No matching service was found for url '{key}'. "
f"Add the service name as a `type` attribute."
)
service_type = value["type"]
if service_type not in _SERVICE_MAPPING:
raise OgrException(
f"No matching service was found for type '{service_type}'."
)
service_kls = _SERVICE_MAPPING[service_type]
value.setdefault("instance_url", key)
del value["type"]
service_instance = service_kls(**value)
services.add(service_instance)
return services
| 35.068966
| 100
| 0.674673
|
5f60e20d2a72125ff2323de58f084a5c48b99486
| 37,083
|
py
|
Python
|
run_classifier.py
|
cospplay/bert-master
|
433923003ebfb60193ff5f9f81e4d96ad3d83f1b
|
[
"Apache-2.0"
] | 5
|
2021-04-16T01:44:49.000Z
|
2022-01-11T01:10:17.000Z
|
run_classifier.py
|
cospplay/bert-master
|
433923003ebfb60193ff5f9f81e4d96ad3d83f1b
|
[
"Apache-2.0"
] | null | null | null |
run_classifier.py
|
cospplay/bert-master
|
433923003ebfb60193ff5f9f81e4d96ad3d83f1b
|
[
"Apache-2.0"
] | 2
|
2021-05-13T07:35:33.000Z
|
2022-01-06T06:27:30.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import pandas as pd
import modeling
import optimization
import tokenization
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class MyDataProcessor(DataProcessor):
    """Processor for a custom three-class sentiment data set."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
        file_path = os.path.join(data_dir, 'train_sentiment_2.txt')
        f = open(file_path, 'r', encoding='utf-8')
        train_data = []
        index = 0
        for line in f.readlines():
            guid = "train-%d" % (index)
            line = line.replace('\n', '').split('\t')
            text_a = tokenization.convert_to_unicode(str(line[1]))
            label = str(line[2])
            train_data.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label)
            )
            index += 1
        return train_data
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
file_path = os.path.join(data_dir, 'test_sentiment.txt')
f = open(file_path, 'r', encoding='utf-8')
train_data = []
index = 0
for line in f.readlines():
guid = "dev-%d" % (index)
line = line.replace('\n', '').split('\t')
text_a = tokenization.convert_to_unicode(str(line[1]))
label = str(line[2])
train_data.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label)
)
index += 1
return train_data
    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        file_path = os.path.join(data_dir, 'test.csv')
        test_df = pd.read_csv(file_path, encoding='utf-8')
        test_data = []
        for index, test in enumerate(test_df.values):
            guid = "test-%d" % (index)
            text_a = tokenization.convert_to_unicode(str(test[0]))
            label = str(test[1])
            test_data.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label)
            )
        return test_data
def get_labels(self):
"""Gets the list of labels for this data set."""
return ['0', "1", "2"]
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
# data processing
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
  label_map = {}  # build the label -> id mapping (here 0, 1, 2)
for (i, label) in enumerate(label_list):
label_map[label] = i
  tokens_a = tokenizer.tokenize(example.text_a)  # tokenize the first sentence; a second sentence, if present, is handled below
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3" (reserve three slots for special tokens)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
    # Account for [CLS] and [SEP] with "- 2" (reserve two slots for special tokens)
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  # type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  #   (type_ids mark which sentence each token belongs to: 0 = first, 1 = second)
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
  input_ids = tokenizer.convert_tokens_to_ids(tokens)  # map tokens to vocabulary ids
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
  input_mask = [1] * len(input_ids)  # 1 for real tokens, 0 for padding
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
  writer = tf.python_io.TFRecordWriter(output_file)  # write examples to a TFRecord file to speed up later reads
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely carries more information than a token from the
# longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
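# Worked example (editor's sketch): with len(tokens_a) == 7, len(tokens_b) == 5
# and max_length == 8, the loop pops tokens_a three times and tokens_b once,
# leaving four tokens on each side:
#   >>> tokens_a, tokens_b = list("abcdefg"), list("vwxyz")
#   >>> _truncate_seq_pair(tokens_a, tokens_b, 8)
#   >>> len(tokens_a), len(tokens_b)
#   (4, 4)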
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
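# In equations (editor's note), the head above is a single affine layer plus a
# softmax over the pooled [CLS] output h, with weights W and bias b:
#   logits        = h W^T + b
#   probabilities = softmax(logits)
#   loss          = mean over examples of -log p(correct label)
# For instance, logits of [2.0, 0.0] yield probabilities of roughly [0.88, 0.12].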
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
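# Usage sketch (editor's note; the batch size is illustrative): TPUEstimator
# supplies params["batch_size"] when it calls the returned closure, e.g.
#   >>> input_fn = input_fn_builder(features, seq_length=128,
#   ...                             is_training=False, drop_remainder=False)
#   >>> dataset = input_fn({"batch_size": 32})   # a batched tf.data.Dataset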
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
"myda": MyDataProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)  # load the training data
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)  # total number of training steps
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)  # warmup keeps the initial learning rate small
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
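# Example invocation (editor's sketch; the script name and paths are
# placeholders, and only the flags marked required above plus a mode flag
# are shown):
#   python bert_classifier.py --task_name=myda --do_train=true \
#     --data_dir=./data --vocab_file=./vocab.txt \
#     --bert_config_file=./bert_config.json --output_dir=./output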
| 35.486124 | 94 | 0.678775 |
9b9d3d5b3fe80247642d962edd6fb787537d01d6 | 102 | py | Python | mmseg/models/necks/__init__.py | HeqingZhang/mmsegmentation | 90d8038e909be9f2154b49d15f95a648ceb75120 | ["Apache-2.0"] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | mmseg/models/necks/__init__.py | HeqingZhang/mmsegmentation | 90d8038e909be9f2154b49d15f95a648ceb75120 | ["Apache-2.0"] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | mmseg/models/necks/__init__.py | HeqingZhang/mmsegmentation | 90d8038e909be9f2154b49d15f95a648ceb75120 | ["Apache-2.0"] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z |
from .fpn import FPN
from .multilevel_neck import MultiLevelNeck
__all__ = ['FPN', 'MultiLevelNeck']
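# Usage sketch (editor's note): the names exported above are then importable
# from the package, e.g.
#   >>> from mmseg.models.necks import FPN, MultiLevelNeck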
| 20.4 | 43 | 0.77451 |
5a71707e864e946c3a98706efc7d211f16a86868 | 4673 | py | Python | lib/python2.7/site-packages/appionlib/apTomoFullRecon.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | ["MIT"] | null | null | null | lib/python2.7/site-packages/appionlib/apTomoFullRecon.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | ["MIT"] | null | null | null | lib/python2.7/site-packages/appionlib/apTomoFullRecon.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | ["MIT"] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z |
#!/usr/bin/env python
import os
import shutil
import subprocess
#appion
from appionlib import apTomoMakerBase
from appionlib import apImod
from appionlib import apTomo
from appionlib import apParam
from appionlib import apDisplay
#=====================
#=====================
class ImodMaker(apTomoMakerBase.TomoMaker):
#=====================
def checkConflicts(self):
super(ImodMaker,self).checkConflicts()
if self.params['method'] not in self.methods:
apDisplay.printError("No valid recon method specified")
#=====================
def postProcessingRecon(self):
# Full tomogram created with imod is left-handed XZY
voltransform = 'flipx'
origtomopath = os.path.join(self.params['rundir'], self.seriesname+"_full.rec")
currenttomopath = apImod.transformVolume(origtomopath,voltransform)
shutil.move(currenttomopath, origtomopath)
class ImodFullMaker(ImodMaker):
def setMethod(self):
self.params['method'] = 'imodwbp'
def recon3D(self):
processdir = self.params['rundir']
stackdir = self.params['tiltseriesdir']
bin = self.params['bin']
# Create Aligned Stack
apImod.createAlignedStack(stackdir, processdir, self.seriesname,bin)
# Reconstruction
apImod.recon3D(stackdir, processdir, self.seriesname, self.imgshape, self.params['thickness']/bin, False, self.excludelist)
class ETomoMaker(ImodMaker):
def setMethod(self):
self.params['method'] = 'etomo'
def setupParserOptions(self):
#super(ETomoMaker,self).setupParserOptions()
self.parser.add_option("-s", "--session", dest="sessionname",
help="Session name (e.g. 06mar12a)", metavar="SESSION")
self.parser.add_option("--samplerunid", dest="samplerunid", type="int",
help="Runid of the recon sample generation, e.g. --samplerunid=2", metavar="int")
self.parser.remove_option("--runname")
self.parser.remove_option("--rundir")
return
def checkConflicts(self):
self.rundata = apTomo.getFullTomoRunById(self.params['samplerunid'])
if self.rundata['method'] != 'etomo':
apDisplay.printError('The full tomogram run is not made for ETOMO manual reconstruction')
paramfile = os.path.join(self.rundata['path']['path'],'%s_sample.params' % (self.rundata['runname']))
sampleparams = apParam.readRunParameters(paramfile)
self.params['alignerid'] = sampleparams['alignerid']
self.params['description'] = sampleparams['description'] + '\n' + self.params['description']
self.params['method'] = self.rundata['method']
self.params['runname'] = self.rundata['runname']
self.params['rundir'] = self.rundata['path']['path']
def setupExcludeList(self):
self.params['exclude'] = apImod.getETomoExcludeTiltNumber(self.params['rundir'])
super(ETomoMaker,self).setupExcludeList()
def createTransformFile(self):
pass
def prepareRecon(self):
pass
def recon3D(self):
proc = subprocess.Popen("etomo --debug --fg %s.edf" % (self.seriesname), shell=True)
proc.wait()
reconfilepath = os.path.join(self.params['rundir'],'%s_full.rec' % (self.seriesname))
if not os.path.exists(reconfilepath):
apDisplay.printError('%s was not generated; cannot commit to database.' % (reconfilepath))
def getReconParams(self):
tilt_angle_offset = float(apImod.getETomoParam(self.params['rundir'], 'tilt.com', ['OFFSET'])[0])
z_shift = apImod.getImodZShift(self.params['rundir'])
tilt_axis_tilt = float(apImod.getETomoParam(self.params['rundir'], 'tilt.com', ['XAXISTILT'])[0])
image_rotation = float(apImod.getETomoParam(self.params['rundir'], self.seriesname+'.edf', ['Setup.ImageRotationA='])[0])
return apTomo.insertFullReconParams(tilt_angle_offset,z_shift,tilt_axis_tilt,image_rotation)
def commitToDatabase(self):
self.params['bin'] = apImod.getETomoBin(self.params['rundir'],self.seriesname)
self.params['thickness'] = apImod.getETomoThickness(self.params['rundir'],self.seriesname)
super(ETomoMaker,self).commitToDatabase()
def onClose(self):
if self.fulltomodata:
apDisplay.printMsg('------------------------')
apDisplay.printWarning('To create a sub-tomogram reconstruction from this full tomogram and commit the result to the database, use etomo_subrecon.py to start eTOMO and continue at "Post-Processing" with the .edf file by running this AppionScript:')
apDisplay.printColor('etomo_subrecon.py --session=%s --projectid=%d --fulltomoid=%d --description="" --commit --expId=%d --jobtype=%s --runname=etomosub' % (self.params['sessionname'],self.params['projectid'],self.fulltomodata.dbid,self.params['expid'],'etomo_subrecon'),'cyan')
apDisplay.printMsg('------------------------')
#=====================
#=====================
if __name__ == '__main__':
app = ImodFullMaker()  # the concrete full-reconstruction maker defined above
app.start()
app.close()
| 41.723214 | 281 | 0.71453 |
661f41153f5f22bb6be87ca3f946d5174fe13ef1 | 1707 | py | Python | forecast_dataset_to_stories.py | Cyntwikip/PreSumm | 9abb00ca9910e210fe786c5a2805821a81ceca73 | ["MIT"] | null | null | null | forecast_dataset_to_stories.py | Cyntwikip/PreSumm | 9abb00ca9910e210fe786c5a2805821a81ceca73 | ["MIT"] | null | null | null | forecast_dataset_to_stories.py | Cyntwikip/PreSumm | 9abb00ca9910e210fe786c5a2805821a81ceca73 | ["MIT"] | null | null | null |
import pandas as pd
import re, os
import click
from tqdm import tqdm
def split_lines(text):
text = [' '.join(i.split()) for i in re.split(r'\n{2,}', text)]
text = [i for i in text if i]
return text
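# Example (editor's sketch): paragraphs are split on runs of two or more
# newlines and inner whitespace is collapsed:
#   >>> split_lines("first  paragraph\n\n\nsecond\nparagraph")
#   ['first paragraph', 'second paragraph']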
def preprocess(file, forecast_folder):
# file = '~/notebooks/Cognitive_Search/sash/data/feb_20/ulm_forecasts.csv'
df = pd.read_csv(file, usecols=[2,4,5])
df['reference_id'] = df['reference_id'].apply(lambda x: 0 if x!=x else x).astype(int)  # x != x is True only for NaN, so missing ids become 0
df = df.where(df['isLesson']==1).dropna()
df.drop('isLesson', axis=1, inplace=True)
df['paragraph'] = df['paragraph'].apply(split_lines)
df = df.reset_index(drop=True)
df['reference_id'] = df['reference_id'].astype(int)
df['title'] = df[['reference_id']].apply(lambda x: f'dummy lesson number {x.name} - {x[0]}', axis=1)
# path_story = '../Presumm2/PreSumm/raw_data/eva_forecast_02_21_2020/'
path_story = './raw_data/{}/'.format(forecast_folder)
if not os.path.isdir(path_story):
print('Path does not exist...')
print('Creating folder...')
os.mkdir(path_story)
for idx, rows in tqdm(df.iterrows()):
fn = '{:05} - {}.story'.format(idx, rows['reference_id'])
content = rows['paragraph'] + ['@highlight', rows['title']]
content = '\n\n'.join(content)
with open(path_story+fn, 'w+') as f:
f.write(content)
return
@click.group()
def cli():
pass
@cli.command()
@click.argument('filename')
@click.argument('forecast_folder')
def convert(filename, forecast_folder):
print(f'Converting {filename} to {forecast_folder}')
preprocess(filename, forecast_folder)
return
if __name__=='__main__':
cli()
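# Example invocation (editor's sketch; the csv path and folder name are
# placeholders):
#   python forecast_dataset_to_stories.py convert forecasts.csv feb_2020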
| 31.611111 | 104 | 0.632689 |
2f73df2075b6c7be91ff7129344df378a4379f5f | 154425 | py | Python | Tests/test_number.py | Xen0byte/ironpython3 | 2e5bb6025d01ab52ea4bfbaf6fd7e0fec8ec0194 | ["Apache-2.0"] | null | null | null | Tests/test_number.py | Xen0byte/ironpython3 | 2e5bb6025d01ab52ea4bfbaf6fd7e0fec8ec0194 | ["Apache-2.0"] | null | null | null | Tests/test_number.py | Xen0byte/ironpython3 | 2e5bb6025d01ab52ea4bfbaf6fd7e0fec8ec0194 | ["Apache-2.0"] | null | null | null |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import sys
import unittest
from iptest import IronPythonTestCase, is_cli, big, run_test, skipUnlessIronPython
def get_builtins_dict():
if type(__builtins__) is type(sys):
return __builtins__.__dict__
return __builtins__
class complextest:
def __init__(self, value): self.value = value
def __float__(self) : return self.value
class myfloat(float): pass
class SillyLong(int):
def __rmul__(self, other):
return big(42)
templates1 = [ "C(%s) %s C(%s)", "C2(%s) %s C2(%s)",
"C(%s) %s D(%s)", "D(%s) %s C(%s)",
"C2(%s) %s D(%s)", "D(%s) %s C2(%s)",
"C(%s) %s D2(%s)", "D2(%s) %s C(%s)",
"C2(%s) %s D2(%s)", "D2(%s) %s C2(%s)"]
templates2 = [x for x in templates1 if x.startswith('C')]
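# Example (editor's note): each template renders to a comparison expression
# that scenarios_helper later eval()s, e.g.
#   >>> templates1[0] % (3.5, "<", 4.5)
#   'C(3.5) < C(4.5)'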
if is_cli:
from System import *
class NumberTest(IronPythonTestCase):
def setUp(self):
super(NumberTest, self).setUp()
self.load_iron_python_test()
def test_complex(self):
self.assertEqual(complex(complextest(2.0)), 2+0j)
self.assertEqual(complex(complextest(myfloat(2.0))), 2+0j)
self.assertRaises(TypeError, complex, complextest(2))
def test_silly_long(self):
self.assertEqual(1 * SillyLong(2), 42)
self.assertEqual(SillyLong(2) * 1, 2)
if is_cli:
self.assertEqual((1).__mul__(SillyLong(2)), NotImplemented)
@skipUnlessIronPython()
def test_clr(self):
self.assertTrue(Single.IsInfinity(Single.PositiveInfinity))
self.assertTrue(not Single.IsInfinity(1.0))
x = [333, 1234.5, 1, 333, -1, 66.6]
x.sort()
self.assertTrue(x == [-1, 1, 66.6, 333, 333, 1234.5])
self.assertTrue(10 < 76927465928764592743659287465928764598274369258736489327465298374695287346592837496)
self.assertTrue(76927465928764592743659287465928764598274369258736489327465298374695287346592837496 > 10)
x = 3e1000
self.assertTrue(Double.IsInfinity(x))
self.assertTrue(Double.IsPositiveInfinity(x))
x = -3e1000
self.assertTrue(Double.IsInfinity(x))
self.assertTrue(Double.IsNegativeInfinity(x))
x = 3e1000 - 3e1000
self.assertTrue(Double.IsNaN(x))
f_x = "4.75"
f_y = "3.25"
i_x = "4"
i_y = "3"
def Parse(type, value):
return type.Parse(value, Globalization.CultureInfo.InvariantCulture.NumberFormat)
def VerifyTypes(v):
self.assertEqual(str(v.x.GetType()), v.n)
self.assertEqual(str(v.y.GetType()), v.n)
class float:
def __init__(self, type, name):
self.x = Parse(type, f_x)
self.y = Parse(type, f_y)
self.n = name
class fixed:
def __init__(self, type, name):
self.x = type.Parse(i_x)
self.y = type.Parse(i_y)
self.n = name
s = float(Single, "System.Single")
d = float(Double, "System.Double")
sb = fixed(SByte, "System.SByte")
sh = fixed(Int16, "System.Int16")
i = fixed(Int32, "System.Int32")
l = fixed(Int64, "System.Int64")
ub = fixed(Byte, "System.Byte")
ui = fixed(UInt32, "System.UInt32")
ul = fixed(UInt64, "System.UInt64")
def float_test(x,y):
self.assertTrue(x + y == y + x)
self.assertTrue(x * y == y * x)
self.assertTrue(x / y == x / y)
self.assertTrue(x % y == x % y)
self.assertTrue(x - y == -(y - x))
self.assertTrue(x ** y == x ** y)
self.assertTrue(x // y == x // y)
z = x
z /= y
self.assertTrue(z == x / y)
z = x
z *= y
self.assertTrue(z == x * y)
z = x
z %= y
self.assertTrue(z == x % y)
z = x
z += y
self.assertTrue(z == x + y)
z = x
z -= y
self.assertTrue(z == x - y)
z = x
z **= y
self.assertTrue(z == x ** y)
z = x
z //= y
self.assertTrue(z == x // y)
self.assertTrue((x < y) == (not (x >= y)))
self.assertTrue((x <= y) == (not (x > y)))
self.assertTrue((x > y) == (not (x <= y)))
self.assertTrue((x >= y) == (not (x < y)))
self.assertTrue((x != y) == (not (x == y)))
self.assertEqual((x == y), (y == x))
self.assertTrue((x == y) == (y == x))
self.assertTrue((x == y) == (not (x != y)))
def type_test(tx, ty):
x = tx.x
y = ty.y
float_test(x,x)
float_test(x,y)
float_test(y,y)
float_test(y,x)
test_types = [s,d,i,l]
# BUG 10 : Add support for unsigned integer types (and other missing data types)
#test_types = [s,d,i,l,sb,sh,ub,ui,ul]
# /BUG
for a in test_types:
VerifyTypes(a)
for b in test_types:
VerifyTypes(b)
type_test(a, b)
type_test(b, a)
@skipUnlessIronPython()
def test_conversions(self):
"""implicit conversions (conversion defined on Derived)"""
from IronPythonTest import Base, Base2, ConversionStorage, Derived, Derived2
a = ConversionStorage()
b = Base(5)
d = Derived(23)
a.Base = d
self.assertEqual(a.Base.value, d.value)
a.Derived = d
self.assertEqual(a.Derived.value, d.value)
a.Base = b
self.assertEqual(a.Base.value, b.value)
def assignBaseToDerived(storage, base):
storage.Derived = base
self.assertRaises(TypeError, assignBaseToDerived, a, b)
# implicit conversions (conversion defined on base)
a = ConversionStorage()
b = Base2(5)
d = Derived2(23)
a.Base2 = d
self.assertEqual(a.Base2.value, d.value)
a.Derived2 = d
self.assertEqual(a.Derived2.value, d.value)
a.Base2 = b
self.assertEqual(a.Base2.value, b.value)
def assignBaseToDerived(storage, base):
storage.Derived2 = base
self.assertRaises(TypeError, assignBaseToDerived, a, b)
class myFakeInt:
def __int__(self):
return 23
class myFakeLong:
def __int__(self):
return big(23)
class myFakeComplex:
def __complex__(self):
return 0j + 23
class myFakeFloat:
def __float__(self):
return 23.0
class myNegative:
def __pos__(self):
return 23
self.assertEqual(int(myFakeInt()), 23)
self.assertEqual(int(myFakeLong()), 23)
self.assertEqual(complex(myFakeComplex()), 0j + 23)
self.assertEqual(get_builtins_dict()['float'](myFakeFloat()), 23.0) # we redefined float above, go directly to the real float...
self.assertEqual(+myNegative(), 23)
# True/False and None... They shouldn't convert to each other, but
# a truth test against none should always be false.
self.assertEqual(False == None, False)
self.assertEqual(True == None, False)
self.assertEqual(None == False, False)
self.assertEqual(None == True, False)
if None: self.fail("Unreachable code reached: none shouldn't be true")
a = None
if a: self.assertEqual(False, True)
self.assertEqual(int(Single.Parse("3.14159")), 3)
#TODO: @skip("interpreted") #Too slow
def test_operators(self):
def operator_add(a, b) :
return a + b
def test_add(a,b,c):
self.assertTrue(c == b + a)
self.assertTrue(a + b == c)
self.assertTrue(c - a == b)
self.assertTrue(c - b == a)
def operator_sub(a, b) :
return a - b
def test_sub(a,b,c):
self.assertTrue(c == -(b - a))
self.assertTrue(c == a - b)
self.assertTrue(a == b + c)
self.assertTrue(b == a - c)
def operator_mul(a, b) :
return a * b
def test_mul(a,b,c):
self.assertTrue(c == a * b)
self.assertTrue(c == b * a)
if a != 0:
self.assertTrue(b == c // a)
if b != 0:
self.assertTrue(a == c // b)
def operator_div(a, b) :
if b != 0:
return a // b
def test_div(a,b,c):
if b != 0:
#print(a,b,c)
self.assertTrue(a // b == c, '%s == %s' % (a//b, c))
self.assertTrue(((c * b) + (a % b)) == a)
def operator_mod(a, b) :
if b != 0:
return a % b
def test_mod(a,b,c):
if b != 0:
self.assertTrue(a % b == c)
self.assertTrue((a // b) * b + c == a)
self.assertTrue((a - c) % b == 0)
def operator_and(a, b) :
return a & b
def test_and(a,b,c):
self.assertTrue(a & b == c)
self.assertTrue(b & a == c)
def operator_or(a, b) :
return a | b
def test_or(a,b,c):
self.assertTrue(a | b == c)
self.assertTrue(b | a == c)
def operator_xor(a, b) :
return a ^ b
def test_xor(a,b,c):
self.assertTrue(a ^ b == c)
self.assertTrue(b ^ a == c)
pats = [big(0), big(1), big(42), big(0x7fffffff), big(0x80000000), big(0xabcdef01), big(0xffffffff)]
nums = []
for p0 in pats:
for p1 in pats:
#for p2 in pats:
n = p0+(p1<<32)
nums.append(n)
nums.append(-n)
bignums = []
for p0 in pats:
for p1 in pats:
for p2 in pats:
n = p0+(p1<<32)+(p2<<64)
bignums.append(n)
bignums.append(-n)
ops = [
('/', operator_div, test_div),
('+', operator_add, test_add),
('-', operator_sub, test_sub),
('*', operator_mul, test_mul),
('%', operator_mod, test_mod),
('&', operator_and, test_and),
('|', operator_or, test_or),
('^', operator_xor, test_xor),
]
def test_it_all(nums):
for sym, op, test in ops:
for x in nums:
for y in nums:
z = op(x, y)
try:
test(x,y,z)
except get_builtins_dict()['Exception'] as e:
print(x, " ", sym, " ", y, " ", z, "Failed")
print(e)
raise
test_it_all(bignums)
test_it_all(nums)
def scenarios_helper(self, templates, cmps, gbls, lcls):
values = [3.5, 4.5, 4, 0, big(-200), 12345678901234567890]
for l in values:
for r in values:
for t in templates:
for c in cmps:
easy = t % (l, c, r)
# need to compare the real values the classes hold,
# not the values we expect them to hold, in case truncation
# has occurred
easy = easy.replace(')', ').value')
inst = t % (l, c, r)
#print inst, eval(easy), eval(inst)
self.assertTrue(eval(easy, gbls, lcls) == eval(inst, gbls, lcls), "%s == %s" % (easy, inst))
def test_usertype_cd(self):
"""UserType: both C and D define __lt__"""
class C(object):
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
class D(object):
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
class C2(C): pass
class D2(D): pass
self.scenarios_helper(templates1, ["<", ">"], globals(), locals())
def test_usertype_c(self):
"""UserType: C defines __lt__, D does not"""
class C(object):
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
class D(object):
def __init__(self, value):
self.value = value
class C2(C): pass
class D2(D): pass
self.scenarios_helper(templates2, ["<"], globals(), locals())
@skipUnlessIronPython()
def test_comparisions(self):
from IronPythonTest import ComparisonTest
def comparisons_helper(typeObj):
def assertEqual(first, second):
self.assertEqual(first,second)
def assertTrue(arg):
self.assertTrue(arg)
class Callback:
called = False
def __call__(self, value):
#print value, expected
assertEqual(value, expected)
self.called = True
def check(self):
assertTrue(self.called)
self.called = False
cb = Callback()
ComparisonTest.report = cb
values = [3.5, 4.5, 4, 0]
for l in values:
for r in values:
ctl = typeObj(l)
ctr = typeObj(r)
self.assertEqual(str(ctl), "ct<%s>" % str(l))
self.assertEqual(str(ctr), "ct<%s>" % str(r))
expected = "< on [ct<%s>, ct<%s>]" % (l, r)
self.assertEqual(ctl < ctr, l < r)
cb.check()
expected = "> on [ct<%s>, ct<%s>]" % (l, r)
self.assertEqual(ctl > ctr, l > r)
cb.check()
expected = "<= on [ct<%s>, ct<%s>]" % (l, r)
self.assertEqual(ctl <= ctr, l <= r)
cb.check()
expected = ">= on [ct<%s>, ct<%s>]" % (l, r)
self.assertEqual(ctl >= ctr, l >= r)
cb.check()
class ComparisonTest2(ComparisonTest): pass
comparisons_helper(ComparisonTest)
comparisons_helper(ComparisonTest2)
class C:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __gt__(self, other):
return self.value > other.value
class C2(C): pass
D = ComparisonTest
D2 = ComparisonTest2
self.scenarios_helper(templates1, ["<", ">"], globals(), locals())
def cmp(a, b): return (a > b) - (a < b)
ComparisonTest.report = None
self.assertTrue(cmp(ComparisonTest(5), ComparisonTest(5)) == 0)
self.assertTrue(cmp(ComparisonTest(5), ComparisonTest(8)) == -1)
self.assertTrue(cmp(ComparisonTest2(50), ComparisonTest(8)) == 1)
@skipUnlessIronPython()
def test_ipt_integertest(self):
def f():
self.assertTrue(it.self.assertEqual(it.UInt32Int32MaxValue,it.uintT(it.Int32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Int32MaxValue,it.ulongT(it.Int32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Int32MaxValue,it.intT(it.Int32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Int32MaxValue,it.longT(it.Int32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Int32MinValue,it.intT(it.Int32Int32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Int32MinValue,it.longT(it.Int32Int32MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Int32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.Int32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Int16MaxValue,it.uintT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Int16MaxValue,it.ushortT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Int16MaxValue,it.ulongT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Int16MaxValue,it.intT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Int16MaxValue,it.shortT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Int16MaxValue,it.longT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.CharInt16MaxValue,it.charT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Int16MinValue,it.intT(it.Int32Int16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Int16MinValue,it.shortT(it.Int32Int16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Int16MinValue,it.longT(it.Int32Int16MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Int16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32CharMaxValue,it.uintT(it.Int32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16CharMaxValue,it.ushortT(it.Int32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64CharMaxValue,it.ulongT(it.Int32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32CharMaxValue,it.intT(it.Int32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64CharMaxValue,it.longT(it.Int32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.CharCharMaxValue,it.charT(it.Int32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.Int32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.Int32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32ByteMaxValue,it.uintT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16ByteMaxValue,it.ushortT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64ByteMaxValue,it.ulongT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32ByteMaxValue,it.intT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16ByteMaxValue,it.shortT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64ByteMaxValue,it.longT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteByteMaxValue,it.byteT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.CharByteMaxValue,it.charT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.Int32ByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32SByteMaxValue,it.uintT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16SByteMaxValue,it.ushortT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64SByteMaxValue,it.ulongT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32SByteMaxValue,it.intT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16SByteMaxValue,it.shortT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64SByteMaxValue,it.longT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteSByteMaxValue,it.byteT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteSByteMaxValue,it.sbyteT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.CharSByteMaxValue,it.charT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32SByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32SByteMinValue,it.intT(it.Int32SByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16SByteMinValue,it.shortT(it.Int32SByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64SByteMinValue,it.longT(it.Int32SByteMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteSByteMinValue,it.sbyteT(it.Int32SByteMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32SByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32CharMaxValue,it.uintT(it.Int32CharMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16CharMaxValue,it.ushortT(it.Int32CharMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64CharMaxValue,it.ulongT(it.Int32CharMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32CharMaxValue,it.intT(it.Int32CharMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64CharMaxValue,it.longT(it.Int32CharMaxValue)))
self.assertTrue(it.self.assertEqual(it.CharCharMaxValue,it.charT(it.Int32CharMaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32CharMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.Int32CharMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val0,it.uintT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.UInt16Val0,it.ushortT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.UInt64Val0,it.ulongT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.Int32Val0,it.intT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.Int16Val0,it.shortT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.Int64Val0,it.longT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.ByteVal0,it.byteT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.SByteVal0,it.sbyteT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.CharVal0,it.charT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Val0)))
self.assertTrue(it.self.assertEqual(it.UInt32Val1,it.uintT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.UInt16Val1,it.ushortT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.UInt64Val1,it.ulongT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.Int32Val1,it.intT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.Int16Val1,it.shortT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.Int64Val1,it.longT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.ByteVal1,it.byteT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.SByteVal1,it.sbyteT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.CharVal1,it.charT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Val1)))
self.assertTrue(it.self.assertEqual(it.UInt32Val2,it.uintT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.UInt16Val2,it.ushortT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.UInt64Val2,it.ulongT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.Int32Val2,it.intT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.Int16Val2,it.shortT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.Int64Val2,it.longT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.ByteVal2,it.byteT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.SByteVal2,it.sbyteT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.CharVal2,it.charT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Val2)))
self.assertTrue(it.self.assertEqual(it.Int32Val3,it.intT(it.Int32Val3)))
self.assertTrue(it.self.assertEqual(it.Int16Val3,it.shortT(it.Int32Val3)))
self.assertTrue(it.self.assertEqual(it.Int64Val3,it.longT(it.Int32Val3)))
self.assertTrue(it.self.assertEqual(it.SByteVal3,it.sbyteT(it.Int32Val3)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Val3)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.Int32Val6)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.CharVal7,it.charT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Val7)))
self.assertTrue(it.self.assertEqual(it.Int32Val8,it.intT(it.Int32Val8)))
self.assertTrue(it.self.assertEqual(it.Int16Val8,it.shortT(it.Int32Val8)))
self.assertTrue(it.self.assertEqual(it.Int64Val8,it.longT(it.Int32Val8)))
self.assertTrue(it.self.assertEqual(it.SByteVal8,it.sbyteT(it.Int32Val8)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.Int32Val8)))
self.assertTrue(it.self.assertEqual(it.UInt32Int32MaxValue,it.uintT(it.UInt32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Int32MaxValue,it.ulongT(it.UInt32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Int32MaxValue,it.intT(it.UInt32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Int32MaxValue,it.longT(it.UInt32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.UInt32Int32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32UInt32MaxValue,it.uintT(it.UInt32UInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64UInt32MaxValue,it.ulongT(it.UInt32UInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64UInt32MaxValue,it.longT(it.UInt32UInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.UInt32UInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.UInt32UInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Int16MaxValue,it.uintT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Int16MaxValue,it.ushortT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Int16MaxValue,it.ulongT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Int16MaxValue,it.intT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Int16MaxValue,it.shortT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Int16MaxValue,it.longT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.CharInt16MaxValue,it.charT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.UInt32Int16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32CharMaxValue,it.uintT(it.UInt32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16CharMaxValue,it.ushortT(it.UInt32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64CharMaxValue,it.ulongT(it.UInt32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32CharMaxValue,it.intT(it.UInt32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64CharMaxValue,it.longT(it.UInt32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.CharCharMaxValue,it.charT(it.UInt32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.UInt32UInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.UInt32UInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.UInt32UInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32ByteMaxValue,it.uintT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16ByteMaxValue,it.ushortT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64ByteMaxValue,it.ulongT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32ByteMaxValue,it.intT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16ByteMaxValue,it.shortT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64ByteMaxValue,it.longT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteByteMaxValue,it.byteT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.CharByteMaxValue,it.charT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.UInt32ByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.UInt32ByteMinValue)))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt32ByteMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt32ByteMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt32ByteMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt32ByteMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt32ByteMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt32ByteMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt32ByteMinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt32ByteMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt32ByteMinValue))
self.assertEqual(it.UInt32SByteMaxValue, it.uintT(it.UInt32SByteMaxValue))
self.assertEqual(it.UInt16SByteMaxValue, it.ushortT(it.UInt32SByteMaxValue))
self.assertEqual(it.UInt64SByteMaxValue, it.ulongT(it.UInt32SByteMaxValue))
self.assertEqual(it.Int32SByteMaxValue, it.intT(it.UInt32SByteMaxValue))
self.assertEqual(it.Int16SByteMaxValue, it.shortT(it.UInt32SByteMaxValue))
self.assertEqual(it.Int64SByteMaxValue, it.longT(it.UInt32SByteMaxValue))
self.assertEqual(it.ByteSByteMaxValue, it.byteT(it.UInt32SByteMaxValue))
self.assertEqual(it.SByteSByteMaxValue, it.sbyteT(it.UInt32SByteMaxValue))
self.assertEqual(it.CharSByteMaxValue, it.charT(it.UInt32SByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt32SByteMaxValue))
self.assertEqual(it.UInt32CharMaxValue, it.uintT(it.UInt32CharMaxValue))
self.assertEqual(it.UInt16CharMaxValue, it.ushortT(it.UInt32CharMaxValue))
self.assertEqual(it.UInt64CharMaxValue, it.ulongT(it.UInt32CharMaxValue))
self.assertEqual(it.Int32CharMaxValue, it.intT(it.UInt32CharMaxValue))
self.assertEqual(it.Int64CharMaxValue, it.longT(it.UInt32CharMaxValue))
self.assertEqual(it.CharCharMaxValue, it.charT(it.UInt32CharMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt32CharMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt32CharMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt32CharMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt32CharMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt32CharMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt32CharMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt32CharMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt32CharMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt32CharMinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt32CharMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt32CharMinValue))
self.assertEqual(it.UInt32Val0, it.uintT(it.UInt32Val0))
self.assertEqual(it.UInt16Val0, it.ushortT(it.UInt32Val0))
self.assertEqual(it.UInt64Val0, it.ulongT(it.UInt32Val0))
self.assertEqual(it.Int32Val0, it.intT(it.UInt32Val0))
self.assertEqual(it.Int16Val0, it.shortT(it.UInt32Val0))
self.assertEqual(it.Int64Val0, it.longT(it.UInt32Val0))
self.assertEqual(it.ByteVal0, it.byteT(it.UInt32Val0))
self.assertEqual(it.SByteVal0, it.sbyteT(it.UInt32Val0))
self.assertEqual(it.CharVal0, it.charT(it.UInt32Val0))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt32Val0))
self.assertEqual(it.UInt32Val1, it.uintT(it.UInt32Val1))
self.assertEqual(it.UInt16Val1, it.ushortT(it.UInt32Val1))
self.assertEqual(it.UInt64Val1, it.ulongT(it.UInt32Val1))
self.assertEqual(it.Int32Val1, it.intT(it.UInt32Val1))
self.assertEqual(it.Int16Val1, it.shortT(it.UInt32Val1))
self.assertEqual(it.Int64Val1, it.longT(it.UInt32Val1))
self.assertEqual(it.ByteVal1, it.byteT(it.UInt32Val1))
self.assertEqual(it.SByteVal1, it.sbyteT(it.UInt32Val1))
self.assertEqual(it.CharVal1, it.charT(it.UInt32Val1))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt32Val1))
self.assertEqual(it.UInt32Val2, it.uintT(it.UInt32Val2))
self.assertEqual(it.UInt16Val2, it.ushortT(it.UInt32Val2))
self.assertEqual(it.UInt64Val2, it.ulongT(it.UInt32Val2))
self.assertEqual(it.Int32Val2, it.intT(it.UInt32Val2))
self.assertEqual(it.Int16Val2, it.shortT(it.UInt32Val2))
self.assertEqual(it.Int64Val2, it.longT(it.UInt32Val2))
self.assertEqual(it.ByteVal2, it.byteT(it.UInt32Val2))
self.assertEqual(it.SByteVal2, it.sbyteT(it.UInt32Val2))
self.assertEqual(it.CharVal2, it.charT(it.UInt32Val2))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt32Val2))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt32Val6))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt32Val6))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt32Val6))
self.assertEqual(it.Int32Val6, it.intT(it.UInt32Val6))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt32Val6))
self.assertEqual(it.Int64Val6, it.longT(it.UInt32Val6))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt32Val6))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt32Val6))
self.assertEqual(it.CharVal6, it.charT(it.UInt32Val6))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt32Val6))
self.assertEqual(it.UInt32Val7, it.uintT(it.UInt32Val7))
self.assertEqual(it.UInt16Val7, it.ushortT(it.UInt32Val7))
self.assertEqual(it.UInt64Val7, it.ulongT(it.UInt32Val7))
self.assertEqual(it.Int32Val7, it.intT(it.UInt32Val7))
self.assertEqual(it.Int16Val7, it.shortT(it.UInt32Val7))
self.assertEqual(it.Int64Val7, it.longT(it.UInt32Val7))
self.assertEqual(it.ByteVal7, it.byteT(it.UInt32Val7))
self.assertEqual(it.SByteVal7, it.sbyteT(it.UInt32Val7))
self.assertEqual(it.CharVal7, it.charT(it.UInt32Val7))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt32Val7))
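# Conversions from Int16-typed source values. (The shared ValN fixtures appear
# to hold small sample values; Val6 presumably maps to zero, since every
# *MinValue source above compares equal to the Val6 constants.)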
self.assertEqual(it.UInt32Val6, it.uintT(it.Int16UInt32MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int16UInt32MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int16UInt32MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int16UInt32MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int16UInt32MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int16UInt32MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int16UInt32MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int16UInt32MinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int16UInt32MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int16UInt32MinValue))
self.assertEqual(it.UInt32Int16MaxValue, it.uintT(it.Int16Int16MaxValue))
self.assertEqual(it.UInt16Int16MaxValue, it.ushortT(it.Int16Int16MaxValue))
self.assertEqual(it.UInt64Int16MaxValue, it.ulongT(it.Int16Int16MaxValue))
self.assertEqual(it.Int32Int16MaxValue, it.intT(it.Int16Int16MaxValue))
self.assertEqual(it.Int16Int16MaxValue, it.shortT(it.Int16Int16MaxValue))
self.assertEqual(it.Int64Int16MaxValue, it.longT(it.Int16Int16MaxValue))
self.assertEqual(it.CharInt16MaxValue, it.charT(it.Int16Int16MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Int16MaxValue))
self.assertEqual(it.Int32Int16MinValue, it.intT(it.Int16Int16MinValue))
self.assertEqual(it.Int16Int16MinValue, it.shortT(it.Int16Int16MinValue))
self.assertEqual(it.Int64Int16MinValue, it.longT(it.Int16Int16MinValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Int16MinValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int16UInt16MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int16UInt16MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int16UInt16MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int16UInt16MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int16UInt16MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int16UInt16MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int16UInt16MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int16UInt16MinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int16UInt16MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int16UInt16MinValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int16UInt64MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int16UInt64MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int16UInt64MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int16UInt64MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int16UInt64MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int16UInt64MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int16UInt64MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int16UInt64MinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int16UInt64MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int16UInt64MinValue))
self.assertEqual(it.UInt32ByteMaxValue, it.uintT(it.Int16ByteMaxValue))
self.assertEqual(it.UInt16ByteMaxValue, it.ushortT(it.Int16ByteMaxValue))
self.assertEqual(it.UInt64ByteMaxValue, it.ulongT(it.Int16ByteMaxValue))
self.assertEqual(it.Int32ByteMaxValue, it.intT(it.Int16ByteMaxValue))
self.assertEqual(it.Int16ByteMaxValue, it.shortT(it.Int16ByteMaxValue))
self.assertEqual(it.Int64ByteMaxValue, it.longT(it.Int16ByteMaxValue))
self.assertEqual(it.ByteByteMaxValue, it.byteT(it.Int16ByteMaxValue))
self.assertEqual(it.CharByteMaxValue, it.charT(it.Int16ByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16ByteMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int16ByteMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int16ByteMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int16ByteMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int16ByteMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int16ByteMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int16ByteMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int16ByteMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int16ByteMinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int16ByteMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int16ByteMinValue))
self.assertEqual(it.UInt32SByteMaxValue, it.uintT(it.Int16SByteMaxValue))
self.assertEqual(it.UInt16SByteMaxValue, it.ushortT(it.Int16SByteMaxValue))
self.assertEqual(it.UInt64SByteMaxValue, it.ulongT(it.Int16SByteMaxValue))
self.assertEqual(it.Int32SByteMaxValue, it.intT(it.Int16SByteMaxValue))
self.assertEqual(it.Int16SByteMaxValue, it.shortT(it.Int16SByteMaxValue))
self.assertEqual(it.Int64SByteMaxValue, it.longT(it.Int16SByteMaxValue))
self.assertEqual(it.ByteSByteMaxValue, it.byteT(it.Int16SByteMaxValue))
self.assertEqual(it.SByteSByteMaxValue, it.sbyteT(it.Int16SByteMaxValue))
self.assertEqual(it.CharSByteMaxValue, it.charT(it.Int16SByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16SByteMaxValue))
self.assertEqual(it.Int32SByteMinValue, it.intT(it.Int16SByteMinValue))
self.assertEqual(it.Int16SByteMinValue, it.shortT(it.Int16SByteMinValue))
self.assertEqual(it.Int64SByteMinValue, it.longT(it.Int16SByteMinValue))
self.assertEqual(it.SByteSByteMinValue, it.sbyteT(it.Int16SByteMinValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16SByteMinValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int16CharMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int16CharMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int16CharMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int16CharMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int16CharMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int16CharMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int16CharMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int16CharMinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int16CharMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int16CharMinValue))
self.assertEqual(it.UInt32Val0, it.uintT(it.Int16Val0))
self.assertEqual(it.UInt16Val0, it.ushortT(it.Int16Val0))
self.assertEqual(it.UInt64Val0, it.ulongT(it.Int16Val0))
self.assertEqual(it.Int32Val0, it.intT(it.Int16Val0))
self.assertEqual(it.Int16Val0, it.shortT(it.Int16Val0))
self.assertEqual(it.Int64Val0, it.longT(it.Int16Val0))
self.assertEqual(it.ByteVal0, it.byteT(it.Int16Val0))
self.assertEqual(it.SByteVal0, it.sbyteT(it.Int16Val0))
self.assertEqual(it.CharVal0, it.charT(it.Int16Val0))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Val0))
self.assertEqual(it.UInt32Val1, it.uintT(it.Int16Val1))
self.assertEqual(it.UInt16Val1, it.ushortT(it.Int16Val1))
self.assertEqual(it.UInt64Val1, it.ulongT(it.Int16Val1))
self.assertEqual(it.Int32Val1, it.intT(it.Int16Val1))
self.assertEqual(it.Int16Val1, it.shortT(it.Int16Val1))
self.assertEqual(it.Int64Val1, it.longT(it.Int16Val1))
self.assertEqual(it.ByteVal1, it.byteT(it.Int16Val1))
self.assertEqual(it.SByteVal1, it.sbyteT(it.Int16Val1))
self.assertEqual(it.CharVal1, it.charT(it.Int16Val1))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Val1))
self.assertEqual(it.UInt32Val2, it.uintT(it.Int16Val2))
self.assertEqual(it.UInt16Val2, it.ushortT(it.Int16Val2))
self.assertEqual(it.UInt64Val2, it.ulongT(it.Int16Val2))
self.assertEqual(it.Int32Val2, it.intT(it.Int16Val2))
self.assertEqual(it.Int16Val2, it.shortT(it.Int16Val2))
self.assertEqual(it.Int64Val2, it.longT(it.Int16Val2))
self.assertEqual(it.ByteVal2, it.byteT(it.Int16Val2))
self.assertEqual(it.SByteVal2, it.sbyteT(it.Int16Val2))
self.assertEqual(it.CharVal2, it.charT(it.Int16Val2))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Val2))
self.assertEqual(it.Int32Val3, it.intT(it.Int16Val3))
self.assertEqual(it.Int16Val3, it.shortT(it.Int16Val3))
self.assertEqual(it.Int64Val3, it.longT(it.Int16Val3))
self.assertEqual(it.SByteVal3, it.sbyteT(it.Int16Val3))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Val3))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int16Val6))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int16Val6))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int16Val6))
self.assertEqual(it.Int32Val6, it.intT(it.Int16Val6))
self.assertEqual(it.Int16Val6, it.shortT(it.Int16Val6))
self.assertEqual(it.Int64Val6, it.longT(it.Int16Val6))
self.assertEqual(it.ByteVal6, it.byteT(it.Int16Val6))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int16Val6))
self.assertEqual(it.CharVal6, it.charT(it.Int16Val6))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int16Val6))
self.assertEqual(it.UInt32Val7, it.uintT(it.Int16Val7))
self.assertEqual(it.UInt16Val7, it.ushortT(it.Int16Val7))
self.assertEqual(it.UInt64Val7, it.ulongT(it.Int16Val7))
self.assertEqual(it.Int32Val7, it.intT(it.Int16Val7))
self.assertEqual(it.Int16Val7, it.shortT(it.Int16Val7))
self.assertEqual(it.Int64Val7, it.longT(it.Int16Val7))
self.assertEqual(it.ByteVal7, it.byteT(it.Int16Val7))
self.assertEqual(it.SByteVal7, it.sbyteT(it.Int16Val7))
self.assertEqual(it.CharVal7, it.charT(it.Int16Val7))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Val7))
self.assertEqual(it.Int32Val8, it.intT(it.Int16Val8))
self.assertEqual(it.Int16Val8, it.shortT(it.Int16Val8))
self.assertEqual(it.Int64Val8, it.longT(it.Int16Val8))
self.assertEqual(it.SByteVal8, it.sbyteT(it.Int16Val8))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int16Val8))
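# Conversions from UInt16-typed source values.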
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt16UInt32MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt16UInt32MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt16UInt32MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt16UInt32MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt16UInt32MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt16UInt32MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt16UInt32MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt16UInt32MinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt16UInt32MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt16UInt32MinValue))
self.assertEqual(it.UInt32Int16MaxValue, it.uintT(it.UInt16Int16MaxValue))
self.assertEqual(it.UInt16Int16MaxValue, it.ushortT(it.UInt16Int16MaxValue))
self.assertEqual(it.UInt64Int16MaxValue, it.ulongT(it.UInt16Int16MaxValue))
self.assertEqual(it.Int32Int16MaxValue, it.intT(it.UInt16Int16MaxValue))
self.assertEqual(it.Int16Int16MaxValue, it.shortT(it.UInt16Int16MaxValue))
self.assertEqual(it.Int64Int16MaxValue, it.longT(it.UInt16Int16MaxValue))
self.assertEqual(it.CharInt16MaxValue, it.charT(it.UInt16Int16MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16Int16MaxValue))
self.assertEqual(it.UInt32CharMaxValue, it.uintT(it.UInt16UInt16MaxValue))
self.assertEqual(it.UInt16CharMaxValue, it.ushortT(it.UInt16UInt16MaxValue))
self.assertEqual(it.UInt64CharMaxValue, it.ulongT(it.UInt16UInt16MaxValue))
self.assertEqual(it.Int32CharMaxValue, it.intT(it.UInt16UInt16MaxValue))
self.assertEqual(it.Int64CharMaxValue, it.longT(it.UInt16UInt16MaxValue))
self.assertEqual(it.CharCharMaxValue, it.charT(it.UInt16UInt16MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16UInt16MaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt16UInt16MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt16UInt16MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt16UInt16MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt16UInt16MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt16UInt16MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt16UInt16MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt16UInt16MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt16UInt16MinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt16UInt16MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt16UInt16MinValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt16UInt64MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt16UInt64MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt16UInt64MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt16UInt64MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt16UInt64MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt16UInt64MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt16UInt64MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt16UInt64MinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt16UInt64MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt16UInt64MinValue))
self.assertEqual(it.UInt32ByteMaxValue, it.uintT(it.UInt16ByteMaxValue))
self.assertEqual(it.UInt16ByteMaxValue, it.ushortT(it.UInt16ByteMaxValue))
self.assertEqual(it.UInt64ByteMaxValue, it.ulongT(it.UInt16ByteMaxValue))
self.assertEqual(it.Int32ByteMaxValue, it.intT(it.UInt16ByteMaxValue))
self.assertEqual(it.Int16ByteMaxValue, it.shortT(it.UInt16ByteMaxValue))
self.assertEqual(it.Int64ByteMaxValue, it.longT(it.UInt16ByteMaxValue))
self.assertEqual(it.ByteByteMaxValue, it.byteT(it.UInt16ByteMaxValue))
self.assertEqual(it.CharByteMaxValue, it.charT(it.UInt16ByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16ByteMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt16ByteMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt16ByteMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt16ByteMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt16ByteMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt16ByteMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt16ByteMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt16ByteMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt16ByteMinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt16ByteMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt16ByteMinValue))
self.assertEqual(it.UInt32SByteMaxValue, it.uintT(it.UInt16SByteMaxValue))
self.assertEqual(it.UInt16SByteMaxValue, it.ushortT(it.UInt16SByteMaxValue))
self.assertEqual(it.UInt64SByteMaxValue, it.ulongT(it.UInt16SByteMaxValue))
self.assertEqual(it.Int32SByteMaxValue, it.intT(it.UInt16SByteMaxValue))
self.assertEqual(it.Int16SByteMaxValue, it.shortT(it.UInt16SByteMaxValue))
self.assertEqual(it.Int64SByteMaxValue, it.longT(it.UInt16SByteMaxValue))
self.assertEqual(it.ByteSByteMaxValue, it.byteT(it.UInt16SByteMaxValue))
self.assertEqual(it.SByteSByteMaxValue, it.sbyteT(it.UInt16SByteMaxValue))
self.assertEqual(it.CharSByteMaxValue, it.charT(it.UInt16SByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16SByteMaxValue))
self.assertEqual(it.UInt32CharMaxValue, it.uintT(it.UInt16CharMaxValue))
self.assertEqual(it.UInt16CharMaxValue, it.ushortT(it.UInt16CharMaxValue))
self.assertEqual(it.UInt64CharMaxValue, it.ulongT(it.UInt16CharMaxValue))
self.assertEqual(it.Int32CharMaxValue, it.intT(it.UInt16CharMaxValue))
self.assertEqual(it.Int64CharMaxValue, it.longT(it.UInt16CharMaxValue))
self.assertEqual(it.CharCharMaxValue, it.charT(it.UInt16CharMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16CharMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt16CharMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt16CharMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt16CharMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt16CharMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt16CharMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt16CharMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt16CharMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt16CharMinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt16CharMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt16CharMinValue))
self.assertEqual(it.UInt32Val0, it.uintT(it.UInt16Val0))
self.assertEqual(it.UInt16Val0, it.ushortT(it.UInt16Val0))
self.assertEqual(it.UInt64Val0, it.ulongT(it.UInt16Val0))
self.assertEqual(it.Int32Val0, it.intT(it.UInt16Val0))
self.assertEqual(it.Int16Val0, it.shortT(it.UInt16Val0))
self.assertEqual(it.Int64Val0, it.longT(it.UInt16Val0))
self.assertEqual(it.ByteVal0, it.byteT(it.UInt16Val0))
self.assertEqual(it.SByteVal0, it.sbyteT(it.UInt16Val0))
self.assertEqual(it.CharVal0, it.charT(it.UInt16Val0))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16Val0))
self.assertEqual(it.UInt32Val1, it.uintT(it.UInt16Val1))
self.assertEqual(it.UInt16Val1, it.ushortT(it.UInt16Val1))
self.assertEqual(it.UInt64Val1, it.ulongT(it.UInt16Val1))
self.assertEqual(it.Int32Val1, it.intT(it.UInt16Val1))
self.assertEqual(it.Int16Val1, it.shortT(it.UInt16Val1))
self.assertEqual(it.Int64Val1, it.longT(it.UInt16Val1))
self.assertEqual(it.ByteVal1, it.byteT(it.UInt16Val1))
self.assertEqual(it.SByteVal1, it.sbyteT(it.UInt16Val1))
self.assertEqual(it.CharVal1, it.charT(it.UInt16Val1))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16Val1))
self.assertEqual(it.UInt32Val2, it.uintT(it.UInt16Val2))
self.assertEqual(it.UInt16Val2, it.ushortT(it.UInt16Val2))
self.assertEqual(it.UInt64Val2, it.ulongT(it.UInt16Val2))
self.assertEqual(it.Int32Val2, it.intT(it.UInt16Val2))
self.assertEqual(it.Int16Val2, it.shortT(it.UInt16Val2))
self.assertEqual(it.Int64Val2, it.longT(it.UInt16Val2))
self.assertEqual(it.ByteVal2, it.byteT(it.UInt16Val2))
self.assertEqual(it.SByteVal2, it.sbyteT(it.UInt16Val2))
self.assertEqual(it.CharVal2, it.charT(it.UInt16Val2))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16Val2))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt16Val6))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt16Val6))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt16Val6))
self.assertEqual(it.Int32Val6, it.intT(it.UInt16Val6))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt16Val6))
self.assertEqual(it.Int64Val6, it.longT(it.UInt16Val6))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt16Val6))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt16Val6))
self.assertEqual(it.CharVal6, it.charT(it.UInt16Val6))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt16Val6))
self.assertEqual(it.UInt32Val7, it.uintT(it.UInt16Val7))
self.assertEqual(it.UInt16Val7, it.ushortT(it.UInt16Val7))
self.assertEqual(it.UInt64Val7, it.ulongT(it.UInt16Val7))
self.assertEqual(it.Int32Val7, it.intT(it.UInt16Val7))
self.assertEqual(it.Int16Val7, it.shortT(it.UInt16Val7))
self.assertEqual(it.Int64Val7, it.longT(it.UInt16Val7))
self.assertEqual(it.ByteVal7, it.byteT(it.UInt16Val7))
self.assertEqual(it.SByteVal7, it.sbyteT(it.UInt16Val7))
self.assertEqual(it.CharVal7, it.charT(it.UInt16Val7))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt16Val7))
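# Conversions from Int64-typed source values.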
self.assertEqual(it.UInt32Int32MaxValue, it.uintT(it.Int64Int32MaxValue))
self.assertEqual(it.UInt64Int32MaxValue, it.ulongT(it.Int64Int32MaxValue))
self.assertEqual(it.Int32Int32MaxValue, it.intT(it.Int64Int32MaxValue))
self.assertEqual(it.Int64Int32MaxValue, it.longT(it.Int64Int32MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Int32MaxValue))
self.assertEqual(it.Int32Int32MinValue, it.intT(it.Int64Int32MinValue))
self.assertEqual(it.Int64Int32MinValue, it.longT(it.Int64Int32MinValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Int32MinValue))
self.assertEqual(it.UInt32UInt32MaxValue, it.uintT(it.Int64UInt32MaxValue))
self.assertEqual(it.UInt64UInt32MaxValue, it.ulongT(it.Int64UInt32MaxValue))
self.assertEqual(it.Int64UInt32MaxValue, it.longT(it.Int64UInt32MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64UInt32MaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int64UInt32MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int64UInt32MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int64UInt32MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int64UInt32MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int64UInt32MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int64UInt32MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int64UInt32MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int64UInt32MinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int64UInt32MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int64UInt32MinValue))
self.assertEqual(it.UInt32Int16MaxValue, it.uintT(it.Int64Int16MaxValue))
self.assertEqual(it.UInt16Int16MaxValue, it.ushortT(it.Int64Int16MaxValue))
self.assertEqual(it.UInt64Int16MaxValue, it.ulongT(it.Int64Int16MaxValue))
self.assertEqual(it.Int32Int16MaxValue, it.intT(it.Int64Int16MaxValue))
self.assertEqual(it.Int16Int16MaxValue, it.shortT(it.Int64Int16MaxValue))
self.assertEqual(it.Int64Int16MaxValue, it.longT(it.Int64Int16MaxValue))
self.assertEqual(it.CharInt16MaxValue, it.charT(it.Int64Int16MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Int16MaxValue))
self.assertEqual(it.Int32Int16MinValue, it.intT(it.Int64Int16MinValue))
self.assertEqual(it.Int16Int16MinValue, it.shortT(it.Int64Int16MinValue))
self.assertEqual(it.Int64Int16MinValue, it.longT(it.Int64Int16MinValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Int16MinValue))
self.assertEqual(it.UInt32CharMaxValue, it.uintT(it.Int64UInt16MaxValue))
self.assertEqual(it.UInt16CharMaxValue, it.ushortT(it.Int64UInt16MaxValue))
self.assertEqual(it.UInt64CharMaxValue, it.ulongT(it.Int64UInt16MaxValue))
self.assertEqual(it.Int32CharMaxValue, it.intT(it.Int64UInt16MaxValue))
self.assertEqual(it.Int64CharMaxValue, it.longT(it.Int64UInt16MaxValue))
self.assertEqual(it.CharCharMaxValue, it.charT(it.Int64UInt16MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64UInt16MaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int64UInt16MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int64UInt16MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int64UInt16MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int64UInt16MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int64UInt16MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int64UInt16MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int64UInt16MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int64UInt16MinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int64UInt16MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int64UInt16MinValue))
self.assertEqual(it.UInt64Int64MaxValue, it.ulongT(it.Int64Int64MaxValue))
self.assertEqual(it.Int64Int64MaxValue, it.longT(it.Int64Int64MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Int64MaxValue))
self.assertEqual(it.Int64Int64MinValue, it.longT(it.Int64Int64MinValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Int64MinValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int64UInt64MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int64UInt64MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int64UInt64MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int64UInt64MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int64UInt64MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int64UInt64MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int64UInt64MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int64UInt64MinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int64UInt64MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int64UInt64MinValue))
self.assertEqual(it.UInt32ByteMaxValue, it.uintT(it.Int64ByteMaxValue))
self.assertEqual(it.UInt16ByteMaxValue, it.ushortT(it.Int64ByteMaxValue))
self.assertEqual(it.UInt64ByteMaxValue, it.ulongT(it.Int64ByteMaxValue))
self.assertEqual(it.Int32ByteMaxValue, it.intT(it.Int64ByteMaxValue))
self.assertEqual(it.Int16ByteMaxValue, it.shortT(it.Int64ByteMaxValue))
self.assertEqual(it.Int64ByteMaxValue, it.longT(it.Int64ByteMaxValue))
self.assertEqual(it.ByteByteMaxValue, it.byteT(it.Int64ByteMaxValue))
self.assertEqual(it.CharByteMaxValue, it.charT(it.Int64ByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64ByteMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int64ByteMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int64ByteMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int64ByteMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int64ByteMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int64ByteMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int64ByteMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int64ByteMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int64ByteMinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int64ByteMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int64ByteMinValue))
self.assertEqual(it.UInt32SByteMaxValue, it.uintT(it.Int64SByteMaxValue))
self.assertEqual(it.UInt16SByteMaxValue, it.ushortT(it.Int64SByteMaxValue))
self.assertEqual(it.UInt64SByteMaxValue, it.ulongT(it.Int64SByteMaxValue))
self.assertEqual(it.Int32SByteMaxValue, it.intT(it.Int64SByteMaxValue))
self.assertEqual(it.Int16SByteMaxValue, it.shortT(it.Int64SByteMaxValue))
self.assertEqual(it.Int64SByteMaxValue, it.longT(it.Int64SByteMaxValue))
self.assertEqual(it.ByteSByteMaxValue, it.byteT(it.Int64SByteMaxValue))
self.assertEqual(it.SByteSByteMaxValue, it.sbyteT(it.Int64SByteMaxValue))
self.assertEqual(it.CharSByteMaxValue, it.charT(it.Int64SByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64SByteMaxValue))
self.assertEqual(it.Int32SByteMinValue, it.intT(it.Int64SByteMinValue))
self.assertEqual(it.Int16SByteMinValue, it.shortT(it.Int64SByteMinValue))
self.assertEqual(it.Int64SByteMinValue, it.longT(it.Int64SByteMinValue))
self.assertEqual(it.SByteSByteMinValue, it.sbyteT(it.Int64SByteMinValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64SByteMinValue))
self.assertEqual(it.UInt32CharMaxValue, it.uintT(it.Int64CharMaxValue))
self.assertEqual(it.UInt16CharMaxValue, it.ushortT(it.Int64CharMaxValue))
self.assertEqual(it.UInt64CharMaxValue, it.ulongT(it.Int64CharMaxValue))
self.assertEqual(it.Int32CharMaxValue, it.intT(it.Int64CharMaxValue))
self.assertEqual(it.Int64CharMaxValue, it.longT(it.Int64CharMaxValue))
self.assertEqual(it.CharCharMaxValue, it.charT(it.Int64CharMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64CharMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int64CharMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int64CharMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int64CharMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.Int64CharMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.Int64CharMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.Int64CharMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.Int64CharMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int64CharMinValue))
self.assertEqual(it.CharVal6, it.charT(it.Int64CharMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int64CharMinValue))
self.assertEqual(it.UInt32Val0, it.uintT(it.Int64Val0))
self.assertEqual(it.UInt16Val0, it.ushortT(it.Int64Val0))
self.assertEqual(it.UInt64Val0, it.ulongT(it.Int64Val0))
self.assertEqual(it.Int32Val0, it.intT(it.Int64Val0))
self.assertEqual(it.Int16Val0, it.shortT(it.Int64Val0))
self.assertEqual(it.Int64Val0, it.longT(it.Int64Val0))
self.assertEqual(it.ByteVal0, it.byteT(it.Int64Val0))
self.assertEqual(it.SByteVal0, it.sbyteT(it.Int64Val0))
self.assertEqual(it.CharVal0, it.charT(it.Int64Val0))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Val0))
self.assertEqual(it.UInt32Val1, it.uintT(it.Int64Val1))
self.assertEqual(it.UInt16Val1, it.ushortT(it.Int64Val1))
self.assertEqual(it.UInt64Val1, it.ulongT(it.Int64Val1))
self.assertEqual(it.Int32Val1, it.intT(it.Int64Val1))
self.assertEqual(it.Int16Val1, it.shortT(it.Int64Val1))
self.assertEqual(it.Int64Val1, it.longT(it.Int64Val1))
self.assertEqual(it.ByteVal1, it.byteT(it.Int64Val1))
self.assertEqual(it.SByteVal1, it.sbyteT(it.Int64Val1))
self.assertEqual(it.CharVal1, it.charT(it.Int64Val1))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Val1))
self.assertEqual(it.UInt32Val2, it.uintT(it.Int64Val2))
self.assertEqual(it.UInt16Val2, it.ushortT(it.Int64Val2))
self.assertEqual(it.UInt64Val2, it.ulongT(it.Int64Val2))
self.assertEqual(it.Int32Val2, it.intT(it.Int64Val2))
self.assertEqual(it.Int16Val2, it.shortT(it.Int64Val2))
self.assertEqual(it.Int64Val2, it.longT(it.Int64Val2))
self.assertEqual(it.ByteVal2, it.byteT(it.Int64Val2))
self.assertEqual(it.SByteVal2, it.sbyteT(it.Int64Val2))
self.assertEqual(it.CharVal2, it.charT(it.Int64Val2))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Val2))
self.assertEqual(it.Int32Val3, it.intT(it.Int64Val3))
self.assertEqual(it.Int16Val3, it.shortT(it.Int64Val3))
self.assertEqual(it.Int64Val3, it.longT(it.Int64Val3))
self.assertEqual(it.SByteVal3, it.sbyteT(it.Int64Val3))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Val3))
self.assertEqual(it.UInt32Val6, it.uintT(it.Int64Val6))
self.assertEqual(it.UInt16Val6, it.ushortT(it.Int64Val6))
self.assertEqual(it.UInt64Val6, it.ulongT(it.Int64Val6))
self.assertEqual(it.Int32Val6, it.intT(it.Int64Val6))
self.assertEqual(it.Int16Val6, it.shortT(it.Int64Val6))
self.assertEqual(it.Int64Val6, it.longT(it.Int64Val6))
self.assertEqual(it.ByteVal6, it.byteT(it.Int64Val6))
self.assertEqual(it.SByteVal6, it.sbyteT(it.Int64Val6))
self.assertEqual(it.CharVal6, it.charT(it.Int64Val6))
self.assertEqual(it.BooleanVal6, it.boolT(it.Int64Val6))
self.assertEqual(it.UInt32Val7, it.uintT(it.Int64Val7))
self.assertEqual(it.UInt16Val7, it.ushortT(it.Int64Val7))
self.assertEqual(it.UInt64Val7, it.ulongT(it.Int64Val7))
self.assertEqual(it.Int32Val7, it.intT(it.Int64Val7))
self.assertEqual(it.Int16Val7, it.shortT(it.Int64Val7))
self.assertEqual(it.Int64Val7, it.longT(it.Int64Val7))
self.assertEqual(it.ByteVal7, it.byteT(it.Int64Val7))
self.assertEqual(it.SByteVal7, it.sbyteT(it.Int64Val7))
self.assertEqual(it.CharVal7, it.charT(it.Int64Val7))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Val7))
self.assertEqual(it.Int32Val8, it.intT(it.Int64Val8))
self.assertEqual(it.Int16Val8, it.shortT(it.Int64Val8))
self.assertEqual(it.Int64Val8, it.longT(it.Int64Val8))
self.assertEqual(it.SByteVal8, it.sbyteT(it.Int64Val8))
self.assertEqual(it.BooleanVal8, it.boolT(it.Int64Val8))
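# Conversions from UInt64-typed source values.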
self.assertEqual(it.UInt32Int32MaxValue, it.uintT(it.UInt64Int32MaxValue))
self.assertEqual(it.UInt64Int32MaxValue, it.ulongT(it.UInt64Int32MaxValue))
self.assertEqual(it.Int32Int32MaxValue, it.intT(it.UInt64Int32MaxValue))
self.assertEqual(it.Int64Int32MaxValue, it.longT(it.UInt64Int32MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64Int32MaxValue))
self.assertEqual(it.UInt32UInt32MaxValue, it.uintT(it.UInt64UInt32MaxValue))
self.assertEqual(it.UInt64UInt32MaxValue, it.ulongT(it.UInt64UInt32MaxValue))
self.assertEqual(it.Int64UInt32MaxValue, it.longT(it.UInt64UInt32MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64UInt32MaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt64UInt32MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt64UInt32MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt64UInt32MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt64UInt32MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt64UInt32MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt64UInt32MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt64UInt32MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt64UInt32MinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt64UInt32MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt64UInt32MinValue))
self.assertEqual(it.UInt32Int16MaxValue, it.uintT(it.UInt64Int16MaxValue))
self.assertEqual(it.UInt16Int16MaxValue, it.ushortT(it.UInt64Int16MaxValue))
self.assertEqual(it.UInt64Int16MaxValue, it.ulongT(it.UInt64Int16MaxValue))
self.assertEqual(it.Int32Int16MaxValue, it.intT(it.UInt64Int16MaxValue))
self.assertEqual(it.Int16Int16MaxValue, it.shortT(it.UInt64Int16MaxValue))
self.assertEqual(it.Int64Int16MaxValue, it.longT(it.UInt64Int16MaxValue))
self.assertEqual(it.CharInt16MaxValue, it.charT(it.UInt64Int16MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64Int16MaxValue))
self.assertEqual(it.UInt32CharMaxValue, it.uintT(it.UInt64UInt16MaxValue))
self.assertEqual(it.UInt16CharMaxValue, it.ushortT(it.UInt64UInt16MaxValue))
self.assertEqual(it.UInt64CharMaxValue, it.ulongT(it.UInt64UInt16MaxValue))
self.assertEqual(it.Int32CharMaxValue, it.intT(it.UInt64UInt16MaxValue))
self.assertEqual(it.Int64CharMaxValue, it.longT(it.UInt64UInt16MaxValue))
self.assertEqual(it.CharCharMaxValue, it.charT(it.UInt64UInt16MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64UInt16MaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt64UInt16MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt64UInt16MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt64UInt16MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt64UInt16MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt64UInt16MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt64UInt16MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt64UInt16MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt64UInt16MinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt64UInt16MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt64UInt16MinValue))
self.assertEqual(it.UInt64Int64MaxValue, it.ulongT(it.UInt64Int64MaxValue))
self.assertEqual(it.Int64Int64MaxValue, it.longT(it.UInt64Int64MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64Int64MaxValue))
self.assertEqual(it.UInt64UInt64MaxValue, it.ulongT(it.UInt64UInt64MaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64UInt64MaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt64UInt64MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt64UInt64MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt64UInt64MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt64UInt64MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt64UInt64MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt64UInt64MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt64UInt64MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt64UInt64MinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt64UInt64MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt64UInt64MinValue))
self.assertEqual(it.UInt32ByteMaxValue, it.uintT(it.UInt64ByteMaxValue))
self.assertEqual(it.UInt16ByteMaxValue, it.ushortT(it.UInt64ByteMaxValue))
self.assertEqual(it.UInt64ByteMaxValue, it.ulongT(it.UInt64ByteMaxValue))
self.assertEqual(it.Int32ByteMaxValue, it.intT(it.UInt64ByteMaxValue))
self.assertEqual(it.Int16ByteMaxValue, it.shortT(it.UInt64ByteMaxValue))
self.assertEqual(it.Int64ByteMaxValue, it.longT(it.UInt64ByteMaxValue))
self.assertEqual(it.ByteByteMaxValue, it.byteT(it.UInt64ByteMaxValue))
self.assertEqual(it.CharByteMaxValue, it.charT(it.UInt64ByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64ByteMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt64ByteMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt64ByteMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt64ByteMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt64ByteMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt64ByteMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt64ByteMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt64ByteMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt64ByteMinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt64ByteMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt64ByteMinValue))
self.assertEqual(it.UInt32SByteMaxValue, it.uintT(it.UInt64SByteMaxValue))
self.assertEqual(it.UInt16SByteMaxValue, it.ushortT(it.UInt64SByteMaxValue))
self.assertEqual(it.UInt64SByteMaxValue, it.ulongT(it.UInt64SByteMaxValue))
self.assertEqual(it.Int32SByteMaxValue, it.intT(it.UInt64SByteMaxValue))
self.assertEqual(it.Int16SByteMaxValue, it.shortT(it.UInt64SByteMaxValue))
self.assertEqual(it.Int64SByteMaxValue, it.longT(it.UInt64SByteMaxValue))
self.assertEqual(it.ByteSByteMaxValue, it.byteT(it.UInt64SByteMaxValue))
self.assertEqual(it.SByteSByteMaxValue, it.sbyteT(it.UInt64SByteMaxValue))
self.assertEqual(it.CharSByteMaxValue, it.charT(it.UInt64SByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64SByteMaxValue))
self.assertEqual(it.UInt32CharMaxValue, it.uintT(it.UInt64CharMaxValue))
self.assertEqual(it.UInt16CharMaxValue, it.ushortT(it.UInt64CharMaxValue))
self.assertEqual(it.UInt64CharMaxValue, it.ulongT(it.UInt64CharMaxValue))
self.assertEqual(it.Int32CharMaxValue, it.intT(it.UInt64CharMaxValue))
self.assertEqual(it.Int64CharMaxValue, it.longT(it.UInt64CharMaxValue))
self.assertEqual(it.CharCharMaxValue, it.charT(it.UInt64CharMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64CharMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt64CharMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt64CharMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt64CharMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.UInt64CharMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt64CharMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.UInt64CharMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt64CharMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt64CharMinValue))
self.assertEqual(it.CharVal6, it.charT(it.UInt64CharMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt64CharMinValue))
self.assertEqual(it.UInt32Val0, it.uintT(it.UInt64Val0))
self.assertEqual(it.UInt16Val0, it.ushortT(it.UInt64Val0))
self.assertEqual(it.UInt64Val0, it.ulongT(it.UInt64Val0))
self.assertEqual(it.Int32Val0, it.intT(it.UInt64Val0))
self.assertEqual(it.Int16Val0, it.shortT(it.UInt64Val0))
self.assertEqual(it.Int64Val0, it.longT(it.UInt64Val0))
self.assertEqual(it.ByteVal0, it.byteT(it.UInt64Val0))
self.assertEqual(it.SByteVal0, it.sbyteT(it.UInt64Val0))
self.assertEqual(it.CharVal0, it.charT(it.UInt64Val0))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64Val0))
self.assertEqual(it.UInt32Val1, it.uintT(it.UInt64Val1))
self.assertEqual(it.UInt16Val1, it.ushortT(it.UInt64Val1))
self.assertEqual(it.UInt64Val1, it.ulongT(it.UInt64Val1))
self.assertEqual(it.Int32Val1, it.intT(it.UInt64Val1))
self.assertEqual(it.Int16Val1, it.shortT(it.UInt64Val1))
self.assertEqual(it.Int64Val1, it.longT(it.UInt64Val1))
self.assertEqual(it.ByteVal1, it.byteT(it.UInt64Val1))
self.assertEqual(it.SByteVal1, it.sbyteT(it.UInt64Val1))
self.assertEqual(it.CharVal1, it.charT(it.UInt64Val1))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64Val1))
self.assertEqual(it.UInt32Val2, it.uintT(it.UInt64Val2))
self.assertEqual(it.UInt16Val2, it.ushortT(it.UInt64Val2))
self.assertEqual(it.UInt64Val2, it.ulongT(it.UInt64Val2))
self.assertEqual(it.Int32Val2, it.intT(it.UInt64Val2))
self.assertEqual(it.Int16Val2, it.shortT(it.UInt64Val2))
self.assertEqual(it.Int64Val2, it.longT(it.UInt64Val2))
self.assertEqual(it.ByteVal2, it.byteT(it.UInt64Val2))
self.assertEqual(it.SByteVal2, it.sbyteT(it.UInt64Val2))
self.assertEqual(it.CharVal2, it.charT(it.UInt64Val2))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64Val2))
self.assertEqual(it.UInt32Val6, it.uintT(it.UInt64Val6))
self.assertEqual(it.UInt16Val6, it.ushortT(it.UInt64Val6))
self.assertEqual(it.UInt64Val6, it.ulongT(it.UInt64Val6))
self.assertEqual(it.Int32Val6, it.intT(it.UInt64Val6))
self.assertEqual(it.Int16Val6, it.shortT(it.UInt64Val6))
self.assertEqual(it.Int64Val6, it.longT(it.UInt64Val6))
self.assertEqual(it.ByteVal6, it.byteT(it.UInt64Val6))
self.assertEqual(it.SByteVal6, it.sbyteT(it.UInt64Val6))
self.assertEqual(it.CharVal6, it.charT(it.UInt64Val6))
self.assertEqual(it.BooleanVal6, it.boolT(it.UInt64Val6))
self.assertEqual(it.UInt32Val7, it.uintT(it.UInt64Val7))
self.assertEqual(it.UInt16Val7, it.ushortT(it.UInt64Val7))
self.assertEqual(it.UInt64Val7, it.ulongT(it.UInt64Val7))
self.assertEqual(it.Int32Val7, it.intT(it.UInt64Val7))
self.assertEqual(it.Int16Val7, it.shortT(it.UInt64Val7))
self.assertEqual(it.Int64Val7, it.longT(it.UInt64Val7))
self.assertEqual(it.ByteVal7, it.byteT(it.UInt64Val7))
self.assertEqual(it.SByteVal7, it.sbyteT(it.UInt64Val7))
self.assertEqual(it.CharVal7, it.charT(it.UInt64Val7))
self.assertEqual(it.BooleanVal8, it.boolT(it.UInt64Val7))
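# Conversions from Byte-typed source values.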
self.assertEqual(it.UInt32Val6, it.uintT(it.ByteUInt32MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.ByteUInt32MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.ByteUInt32MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.ByteUInt32MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.ByteUInt32MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.ByteUInt32MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.ByteUInt32MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.ByteUInt32MinValue))
self.assertEqual(it.CharVal6, it.charT(it.ByteUInt32MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.ByteUInt32MinValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.ByteUInt16MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.ByteUInt16MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.ByteUInt16MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.ByteUInt16MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.ByteUInt16MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.ByteUInt16MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.ByteUInt16MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.ByteUInt16MinValue))
self.assertEqual(it.CharVal6, it.charT(it.ByteUInt16MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.ByteUInt16MinValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.ByteUInt64MinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.ByteUInt64MinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.ByteUInt64MinValue))
self.assertEqual(it.Int32Val6, it.intT(it.ByteUInt64MinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.ByteUInt64MinValue))
self.assertEqual(it.Int64Val6, it.longT(it.ByteUInt64MinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.ByteUInt64MinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.ByteUInt64MinValue))
self.assertEqual(it.CharVal6, it.charT(it.ByteUInt64MinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.ByteUInt64MinValue))
self.assertEqual(it.UInt32ByteMaxValue, it.uintT(it.ByteByteMaxValue))
self.assertEqual(it.UInt16ByteMaxValue, it.ushortT(it.ByteByteMaxValue))
self.assertEqual(it.UInt64ByteMaxValue, it.ulongT(it.ByteByteMaxValue))
self.assertEqual(it.Int32ByteMaxValue, it.intT(it.ByteByteMaxValue))
self.assertEqual(it.Int16ByteMaxValue, it.shortT(it.ByteByteMaxValue))
self.assertEqual(it.Int64ByteMaxValue, it.longT(it.ByteByteMaxValue))
self.assertEqual(it.ByteByteMaxValue, it.byteT(it.ByteByteMaxValue))
self.assertEqual(it.CharByteMaxValue, it.charT(it.ByteByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.ByteByteMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.ByteByteMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.ByteByteMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.ByteByteMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.ByteByteMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.ByteByteMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.ByteByteMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.ByteByteMinValue))
self.assertEqual(it.SByteVal6, it.sbyteT(it.ByteByteMinValue))
self.assertEqual(it.CharVal6, it.charT(it.ByteByteMinValue))
self.assertEqual(it.BooleanVal6, it.boolT(it.ByteByteMinValue))
self.assertEqual(it.UInt32SByteMaxValue, it.uintT(it.ByteSByteMaxValue))
self.assertEqual(it.UInt16SByteMaxValue, it.ushortT(it.ByteSByteMaxValue))
self.assertEqual(it.UInt64SByteMaxValue, it.ulongT(it.ByteSByteMaxValue))
self.assertEqual(it.Int32SByteMaxValue, it.intT(it.ByteSByteMaxValue))
self.assertEqual(it.Int16SByteMaxValue, it.shortT(it.ByteSByteMaxValue))
self.assertEqual(it.Int64SByteMaxValue, it.longT(it.ByteSByteMaxValue))
self.assertEqual(it.ByteSByteMaxValue, it.byteT(it.ByteSByteMaxValue))
self.assertEqual(it.SByteSByteMaxValue, it.sbyteT(it.ByteSByteMaxValue))
self.assertEqual(it.CharSByteMaxValue, it.charT(it.ByteSByteMaxValue))
self.assertEqual(it.BooleanVal8, it.boolT(it.ByteSByteMaxValue))
self.assertEqual(it.UInt32Val6, it.uintT(it.ByteCharMinValue))
self.assertEqual(it.UInt16Val6, it.ushortT(it.ByteCharMinValue))
self.assertEqual(it.UInt64Val6, it.ulongT(it.ByteCharMinValue))
self.assertEqual(it.Int32Val6, it.intT(it.ByteCharMinValue))
self.assertEqual(it.Int16Val6, it.shortT(it.ByteCharMinValue))
self.assertEqual(it.Int64Val6, it.longT(it.ByteCharMinValue))
self.assertEqual(it.ByteVal6, it.byteT(it.ByteCharMinValue))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.ByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.ByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.ByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val0,it.uintT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.UInt16Val0,it.ushortT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.UInt64Val0,it.ulongT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.Int32Val0,it.intT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.Int16Val0,it.shortT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.Int64Val0,it.longT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.ByteVal0,it.byteT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.SByteVal0,it.sbyteT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.CharVal0,it.charT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.ByteVal0)))
self.assertTrue(it.self.assertEqual(it.UInt32Val1,it.uintT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.UInt16Val1,it.ushortT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.UInt64Val1,it.ulongT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.Int32Val1,it.intT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.Int16Val1,it.shortT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.Int64Val1,it.longT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.ByteVal1,it.byteT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.SByteVal1,it.sbyteT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.CharVal1,it.charT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.ByteVal1)))
self.assertTrue(it.self.assertEqual(it.UInt32Val2,it.uintT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.UInt16Val2,it.ushortT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.UInt64Val2,it.ulongT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.Int32Val2,it.intT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.Int16Val2,it.shortT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.Int64Val2,it.longT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.ByteVal2,it.byteT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.SByteVal2,it.sbyteT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.CharVal2,it.charT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.ByteVal2)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.ByteVal6)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.CharVal7,it.charT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.ByteVal7)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.SByteUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.SByteUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.SByteUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.SByteByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32SByteMaxValue,it.uintT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16SByteMaxValue,it.ushortT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64SByteMaxValue,it.ulongT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32SByteMaxValue,it.intT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16SByteMaxValue,it.shortT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64SByteMaxValue,it.longT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteSByteMaxValue,it.byteT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteSByteMaxValue,it.sbyteT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.CharSByteMaxValue,it.charT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32SByteMinValue,it.intT(it.SByteSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16SByteMinValue,it.shortT(it.SByteSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64SByteMinValue,it.longT(it.SByteSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteSByteMinValue,it.sbyteT(it.SByteSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.SByteCharMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val0,it.uintT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.UInt16Val0,it.ushortT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.UInt64Val0,it.ulongT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.Int32Val0,it.intT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.Int16Val0,it.shortT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.Int64Val0,it.longT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.ByteVal0,it.byteT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.SByteVal0,it.sbyteT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.CharVal0,it.charT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteVal0)))
self.assertTrue(it.self.assertEqual(it.UInt32Val1,it.uintT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.UInt16Val1,it.ushortT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.UInt64Val1,it.ulongT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.Int32Val1,it.intT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.Int16Val1,it.shortT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.Int64Val1,it.longT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.ByteVal1,it.byteT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.SByteVal1,it.sbyteT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.CharVal1,it.charT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteVal1)))
self.assertTrue(it.self.assertEqual(it.UInt32Val2,it.uintT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.UInt16Val2,it.ushortT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.UInt64Val2,it.ulongT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.Int32Val2,it.intT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.Int16Val2,it.shortT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.Int64Val2,it.longT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.ByteVal2,it.byteT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.SByteVal2,it.sbyteT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.CharVal2,it.charT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteVal2)))
self.assertTrue(it.self.assertEqual(it.Int32Val3,it.intT(it.SByteVal3)))
self.assertTrue(it.self.assertEqual(it.Int16Val3,it.shortT(it.SByteVal3)))
self.assertTrue(it.self.assertEqual(it.Int64Val3,it.longT(it.SByteVal3)))
self.assertTrue(it.self.assertEqual(it.SByteVal3,it.sbyteT(it.SByteVal3)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteVal3)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.CharVal6,it.charT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.SByteVal6)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.CharVal7,it.charT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteVal7)))
self.assertTrue(it.self.assertEqual(it.Int32Val8,it.intT(it.SByteVal8)))
self.assertTrue(it.self.assertEqual(it.Int16Val8,it.shortT(it.SByteVal8)))
self.assertTrue(it.self.assertEqual(it.Int64Val8,it.longT(it.SByteVal8)))
self.assertTrue(it.self.assertEqual(it.SByteVal8,it.sbyteT(it.SByteVal8)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.SByteVal8)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanUInt32MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.BooleanUInt32MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanUInt16MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.BooleanUInt16MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanUInt64MaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.BooleanUInt64MinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.BooleanByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanSByteMaxValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanSByteMinValue)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanVal1)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanVal2)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanVal3)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanVal4)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.BooleanVal5)))
self.assertTrue(it.self.assertEqual(it.UInt32Val6,it.uintT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.UInt16Val6,it.ushortT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.UInt64Val6,it.ulongT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.Int32Val6,it.intT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.Int16Val6,it.shortT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.Int64Val6,it.longT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.ByteVal6,it.byteT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.SByteVal6,it.sbyteT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.BooleanVal6,it.boolT(it.BooleanVal6)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanVal7)))
self.assertTrue(it.self.assertEqual(it.UInt32Val7,it.uintT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.UInt16Val7,it.ushortT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.UInt64Val7,it.ulongT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.Int32Val7,it.intT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.Int16Val7,it.shortT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.Int64Val7,it.longT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.ByteVal7,it.byteT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.SByteVal7,it.sbyteT(it.BooleanVal8)))
self.assertTrue(it.self.assertEqual(it.BooleanVal8,it.boolT(it.BooleanVal8)))
def test_long(self):
class myint(int):
def __str__(self): return 'myint'
self.assertEqual(repr(myint(int(3))), '3')
def test_override_eq(self):
for base_type in [float, int]:
class F(base_type):
def __eq__(self, other):
return other == 'abc'
def __ne__(self, other):
return other == 'def'
self.assertEqual(F() == 'abc', True)
self.assertEqual(F() != 'def', True)
self.assertEqual(F() == 'qwe', False)
self.assertEqual(F() != 'qwe', False)
def test_bad_float_to_int(self):
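# int() cannot represent non-finite floats: infinities raise OverflowError, while NaN raises ValueError.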
self.assertRaises(OverflowError, int, 1.0e340) # Positive Infinity
self.assertRaises(OverflowError, int, -1.0e340) # Negative Infinity
self.assertRaises(ValueError, int, 1.0e340-1.0e340) # NaN (inf - inf)
def test_int___int__(self):
for x in [-(int(2**(32-1)-1)), -3, -2, -1, 0, 1, 2, 3, int(2**(32-1)-1)]:
self.assertEqual(x.__int__(), x)
@skipUnlessIronPython()
def test_long_conv(self):
class Foo(int):
def __int__(self):
return big(42)
self.assertEqual(int(Foo()), 42)
def test_long_div(self):
x = int('2'*400 + '9')
y = int('3'*400 + '8')
nx = -x
self.assertEqual(x/y, 2/3)
self.assertEqual(x/(x+1), 1.0)
self.assertEqual((x+1)/x, 1.0)
self.assertEqual(nx/(x+1), -1.0)
self.assertEqual((x+1)/nx, -1.0)
def test_pow_edges(self):
class foo(object):
def __pow__(self, *args): return NotImplemented
self.assertRaisesPartialMessage(TypeError, "3rd argument not allowed unless all arguments are integers", pow, foo(), 2.0, 3.0)
self.assertRaisesPartialMessage(TypeError, "unsupported operand type(s)", pow, foo(), 2, 3)
x = 3
self.assertEqual(x.__pow__(2.0, 3.0), NotImplemented)
self.assertEqual(x.__pow__(2.0, 3), NotImplemented)
self.assertEqual(x.__pow__(2, 3.0), NotImplemented)
def test_int_from_long(self):
"""int(longVal) should return an Int32 if it's within range"""
class x(int): pass
if is_cli: import System
for base in (int, x):
for num, num_repr in [
(big(-2**31-2), '-2147483650'),
(big(-2**31-1), '-2147483649'),
(big(-2**31), '-2147483648'),
(big(-2**31+1), '-2147483647'),
(big(-2**31+2), '-2147483646'),
(big(0), '0'),
(big(1), '1'),
(big(2**31-2), '2147483646'),
(big(2**31-1), '2147483647'),
(big(2**31), '2147483648'),
(big(2**31+1), '2147483649'),
]:
self.assertEqual(repr(int(base(num))), num_repr)
if is_cli:
if num < 2**31 and num >= -2**31:
self.assertTrue(hasattr(int(base(num)), "MaxValue"))
self.assertTrue(hasattr(int(base(num)), "MinValue"))
else:
self.assertFalse(hasattr(int(base(num)), "MaxValue"))
self.assertFalse(hasattr(int(base(num)), "MinValue"))
def test_float_special_methods(self):
self.assertEqual(float.__lt__(2.0, 3.0), True)
self.assertEqual(float.__lt__(3.0, 2.0), False)
self.assertEqual(float.__lt__(2.0, 2.0), False)
self.assertEqual(float.__lt__(-1.0e340, 1.0e340), True)
self.assertEqual(float.__gt__(2.0, 3.0), False)
self.assertEqual(float.__gt__(3.0, 2.0), True)
self.assertEqual(float.__gt__(2.0, 2.0), False)
self.assertEqual(float.__ge__(2.0, 3.0), False)
self.assertEqual(float.__ge__(3.0, 2.0), True)
self.assertEqual(float.__ge__(2.0, 2.0), True)
self.assertEqual(float.__le__(2.0, 3.0), True)
self.assertEqual(float.__le__(3.0, 2.0), False)
self.assertEqual(float.__le__(2.0, 2.0), True)
self.assertEqual(float.__eq__(2.0, 3.0), False)
self.assertEqual(float.__eq__(3.0, 3.0), True)
self.assertEqual(float.__ne__(2.0, 3.0), True)
self.assertEqual(float.__ne__(3.0, 3.0), False)
def test_float_divmod(self):
# https://github.com/IronLanguages/main/issues/1236
self.assertEqual(divmod(0.123, 0.001), (122.0, 0.0009999999999999957))
self.assertEqual(divmod(-0.123, 0.001), (-123.0, 4.336808689942018e-18))
self.assertEqual(divmod(0.123, -0.001), (-123.0, -4.336808689942018e-18))
self.assertEqual(divmod(-0.123, -0.001), (122.0, -0.0009999999999999957))
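# Reminder: for floats, divmod(a, b) returns (floor(a / b), a - floor(a / b) * b), so the identity
# a == q * b + r holds only up to floating-point rounding, which is why the remainders above are inexact.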
def test_float_mod(self):
self.assertEqual(0.123 % 0.001, 0.0009999999999999957)
self.assertEqual(-0.123 % 0.001, 4.336808689942018e-18)
self.assertEqual(0.123 % -0.001, -4.336808689942018e-18)
self.assertEqual(-0.123 % -0.001, -0.0009999999999999957)
def test_float_format_gprec(self):
# https://github.com/IronLanguages/main/issues/1276
self.assertEqual("%.17g" % 1021095.0286738087, '1021095.0286738087')
def test_hex_and_octal(self):
for num, num_repr in [
(big(0x20), '32'),
(big(0X20), '32'), #Capital X
(int(0x20), '32'),
(float(-0x20), '-32.0'),
(big(0o10), '8'),
(int(-0o10), '-8'),
(float(0o0010), '8.0'),
]:
self.assertEqual(repr(num), num_repr)
for num in [ "0xx2", "09", "0P32", "0G" ]:
self.assertRaises(SyntaxError, lambda: eval(num))
def test_cp27383(self):
self.assertEqual(int('0 ', 0), 0)
self.assertEqual(int(' 0', 0), 0)
self.assertEqual(int('0', 0), 0)
run_test(__name__)
| 75.921829
| 138
| 0.693411
|
b5d0ac4da90d385c057aaa53edd5e7f498390331
| 470
|
py
|
Python
|
app/request.py
|
aleki21/blogip
|
3a5e71b4b91dad3e0967d11b07a7cbfad26b664d
|
[
"MIT"
] | null | null | null |
app/request.py
|
aleki21/blogip
|
3a5e71b4b91dad3e0967d11b07a7cbfad26b664d
|
[
"MIT"
] | null | null | null |
app/request.py
|
aleki21/blogip
|
3a5e71b4b91dad3e0967d11b07a7cbfad26b664d
|
[
"MIT"
] | null | null | null |
import urllib.request, json
QUOTE_API = 'http://quotes.stormconsultancy.co.uk/random.json'
def get_quotes():
with urllib.request.urlopen(QUOTE_API) as url:
get_quotes_data = url.read()
get_quotes_response = json.loads(get_quotes_data)
quotes_dict = {}
if get_quotes_response:
quotes_dict['author'] = get_quotes_response['author']
quotes_dict['quote'] = get_quotes_response['quote']
return quotes_dict
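# Illustrative usage sketch (not part of the original module; requires network access to QUOTE_API):
if __name__ == '__main__':
    fetched = get_quotes()
    print('"{quote}" - {author}'.format(**fetched))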
| 27.647059
| 66
| 0.678723
|
b5556d62c40085a5b7a12340e5d54f5ba47d0044
| 5,764
|
py
|
Python
|
VL-T5/src/adapters/adapter_modeling.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 41
|
2021-12-14T02:50:16.000Z
|
2022-03-30T07:41:19.000Z
|
VL-T5/src/adapters/adapter_modeling.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 1
|
2022-01-07T03:31:47.000Z
|
2022-03-25T00:31:53.000Z
|
VL-T5/src/adapters/adapter_modeling.py
|
ylsung/VL_adapter
|
287409f383f89a11764fc45806864693a4d3e498
|
[
"MIT"
] | 2
|
2021-12-14T03:10:18.000Z
|
2022-03-29T04:59:23.000Z
|
"""Implements an Adapter, Low-rank adapters and Hyper-adapter Layers."""
import torch.nn as nn
from .adapter_utils import Activations
from .hypercomplex.layers import PHMLinear
from .low_rank_layer import LowRankLinear
class LowRankAdapter(nn.Module):
"""This is the low-rank adapter, in which each adapter is composed of two rank-one matrices.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.input_dim = config.input_dim
self.down_sample_size = self.input_dim // config.reduction_factor
self.activation = Activations(config.non_linearity.lower())
self.down_sampler = LowRankLinear(self.input_dim, self.down_sample_size,
w_init=config.low_rank_w_init,
rank=config.low_rank_rank)
self.up_sampler = LowRankLinear(self.down_sample_size, self.input_dim,
w_init=config.low_rank_w_init,
rank=config.low_rank_rank)
self.track_z = config.track_z
def forward(self, x):
z = self.down_sampler(x)
z = self.activation(z)
if self.track_z:
self.z = z
output = self.up_sampler(z)
return output
class Adapter(nn.Module):
"""Conventional Adapter layer, in which the weights of up and down sampler modules
are parameters and are optimized."""
def __init__(self, config):
super().__init__()
self.config = config
self.input_dim = config.d_model
reduction_factor = config.reduction_factor
self.down_sample_size = self.input_dim // reduction_factor
self.activation = Activations(config.non_linearity.lower())
self.down_sampler = nn.Linear(self.input_dim, self.down_sample_size)
self.up_sampler = nn.Linear(self.down_sample_size, self.input_dim)
self.track_z = config.track_z
def forward(self, x):
z = self.down_sampler(x)
z = self.activation(z)
if self.track_z:
self.z = z
output = self.up_sampler(z)
return output
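# Illustrative usage sketch (not part of the original file): the config object only needs
# the attributes Adapter reads above, so types.SimpleNamespace stands in for the project's
# real config class; the field values here, including 'relu' for Activations, are assumptions.
if __name__ == "__main__":
    import torch
    from types import SimpleNamespace
    demo_cfg = SimpleNamespace(d_model=768, reduction_factor=16,
                               non_linearity="relu", track_z=False)
    demo_adapter = Adapter(demo_cfg)
    x = torch.randn(2, 10, 768)   # (batch, seq_len, d_model)
    out = demo_adapter(x)         # bottleneck: 768 -> 48 -> 768
    assert out.shape == x.shape   # the adapter preserves the input shape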
class OutputAdapter(nn.Module):
"""Conventional Adapter layer, in which the weights of up and down sampler modules
are parameters and are optimized."""
def __init__(self, config, output_dim):
super().__init__()
self.config = config
self.input_dim = config.d_model
reduction_factor = 16
self.down_sample_size = self.input_dim // reduction_factor
self.activation = Activations(config.non_linearity.lower())
self.down_sampler = nn.Linear(self.input_dim, self.down_sample_size)
self.up_sampler = nn.Linear(self.down_sample_size, output_dim)
def forward(self, x):
z = self.down_sampler(x)
z = self.activation(z)
output = self.up_sampler(z)
return output
def resize_up_sampler(self, resized_size):
self.up_sampler = nn.Linear(self.down_sample_size, resized_size)
class HyperComplexAdapter(nn.Module):
"""Hypercomplex Adapter layer, in which the weights of up and down sampler modules
are parameters are 1/n times of the conventional adapter layers, where n is
hypercomplex division number."""
def __init__(self, config):
super().__init__()
self.config = config
self.input_dim = config.input_dim
self.down_sample_size = self.input_dim // config.reduction_factor
self.activation = Activations(config.non_linearity.lower())
self.down_sampler = PHMLinear(in_features=self.input_dim,
out_features=self.down_sample_size,
bias=True,
c_init=config.phm_c_init,
phm_dim=config.hypercomplex_division,
learn_phm=config.learn_phm,
w_init=config.hypercomplex_nonlinearity,
shared_phm_rule=config.shared_phm_rule,
factorized_phm=config.factorized_phm,
shared_W_phm=config.shared_W_phm,
factorized_phm_rule=config.factorized_phm_rule,
phm_rank=config.phm_rank,
phm_init_range=config.phm_init_range,
kronecker_prod=config.kronecker_prod)
self.up_sampler = PHMLinear(in_features=self.down_sample_size,
out_features=self.input_dim,
bias=True,
c_init=config.phm_c_init,
phm_dim=config.hypercomplex_division,
learn_phm=config.learn_phm,
w_init=config.hypercomplex_nonlinearity,
shared_phm_rule=config.shared_phm_rule,
factorized_phm=config.factorized_phm,
shared_W_phm=config.shared_W_phm,
factorized_phm_rule=config.factorized_phm_rule,
phm_rank=config.phm_rank,
phm_init_range=config.phm_init_range,
kronecker_prod=config.kronecker_prod)
self.track_z = config.track_z
def forward(self, x):
z = self.down_sampler(x)
z = self.activation(z)
if self.track_z:
self.z = z
return self.up_sampler(z)
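# Rough parameter-count intuition behind the docstring's 1/n claim (comment only): a dense
# d x k linear layer stores d*k weights, while a PHM layer with division number n expresses
# the same map as a sum of n Kronecker products of an n x n block with a (d/n) x (k/n) block,
# costing about n^3 + d*k/n weights -- roughly d*k/n for small n.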
| 43.666667
| 96
| 0.577897
|
6166049db9647174a1e85de9003d454bfd4c1928
| 964
|
py
|
Python
|
envs/setup.py
|
a-akram/ctd2022
|
70b59c90e060b9f40f12754e596617154a3230d9
|
[
"MIT"
] | null | null | null |
envs/setup.py
|
a-akram/ctd2022
|
70b59c90e060b9f40f12754e596617154a3230d9
|
[
"MIT"
] | null | null | null |
envs/setup.py
|
a-akram/ctd2022
|
70b59c90e060b9f40f12754e596617154a3230d9
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
dependencies = [
"decorator",
"memory_profiler",
"traintrack",
"trackml@ https://github.com/LAL/trackml-library/tarball/master#egg=trackml-3",
]
setup(
name="exatrkx-pipeline",
version="0.4.0",
description="Models, pipelines, and utilities for solving tracking problems with machine learning.",
author="Daniel Murnane",
install_requires=dependencies,
packages=find_packages(include=["examples", "src", "src.*"]),
entry_points={"console_scripts": []},
long_description=read("README.md"),
license="Apache License, Version 2.0",
keywords=[
"graph networks",
"track finding",
"tracking",
"seeding",
"GNN",
"machine learning",
],
url="https://github.com/HSF-reco-and-software-triggers/Tracking-ML-Exa.TrkX",
)
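# Illustrative: for development, this package can be installed in editable mode with "pip install -e ."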
| 26.777778
| 104
| 0.655602
|
bf017e44f410febdc897a4de0a2d6f095643a3aa
| 34,055
|
py
|
Python
|
tests/unit/test_fileio.py
|
cojenco/python-storage
|
79b669bbede1cd4f06f1d697b71c7f9f2442fb80
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_fileio.py
|
cojenco/python-storage
|
79b669bbede1cd4f06f1d697b71c7f9f2442fb80
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_fileio.py
|
cojenco/python-storage
|
79b669bbede1cd4f06f1d697b71c7f9f2442fb80
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import io
import string
import mock
from google.api_core.exceptions import RequestRangeNotSatisfiable
from google.cloud.storage.retry import DEFAULT_RETRY
TEST_TEXT_DATA = string.ascii_lowercase + "\n" + string.ascii_uppercase + "\n"
TEST_BINARY_DATA = TEST_TEXT_DATA.encode("utf-8")
TEST_MULTIBYTE_TEXT_DATA = u"あいうえおかきくけこさしすせそたちつてと"
PLAIN_CONTENT_TYPE = "text/plain"
NUM_RETRIES = 2
class _BlobReaderBase:
@staticmethod
def _make_blob_reader(*args, **kwargs):
from google.cloud.storage.fileio import BlobReader
return BlobReader(*args, **kwargs)
class _BlobWriterBase:
@staticmethod
def _make_blob_writer(*args, **kwargs):
from google.cloud.storage.fileio import BlobWriter
return BlobWriter(*args, **kwargs)
class TestBlobReaderBinary(unittest.TestCase, _BlobReaderBase):
def test_attributes(self):
blob = mock.Mock()
blob.chunk_size = 256
reader = self._make_blob_reader(blob)
self.assertTrue(reader.seekable())
self.assertTrue(reader.readable())
self.assertFalse(reader.writable())
self.assertEqual(reader._chunk_size, 256)
self.assertEqual(reader._retry, DEFAULT_RETRY)
def test_attributes_explicit(self):
blob = mock.Mock()
blob.chunk_size = 256
reader = self._make_blob_reader(blob, chunk_size=1024, retry=None)
self.assertEqual(reader._chunk_size, 1024)
self.assertIsNone(reader._retry)
def test_read(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_BINARY_DATA[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
download_kwargs = {"if_metageneration_match": 1}
reader = self._make_blob_reader(blob, chunk_size=8, **download_kwargs)
# Read and trigger the first download of chunk_size.
self.assertEqual(reader.read(1), TEST_BINARY_DATA[0:1])
blob.download_as_bytes.assert_called_once_with(
start=0, end=8, checksum=None, retry=DEFAULT_RETRY, **download_kwargs
)
# Read from buffered data only.
self.assertEqual(reader.read(3), TEST_BINARY_DATA[1:4])
blob.download_as_bytes.assert_called_once()
# Read remaining buffer plus an additional chunk read.
self.assertEqual(reader.read(8), TEST_BINARY_DATA[4:12])
self.assertEqual(reader._pos, 12)
self.assertEqual(blob.download_as_bytes.call_count, 2)
blob.download_as_bytes.assert_called_with(
start=8, end=16, checksum=None, retry=DEFAULT_RETRY, **download_kwargs
)
# Read a larger amount, requiring a download larger than chunk_size.
self.assertEqual(reader.read(16), TEST_BINARY_DATA[12:28])
self.assertEqual(reader._pos, 28)
self.assertEqual(blob.download_as_bytes.call_count, 3)
blob.download_as_bytes.assert_called_with(
start=16, end=28, checksum=None, retry=DEFAULT_RETRY, **download_kwargs
)
# Read all remaining data.
self.assertEqual(reader.read(), TEST_BINARY_DATA[28:])
self.assertEqual(blob.download_as_bytes.call_count, 4)
blob.download_as_bytes.assert_called_with(
start=28, end=None, checksum=None, retry=DEFAULT_RETRY, **download_kwargs
)
reader.close()
def test_retry_passed_through(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_BINARY_DATA[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
download_kwargs = {"if_metageneration_match": 1}
reader = self._make_blob_reader(
blob, chunk_size=8, retry=None, **download_kwargs
)
# Read and trigger the first download of chunk_size.
self.assertEqual(reader.read(1), TEST_BINARY_DATA[0:1])
blob.download_as_bytes.assert_called_once_with(
start=0, end=8, checksum=None, retry=None, **download_kwargs
)
reader.close()
def test_416_error_handled(self):
blob = mock.Mock()
blob.download_as_bytes = mock.Mock(
side_effect=RequestRangeNotSatisfiable("message")
)
reader = self._make_blob_reader(blob)
self.assertEqual(reader.read(), b"")
def test_readline(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_BINARY_DATA[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
reader = self._make_blob_reader(blob, chunk_size=10)
# Read a line. With chunk_size=10, expect three chunks downloaded.
self.assertEqual(reader.readline(), TEST_BINARY_DATA[:27])
blob.download_as_bytes.assert_called_with(
start=20, end=30, checksum=None, retry=DEFAULT_RETRY
)
self.assertEqual(blob.download_as_bytes.call_count, 3)
# Read another line.
self.assertEqual(reader.readline(), TEST_BINARY_DATA[27:])
blob.download_as_bytes.assert_called_with(
start=50, end=60, checksum=None, retry=DEFAULT_RETRY
)
self.assertEqual(blob.download_as_bytes.call_count, 6)
blob.size = len(TEST_BINARY_DATA)
reader.seek(0)
# Read all lines. The readlines algorithm will attempt to read past the end of the last line once to verify there is no more to read.
self.assertEqual(b"".join(reader.readlines()), TEST_BINARY_DATA)
blob.download_as_bytes.assert_called_with(
start=len(TEST_BINARY_DATA),
end=len(TEST_BINARY_DATA) + 10,
checksum=None,
retry=DEFAULT_RETRY,
)
self.assertEqual(blob.download_as_bytes.call_count, 13)
reader.close()
def test_seek(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_BINARY_DATA[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
blob.size = None
download_kwargs = {"if_metageneration_match": 1}
reader = self._make_blob_reader(blob, chunk_size=8, **download_kwargs)
# Seek needs the blob size to work and should call reload() if the size
# is not known. Set a mock to initialize the size if reload() is called.
def initialize_size(**_):
blob.size = len(TEST_BINARY_DATA)
blob.reload = mock.Mock(side_effect=initialize_size)
# Seek, forcing a blob reload in order to validate the seek doesn't
# exceed the end of the blob.
self.assertEqual(reader.seek(4), 4)
blob.reload.assert_called_once_with(**download_kwargs)
self.assertEqual(reader.read(4), TEST_BINARY_DATA[4:8])
self.assertEqual(blob.download_as_bytes.call_count, 1)
# Seek forward 2 bytes with whence=1. Position is still in buffer.
self.assertEqual(reader.seek(2, 1), 10)
self.assertEqual(reader.read(2), TEST_BINARY_DATA[10:12])
self.assertEqual(blob.download_as_bytes.call_count, 1)
# Attempt seek past end of file. Position should be at end of file.
self.assertEqual(
reader.seek(len(TEST_BINARY_DATA) + 100), len(TEST_BINARY_DATA)
)
# Seek to beginning. The next read will need to download data again.
self.assertEqual(reader.seek(0), 0)
self.assertEqual(reader.read(4), TEST_BINARY_DATA[0:4])
self.assertEqual(blob.download_as_bytes.call_count, 2)
# Seek relative to end with whence=2.
self.assertEqual(reader.seek(-1, 2), len(TEST_BINARY_DATA) - 1)
self.assertEqual(reader.read(), TEST_BINARY_DATA[-1:])
self.assertEqual(blob.download_as_bytes.call_count, 3)
with self.assertRaises(ValueError):
reader.seek(1, 4)
# tell() is an inherited method that uses seek().
self.assertEqual(reader.tell(), reader._pos)
reader.close()
def test_close(self):
blob = mock.Mock()
reader = self._make_blob_reader(blob)
reader.close()
with self.assertRaises(ValueError):
reader.read()
with self.assertRaises(ValueError):
reader.seek(0)
def test_context_mgr(self):
# Just verify that the context manager form doesn't crash.
blob = mock.Mock()
with self._make_blob_reader(blob) as reader:
reader.close()
def test_rejects_invalid_kwargs(self):
blob = mock.Mock()
with self.assertRaises(ValueError):
self._make_blob_reader(blob, invalid_kwarg=1)
class TestBlobWriterBinary(unittest.TestCase, _BlobWriterBase):
def test_attributes(self):
blob = mock.Mock()
blob.chunk_size = 256 * 1024
writer = self._make_blob_writer(blob)
self.assertFalse(writer.seekable())
self.assertFalse(writer.readable())
self.assertTrue(writer.writable())
self.assertEqual(writer._chunk_size, 256 * 1024)
def test_attributes_explicit(self):
blob = mock.Mock()
blob.chunk_size = 256 * 1024
writer = self._make_blob_writer(
blob, chunk_size=512 * 1024, retry=DEFAULT_RETRY
)
self.assertEqual(writer._chunk_size, 512 * 1024)
self.assertEqual(writer._retry, DEFAULT_RETRY)
def test_deprecated_text_mode_attribute(self):
blob = mock.Mock()
blob.chunk_size = 256 * 1024
writer = self._make_blob_writer(blob, text_mode=True)
self.assertTrue(writer._ignore_flush)
writer.flush() # This should do nothing and not raise an error.
def test_reject_wrong_chunk_size(self):
blob = mock.Mock()
blob.chunk_size = 123
with self.assertRaises(ValueError):
_ = self._make_blob_writer(blob)
@mock.patch("warnings.warn")
def test_write(self, mock_warn):
from google.cloud.storage._helpers import _NUM_RETRIES_MESSAGE
blob = mock.Mock()
upload = mock.Mock()
transport = mock.Mock()
blob._initiate_resumable_upload.return_value = (upload, transport)
with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
# Create a writer with (arbitrary) arguments so we can validate the
# arguments are used.
# It would be normal to use a context manager here, but not doing so
# gives us more control over close() for test purposes.
upload_kwargs = {"if_metageneration_match": 1}
chunk_size = 8 # Note: Real upload requires a multiple of 256KiB.
writer = self._make_blob_writer(
blob,
chunk_size=chunk_size,
num_retries=NUM_RETRIES,
content_type=PLAIN_CONTENT_TYPE,
**upload_kwargs
)
# The transmit_next_chunk method must actually consume bytes from the
# sliding buffer for the flush() feature to work properly.
upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
chunk_size
)
# Write under chunk_size. This should be buffered and the upload not
# initiated.
writer.write(TEST_BINARY_DATA[0:4])
            blob._initiate_resumable_upload.assert_not_called()
            # Write over chunk_size. This should result in upload initialization
            # and multiple chunks uploaded (4 buffered + 28 new = 32 bytes,
            # i.e. 4 chunks of 8).
writer.write(TEST_BINARY_DATA[4:32])
blob._initiate_resumable_upload.assert_called_once_with(
blob.bucket.client,
writer._buffer,
PLAIN_CONTENT_TYPE,
None,
NUM_RETRIES,
chunk_size=chunk_size,
retry=None,
**upload_kwargs
)
upload.transmit_next_chunk.assert_called_with(transport)
self.assertEqual(upload.transmit_next_chunk.call_count, 4)
# Write another byte, finalize and close.
writer.write(TEST_BINARY_DATA[32:33])
self.assertEqual(writer.tell(), 33)
writer.close()
self.assertEqual(upload.transmit_next_chunk.call_count, 5)
mock_warn.assert_called_once_with(
_NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2,
)
def test_flush_fails(self):
blob = mock.Mock(chunk_size=None)
writer = self._make_blob_writer(blob)
with self.assertRaises(io.UnsupportedOperation):
writer.flush()
def test_seek_fails(self):
blob = mock.Mock(chunk_size=None)
writer = self._make_blob_writer(blob)
with self.assertRaises(io.UnsupportedOperation):
writer.seek()
def test_conditional_retry_failure(self):
blob = mock.Mock()
upload = mock.Mock()
transport = mock.Mock()
blob._initiate_resumable_upload.return_value = (upload, transport)
with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
# Create a writer.
# It would be normal to use a context manager here, but not doing so
# gives us more control over close() for test purposes.
chunk_size = 8 # Note: Real upload requires a multiple of 256KiB.
writer = self._make_blob_writer(
blob, chunk_size=chunk_size, content_type=PLAIN_CONTENT_TYPE,
)
# The transmit_next_chunk method must actually consume bytes from the
# sliding buffer for the flush() feature to work properly.
upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
chunk_size
)
# Write under chunk_size. This should be buffered and the upload not
# initiated.
writer.write(TEST_BINARY_DATA[0:4])
            blob._initiate_resumable_upload.assert_not_called()
# Write over chunk_size. This should result in upload initialization
# and multiple chunks uploaded.
# Due to the condition not being fulfilled, retry should be None.
writer.write(TEST_BINARY_DATA[4:32])
blob._initiate_resumable_upload.assert_called_once_with(
blob.bucket.client,
writer._buffer,
PLAIN_CONTENT_TYPE,
None, # size
None, # num_retries
chunk_size=chunk_size,
retry=None,
)
upload.transmit_next_chunk.assert_called_with(transport)
self.assertEqual(upload.transmit_next_chunk.call_count, 4)
# Write another byte, finalize and close.
writer.write(TEST_BINARY_DATA[32:33])
writer.close()
self.assertEqual(upload.transmit_next_chunk.call_count, 5)
def test_conditional_retry_pass(self):
blob = mock.Mock()
upload = mock.Mock()
transport = mock.Mock()
blob._initiate_resumable_upload.return_value = (upload, transport)
with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
# Create a writer.
# It would be normal to use a context manager here, but not doing so
# gives us more control over close() for test purposes.
chunk_size = 8 # Note: Real upload requires a multiple of 256KiB.
writer = self._make_blob_writer(
blob,
chunk_size=chunk_size,
content_type=PLAIN_CONTENT_TYPE,
if_generation_match=123456,
)
# The transmit_next_chunk method must actually consume bytes from the
# sliding buffer for the flush() feature to work properly.
upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
chunk_size
)
# Write under chunk_size. This should be buffered and the upload not
# initiated.
writer.write(TEST_BINARY_DATA[0:4])
            blob._initiate_resumable_upload.assert_not_called()
# Write over chunk_size. This should result in upload initialization
# and multiple chunks uploaded.
# Due to the condition being fulfilled, retry should be DEFAULT_RETRY.
writer.write(TEST_BINARY_DATA[4:32])
blob._initiate_resumable_upload.assert_called_once_with(
blob.bucket.client,
writer._buffer,
PLAIN_CONTENT_TYPE,
None, # size
None, # num_retries
chunk_size=chunk_size,
retry=DEFAULT_RETRY,
if_generation_match=123456,
)
upload.transmit_next_chunk.assert_called_with(transport)
self.assertEqual(upload.transmit_next_chunk.call_count, 4)
# Write another byte, finalize and close.
writer.write(TEST_BINARY_DATA[32:33])
writer.close()
self.assertEqual(upload.transmit_next_chunk.call_count, 5)
def test_forced_default_retry(self):
blob = mock.Mock()
upload = mock.Mock()
transport = mock.Mock()
blob._initiate_resumable_upload.return_value = (upload, transport)
with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
# Create a writer.
# It would be normal to use a context manager here, but not doing so
# gives us more control over close() for test purposes.
chunk_size = 8 # Note: Real upload requires a multiple of 256KiB.
writer = self._make_blob_writer(
blob,
chunk_size=chunk_size,
content_type=PLAIN_CONTENT_TYPE,
retry=DEFAULT_RETRY,
)
# The transmit_next_chunk method must actually consume bytes from the
# sliding buffer for the flush() feature to work properly.
upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
chunk_size
)
# Write under chunk_size. This should be buffered and the upload not
# initiated.
writer.write(TEST_BINARY_DATA[0:4])
            blob._initiate_resumable_upload.assert_not_called()
# Write over chunk_size. This should result in upload initialization
# and multiple chunks uploaded.
writer.write(TEST_BINARY_DATA[4:32])
blob._initiate_resumable_upload.assert_called_once_with(
blob.bucket.client,
writer._buffer,
PLAIN_CONTENT_TYPE,
None, # size
None, # num_retries
chunk_size=chunk_size,
retry=DEFAULT_RETRY,
)
upload.transmit_next_chunk.assert_called_with(transport)
self.assertEqual(upload.transmit_next_chunk.call_count, 4)
# Write another byte, finalize and close.
writer.write(TEST_BINARY_DATA[32:33])
writer.close()
self.assertEqual(upload.transmit_next_chunk.call_count, 5)
@mock.patch("warnings.warn")
def test_num_retries_and_retry_conflict(self, mock_warn):
from google.cloud.storage._helpers import _NUM_RETRIES_MESSAGE
blob = mock.Mock()
blob._initiate_resumable_upload.side_effect = ValueError
with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
# Create a writer.
# It would be normal to use a context manager here, but not doing so
# gives us more control over close() for test purposes.
chunk_size = 8 # Note: Real upload requires a multiple of 256KiB.
writer = self._make_blob_writer(
blob,
chunk_size=chunk_size,
content_type=PLAIN_CONTENT_TYPE,
num_retries=2,
retry=DEFAULT_RETRY,
)
# Write under chunk_size. This should be buffered and the upload not
# initiated.
writer.write(TEST_BINARY_DATA[0:4])
            blob._initiate_resumable_upload.assert_not_called()
# Write over chunk_size. The mock will raise a ValueError, simulating
# actual behavior when num_retries and retry are both specified.
with self.assertRaises(ValueError):
writer.write(TEST_BINARY_DATA[4:32])
blob._initiate_resumable_upload.assert_called_once_with(
blob.bucket.client,
writer._buffer,
PLAIN_CONTENT_TYPE,
None, # size
2, # num_retries
chunk_size=chunk_size,
retry=DEFAULT_RETRY,
)
mock_warn.assert_called_once_with(
_NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2,
)
@mock.patch("warnings.warn")
def test_num_retries_only(self, mock_warn):
from google.cloud.storage._helpers import _NUM_RETRIES_MESSAGE
blob = mock.Mock()
upload = mock.Mock()
transport = mock.Mock()
blob._initiate_resumable_upload.return_value = (upload, transport)
with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
# Create a writer.
# It would be normal to use a context manager here, but not doing so
# gives us more control over close() for test purposes.
chunk_size = 8 # Note: Real upload requires a multiple of 256KiB.
writer = self._make_blob_writer(
blob,
chunk_size=chunk_size,
content_type=PLAIN_CONTENT_TYPE,
num_retries=2,
)
# The transmit_next_chunk method must actually consume bytes from the
# sliding buffer for the flush() feature to work properly.
upload.transmit_next_chunk.side_effect = lambda _: writer._buffer.read(
chunk_size
)
# Write under chunk_size. This should be buffered and the upload not
# initiated.
writer.write(TEST_BINARY_DATA[0:4])
            blob._initiate_resumable_upload.assert_not_called()
# Write over chunk_size. This should result in upload initialization
# and multiple chunks uploaded.
writer.write(TEST_BINARY_DATA[4:32])
blob._initiate_resumable_upload.assert_called_once_with(
blob.bucket.client,
writer._buffer,
PLAIN_CONTENT_TYPE,
None, # size
2, # num_retries
chunk_size=chunk_size,
retry=None,
)
upload.transmit_next_chunk.assert_called_with(transport)
self.assertEqual(upload.transmit_next_chunk.call_count, 4)
mock_warn.assert_called_once_with(
_NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2
)
# Write another byte, finalize and close.
writer.write(TEST_BINARY_DATA[32:33])
writer.close()
self.assertEqual(upload.transmit_next_chunk.call_count, 5)
def test_rejects_invalid_kwargs(self):
blob = mock.Mock()
with self.assertRaises(ValueError):
self._make_blob_writer(blob, invalid_kwarg=1)
class Test_SlidingBuffer(unittest.TestCase):
@staticmethod
def _make_sliding_buffer(*args, **kwargs):
from google.cloud.storage.fileio import SlidingBuffer
return SlidingBuffer(*args, **kwargs)
def test_write_and_read(self):
buff = self._make_sliding_buffer()
# Write and verify tell() still reports 0 and len is correct.
buff.write(TEST_BINARY_DATA)
self.assertEqual(buff.tell(), 0)
self.assertEqual(len(buff), len(TEST_BINARY_DATA))
# Read and verify tell() reports end.
self.assertEqual(buff.read(), TEST_BINARY_DATA)
self.assertEqual(buff.tell(), len(TEST_BINARY_DATA))
self.assertEqual(len(buff), len(TEST_BINARY_DATA))
def test_flush(self):
buff = self._make_sliding_buffer()
# Write and verify tell() still reports 0 and len is correct.
buff.write(TEST_BINARY_DATA)
self.assertEqual(buff.tell(), 0)
self.assertEqual(len(buff), len(TEST_BINARY_DATA))
# Read 8 bytes and verify tell reports correctly.
self.assertEqual(buff.read(8), TEST_BINARY_DATA[:8])
self.assertEqual(buff.tell(), 8)
self.assertEqual(len(buff), len(TEST_BINARY_DATA))
# Flush buffer and verify tell doesn't change but len does.
buff.flush()
self.assertEqual(buff.tell(), 8)
self.assertEqual(len(buff), len(TEST_BINARY_DATA) - 8)
# Read remainder.
self.assertEqual(buff.read(), TEST_BINARY_DATA[8:])
self.assertEqual(buff.tell(), len(TEST_BINARY_DATA))
self.assertEqual(len(buff), len(TEST_BINARY_DATA[8:]))
def test_seek(self):
buff = self._make_sliding_buffer()
buff.write(TEST_BINARY_DATA)
# Try to seek forward. Verify the tell() doesn't change.
with self.assertRaises(ValueError):
pos = buff.tell()
buff.seek(len(TEST_BINARY_DATA) + 1)
self.assertEqual(pos, buff.tell())
# Read 8 bytes, test seek backwards, read again, and flush.
self.assertEqual(buff.read(8), TEST_BINARY_DATA[:8])
buff.seek(0)
self.assertEqual(buff.read(8), TEST_BINARY_DATA[:8])
buff.flush()
self.assertEqual(buff.tell(), 8)
# Try to seek to a byte that has already been flushed.
with self.assertRaises(ValueError):
pos = buff.tell()
buff.seek(0)
self.assertEqual(pos, buff.tell())
def test_close(self):
buff = self._make_sliding_buffer()
buff.close()
with self.assertRaises(ValueError):
buff.read()
class TestBlobReaderText(unittest.TestCase, _BlobReaderBase):
def test_attributes(self):
blob = mock.Mock()
reader = io.TextIOWrapper(self._make_blob_reader(blob))
self.assertTrue(reader.seekable())
self.assertTrue(reader.readable())
self.assertFalse(reader.writable())
def test_read(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_TEXT_DATA.encode("utf-8")[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
blob.chunk_size = None
blob.size = len(TEST_TEXT_DATA.encode("utf-8"))
download_kwargs = {"if_metageneration_match": 1}
reader = io.TextIOWrapper(self._make_blob_reader(blob, **download_kwargs))
# The TextIOWrapper class has an internally defined chunk size which
# will override ours. The wrapper class is not under test.
# Read and trigger the first download of chunk_size.
self.assertEqual(reader.read(1), TEST_TEXT_DATA[0:1])
blob.download_as_bytes.assert_called_once()
# Read from buffered data only.
self.assertEqual(reader.read(3), TEST_TEXT_DATA[1:4])
blob.download_as_bytes.assert_called_once()
# Read all remaining data.
self.assertEqual(reader.read(), TEST_TEXT_DATA[4:])
# Seek to 0 and read all remaining data again.
reader.seek(0)
self.assertEqual(reader.read(), TEST_TEXT_DATA)
reader.close()
def test_multibyte_read(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_MULTIBYTE_TEXT_DATA.encode("utf-8")[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
blob.chunk_size = None
blob.size = len(TEST_MULTIBYTE_TEXT_DATA.encode("utf-8"))
download_kwargs = {"if_metageneration_match": 1}
reader = io.TextIOWrapper(self._make_blob_reader(blob, **download_kwargs))
# The TextIOWrapper class has an internally defined chunk size which
# will override ours. The wrapper class is not under test.
# Read and trigger the first download of chunk_size.
self.assertEqual(reader.read(1), TEST_MULTIBYTE_TEXT_DATA[0:1])
blob.download_as_bytes.assert_called_once()
# Read from buffered data only.
self.assertEqual(reader.read(3), TEST_MULTIBYTE_TEXT_DATA[1:4])
blob.download_as_bytes.assert_called_once()
# Read all remaining data.
self.assertEqual(reader.read(), TEST_MULTIBYTE_TEXT_DATA[4:])
# Seek to 0 and read all remaining data again.
reader.seek(0)
self.assertEqual(reader.read(), TEST_MULTIBYTE_TEXT_DATA)
reader.close()
def test_seek(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_TEXT_DATA.encode("utf-8")[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
blob.size = None
blob.chunk_size = None
download_kwargs = {"if_metageneration_match": 1}
reader = io.TextIOWrapper(self._make_blob_reader(blob, **download_kwargs))
# Seek needs the blob size to work and should call reload() if the size
# is not known. Set a mock to initialize the size if reload() is called.
def initialize_size(**_):
blob.size = len(TEST_TEXT_DATA.encode("utf-8"))
blob.reload = mock.Mock(side_effect=initialize_size)
# Seek, forcing a blob reload in order to validate the seek doesn't
# exceed the end of the blob.
self.assertEqual(reader.seek(4), 4)
blob.reload.assert_called_once_with(**download_kwargs)
self.assertEqual(reader.read(4), TEST_TEXT_DATA[4:8])
self.assertEqual(blob.download_as_bytes.call_count, 1)
# Seek to beginning. The next read will need to download data again.
self.assertEqual(reader.seek(0), 0)
self.assertEqual(reader.read(), TEST_TEXT_DATA)
self.assertEqual(blob.download_as_bytes.call_count, 2)
reader.close()
def test_multibyte_seek(self):
blob = mock.Mock()
def read_from_fake_data(start=0, end=None, **_):
return TEST_MULTIBYTE_TEXT_DATA.encode("utf-8")[start:end]
blob.download_as_bytes = mock.Mock(side_effect=read_from_fake_data)
blob.size = None
blob.chunk_size = None
download_kwargs = {"if_metageneration_match": 1}
reader = io.TextIOWrapper(self._make_blob_reader(blob, **download_kwargs))
# Seek needs the blob size to work and should call reload() if the size
# is not known. Set a mock to initialize the size if reload() is called.
def initialize_size(**_):
blob.size = len(TEST_MULTIBYTE_TEXT_DATA.encode("utf-8"))
blob.reload = mock.Mock(side_effect=initialize_size)
# Seek, forcing a blob reload in order to validate the seek doesn't
# exceed the end of the blob.
self.assertEqual(reader.seek(4), 4)
blob.reload.assert_called_once_with(**download_kwargs)
# Seek to beginning.
self.assertEqual(reader.seek(0), 0)
self.assertEqual(reader.read(), TEST_MULTIBYTE_TEXT_DATA)
self.assertEqual(blob.download_as_bytes.call_count, 1)
# tell() is an inherited method that uses seek().
self.assertEqual(reader.tell(), len(TEST_MULTIBYTE_TEXT_DATA.encode("utf-8")))
reader.close()
def test_close(self):
blob = mock.Mock()
reader = self._make_blob_reader(blob)
reader.close()
with self.assertRaises(ValueError):
reader.read()
with self.assertRaises(ValueError):
reader.seek(0)
class TestBlobWriterText(unittest.TestCase, _BlobWriterBase):
@mock.patch("warnings.warn")
def test_write(self, mock_warn):
from google.cloud.storage._helpers import _NUM_RETRIES_MESSAGE
blob = mock.Mock()
upload = mock.Mock()
transport = mock.Mock()
blob._initiate_resumable_upload.return_value = (upload, transport)
with mock.patch("google.cloud.storage.fileio.CHUNK_SIZE_MULTIPLE", 1):
# Create a writer in text mode.
# It would be normal to use a context manager here, but not doing so
# gives us more control over close() for test purposes.
chunk_size = 8 # Note: Real upload requires a multiple of 256KiB.
unwrapped_writer = self._make_blob_writer(
blob,
chunk_size=chunk_size,
ignore_flush=True,
num_retries=NUM_RETRIES,
content_type=PLAIN_CONTENT_TYPE,
)
writer = io.TextIOWrapper(unwrapped_writer)
# The transmit_next_chunk method must actually consume bytes from the
# sliding buffer for the flush() feature to work properly.
upload.transmit_next_chunk.side_effect = lambda _: unwrapped_writer._buffer.read(
chunk_size
)
# Write under chunk_size. This should be buffered and the upload not
# initiated.
writer.write(TEST_MULTIBYTE_TEXT_DATA[0:2])
            blob._initiate_resumable_upload.assert_not_called()
# Write all data and close.
writer.write(TEST_MULTIBYTE_TEXT_DATA[2:])
writer.close()
blob._initiate_resumable_upload.assert_called_once_with(
blob.bucket.client,
unwrapped_writer._buffer,
PLAIN_CONTENT_TYPE,
None,
NUM_RETRIES,
chunk_size=chunk_size,
retry=None,
)
upload.transmit_next_chunk.assert_called_with(transport)
mock_warn.assert_called_once_with(
_NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2,
)
| 37.713178
| 141
| 0.652386
|
c4936bc25fb30510e7bbab77dcf691f008e5c954
| 1,666
|
py
|
Python
|
pachong.py
|
yetote/PythonTest
|
2b987f706c7d8cc7e94003051801c652cec5318a
|
[
"Apache-2.0"
] | null | null | null |
pachong.py
|
yetote/PythonTest
|
2b987f706c7d8cc7e94003051801c652cec5318a
|
[
"Apache-2.0"
] | null | null | null |
pachong.py
|
yetote/PythonTest
|
2b987f706c7d8cc7e94003051801c652cec5318a
|
[
"Apache-2.0"
] | null | null | null |
import re
import requests
from lxml import etree
def urlChange(url, page_num):
page_group = []
page_group.append('http://www.bytravel.cn/view/index109_list.html')
    now_page = int(re.search(r'_list(\d+)', url, re.S).group(1))
for i in range(now_page, page_num):
        # note: re.sub's 4th positional argument is `count`, not flags, so the
        # original re.S here was silently treated as count=16
        link = re.sub(r'_list\d+', '_list%d' % i, url)
page_group.append(link)
return page_group
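# Illustrative example (not part of the original script):
# urlChange('http://www.bytravel.cn/view/index109_list1.html', 3) returns
#   ['http://www.bytravel.cn/view/index109_list.html',   # hard-coded first page
#    'http://www.bytravel.cn/view/index109_list1.html',
#    'http://www.bytravel.cn/view/index109_list2.html']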
def viewDict(html, title_xpath, content_xpath, img_xpath):
selector = etree.HTML(html)
title = selector.xpath(title_xpath)
content = selector.xpath(content_xpath)
img = selector.xpath(img_xpath)
    return title, content, img
def total_html(url):
html = requests.get(url)
html.encoding = 'gb2312'
return html.text
def saveinfo(classinfo):
    # a context manager guarantees the file is closed, and write() is the
    # right call for single strings (writelines expects an iterable of lines)
    with open("/pythonWorkSpace/info.txt", 'a', encoding='utf-8') as f:
        for each in classinfo:
            f.write('title:' + each['title:'] + '\n')
            f.write('content:' + each['content:'] + '\n')
            f.write('img:' + each['img:'] + '\n\n')
new_url = urlChange('http://www.bytravel.cn/view/index109_list1.html', 15)
# print(new_url)
classinfo = []
for url in new_url:
html = total_html(url)
    title, content, img = viewDict(html, '//*[@id="tctitle"]/a/text()',
                                   '//*[@id="tcjs"]/text()',
                                   '//*[@id="bright"]/a/img/@src')
    for i in range(len(title)):
        # avoid shadowing the built-in `dict`
        info = {}
        info['title:'] = title[i]
        info['content:'] = content[i]
        info['img:'] = img[i]
        classinfo.append(info)
# print(classinfo)
saveinfo(classinfo)
| 29.75
| 75
| 0.57443
|
3a2aacabd19f36ed56fdb6919b3782719b52b8b5
| 1,980
|
py
|
Python
|
pychron/data_mapper/import_spec.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/data_mapper/import_spec.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/data_mapper/import_spec.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
class Irradiation:
levels = None
doses = None
class Level:
name = None
positions = None
production = None
holder = None
z = None
note = None
class Production:
name = None
K4039 = None
K3839 = None
K3739 = None
Ca3937 = None
Ca3837 = None
Ca3637 = None
Cl3638 = None
Ca_K = None
Cl_K = None
class Position:
position = None
sample = None
identifier = None
j = None
j_err = None
note = None
weight = None
class Sample:
project = None
material = None
class Project:
name = None
principal_investigator = None
class Analysis:
position = None
runid = None
timestamp = None
isotopes = None
class BaseMeasurement:
name = None
xs = None
ys = None
class Isotope(BaseMeasurement):
baseline = None
class Baseline(BaseMeasurement):
pass
class ImportSpec:
irradiation = None
analyses = None
analysis = None
# ============= EOF =============================================
| 20
| 81
| 0.554545
|
463fcd408b18bb0854968967d1abef9f0eef049e
| 3,280
|
py
|
Python
|
examples/examples.py
|
DisruptiveLabs/balanced-python
|
dbabd8a1c8cd0728d322f6f1caa08891d590e0a1
|
[
"MIT"
] | 12
|
2015-04-12T06:18:33.000Z
|
2021-03-03T23:54:19.000Z
|
examples/examples.py
|
DisruptiveLabs/balanced-python
|
dbabd8a1c8cd0728d322f6f1caa08891d590e0a1
|
[
"MIT"
] | 1
|
2021-11-24T20:10:19.000Z
|
2021-11-24T20:10:19.000Z
|
examples/examples.py
|
DisruptiveLabs/balanced-python
|
dbabd8a1c8cd0728d322f6f1caa08891d590e0a1
|
[
"MIT"
] | 14
|
2015-03-23T17:52:06.000Z
|
2021-11-24T11:04:15.000Z
|
from __future__ import unicode_literals
import balanced
print "create our new api key"
api_key = balanced.APIKey().save()
print "Our secret is: ", api_key.secret
print "configure with our secret " + api_key.secret
balanced.configure(api_key.secret)
print "create our marketplace"
marketplace = balanced.Marketplace().save()
# what's my marketplace?
if not balanced.Marketplace.my_marketplace:
raise Exception("Marketplace.my_marketplace should not be nil")
print "what's my marketplace?, easy: Marketplace.my_marketplace: {0}".format(
balanced.Marketplace.my_marketplace
)
print "My marketplace's name is: {0}".format(marketplace.name)
print "Changing it to TestFooey"
marketplace.name = "TestFooey"
marketplace.save()
print "My marketplace name is now: {0}".format(marketplace.name)
if marketplace.name != 'TestFooey':
raise Exception("Marketplace name is NOT TestFooey!")
print "cool! let's create a new card."
card = balanced.Card(
number="5105105105105100",
expiration_month="12",
expiration_year="2015",
).save()
print "Our card href: " + card.href
print "create our **buyer** account"
buyer = balanced.Customer(email="buyer@example.org", source=card).save()
print "our buyer account: " + buyer.href
print "hold some amount of funds on the buyer, lets say 15$"
the_hold = card.hold(1500)
print "ok, no more holds! lets just capture it (for the full amount)"
debit = the_hold.capture()
print "hmm, how much money do i have in escrow? should equal the debit amount"
marketplace = balanced.Marketplace.my_marketplace
if marketplace.in_escrow != 1500:
raise Exception("1500 is not in escrow! this is wrong")
print "i have {0} in escrow!".format(marketplace.in_escrow)
print "cool. now let me refund the full amount"
refund = debit.refund() # the full amount!
print ("ok, we have a merchant that's signing up, let's create an account for "
"them first, lets create their bank account.")
bank_account = balanced.BankAccount(
account_number="1234567890",
routing_number="321174851",
name="Jack Q Merchant",
).save()
merchant = balanced.Customer(
email_address="merchant@example.org",
name="Billy Jones",
address={
'street_address': "801 High St.",
'postal_code': "94301",
'country': "USA",
},
dob="1842-01",
phone_number="+16505551234",
destination=bank_account,
).save()
print "oh our buyer is interested in buying something for 130.00$"
another_debit = card.debit(13000, appears_on_statement_as="MARKETPLACE.COM")
print "lets credit our merchant 110.00$"
credit = bank_account.credit(
11000, description="Buyer purchased something on MARKETPLACE.COM")
print "lets assume the marketplace charges 15%, so it earned $20"
mp_credit = marketplace.owner_customer.bank_accounts.first().credit(
2000, description="Our commission from MARKETPLACE.COM")
print "ok lets invalid a card"
card.delete()
assert buyer.cards.count() == 0
print "invalidating a bank account"
bank_account.delete()
print "associate a card with an exiting customer"
card = balanced.Card(
number="5105105105105100",
expiration_month="12",
expiration_year="2015",
).save()
card.associate_to_customer(buyer)
assert buyer.cards.count() == 1
print "and there you have it :)"
| 29.285714
| 79
| 0.730793
|
33e967c1c6370852d0c96bbc10e853bc8d73ba0e
| 536
|
py
|
Python
|
users/urls.py
|
Celoka/flight_booking_system
|
17d0fb0f4cf4b039cc05d814e881b5f31366c538
|
[
"MIT"
] | 3
|
2019-03-15T00:25:04.000Z
|
2019-04-12T14:06:01.000Z
|
users/urls.py
|
Celoka/flight_booking_system
|
17d0fb0f4cf4b039cc05d814e881b5f31366c538
|
[
"MIT"
] | 9
|
2020-02-11T23:44:44.000Z
|
2022-03-11T23:41:14.000Z
|
users/urls.py
|
Celoka/flight-booking-system
|
17d0fb0f4cf4b039cc05d814e881b5f31366c538
|
[
"MIT"
] | 1
|
2021-02-10T14:56:00.000Z
|
2021-02-10T14:56:00.000Z
|
from django.urls import path
from .views import (RegisterUserView,
LoginView,
ImageUploadViewSet,
index)
urlpatterns = [
path('', index, name="home-route"),
path('auth/register/', RegisterUserView.as_view(), name="auth-register"),
path('auth/login/', LoginView.as_view(), name="auth-login"),
path('user/upload/', ImageUploadViewSet.as_view(), name="file-upload"),
path('user/upload/<int:pk>/', ImageUploadViewSet.as_view(), name="file-upload-detail")
]
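# Usage sketch (assuming this module is included as the root URLconf):
#   reverse('auth-login')                    -> '/auth/login/'
#   reverse('file-upload-detail', args=[3])  -> '/user/upload/3/'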
| 38.285714
| 90
| 0.623134
|
8bdf5446d6ba6b6534e256ec5bdea48d01fab34d
| 3,070
|
py
|
Python
|
interacting_with_regulondb.py
|
ASintsova/lab-notebook
|
f177738c4550d05e9e43ed3da637b51ea4a1fb35
|
[
"MIT"
] | null | null | null |
interacting_with_regulondb.py
|
ASintsova/lab-notebook
|
f177738c4550d05e9e43ed3da637b51ea4a1fb35
|
[
"MIT"
] | null | null | null |
interacting_with_regulondb.py
|
ASintsova/lab-notebook
|
f177738c4550d05e9e43ed3da637b51ea4a1fb35
|
[
"MIT"
] | null | null | null |
import sqlite3
import pandas as pd
import os
import sys
sys.path.append('/Users/annasintsova/git_repos/HUTI-RNAseq/analysis/methods')
import helpers
def get_all_regulators(db):
"""Returns a list of regulators"""
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute("SELECT transcription_factor_id "
"FROM TRANSCRIPTION_FACTOR ")
rows = cur.fetchall()
conn.close()
return [r[0] for r in rows]
def get_all_regulators_iterator(db):
"""Returns one regulator at a time """
conn = sqlite3.connect(db)
cur = conn.cursor()
for row in cur.execute("SELECT transcription_factor_id "
"FROM TRANSCRIPTION_FACTOR "):
yield row[0]
conn.close()
def get_bnum(db, object_id=""):
"""given id try to find bnum"""
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute("SELECT ext_reference_id "
"FROM OBJECT_EXTERNAL_DB_LINK "
"WHERE object_id=? ",
(object_id,))
rows = cur.fetchall()
conn.close()
if rows:
return rows[0][0]
return object_id
def get_regulon(db, gene_name_regulator="", tf_id_regulator=""):
"""Returns list of tuples (regulator_name,
regulated_name, regulated_id, repressed/activated)"""
conn = sqlite3.connect(db)
cur = conn.cursor() # use a context manager
cur.execute("SELECT gene_name_regulator, gene_name_regulated, "
"gene_id_regulated, generegulation_function, tf_conformation "
"FROM GENEREGULATION_TMP "
"WHERE gene_name_regulator=? OR tf_id_regulator=?",
(gene_name_regulator, tf_id_regulator))
regulon = cur.fetchall()
conn.close()
reg_network = []
for reg in regulon:
eck = reg[2]
bnum = get_bnum(db, object_id=eck)
reg_network.append((reg[0], reg[1], bnum, reg[3], reg[4])) # Should I make this a dictionary?
return reg_network
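# Hypothetical example of a single reg_network entry (all values are
# illustrative only, not taken from the database):
#   ('crp', 'araB', 'b0063', 'activator', 'CRP-cAMP')
# i.e. (regulator_name, regulated_name, regulated_bnum, function, conformation).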
def get_all_regulons(db, filename):
regulators = get_all_regulators(db)
pd_list = []
for regulator in regulators:
regulon = get_regulon(db, tf_id_regulator=regulator)
pd_list.append(pd.DataFrame(regulon))
    df = pd.concat(pd_list).reset_index(drop=True)
df.columns = ["regulator", "regulated_name", "regulated_bnum", "regulator_function",
"conformation"]
df.to_csv(filename)
return df
def create_regulon_csv():
config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config")
config_dict = helpers.process_config(config)
filename = config_dict["out_dir"]["regulon_csv"]
rdb = config_dict["db"]["path"]
rn = get_all_regulons(rdb, filename)
return rn
if __name__ == "__main__":
print(create_regulon_csv().head())
# config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config")
# config_dict = helpers.process_config(config)
# filename = config_dict["out_dir"]["regulon_csv"]
# rdb = config_dict["db"]["path"]
# rn = get_regulon(rdb, 'crp')
# print(rn)
| 31.979167
| 102
| 0.646906
|
50f09ce6a9dfe3581fd9e2b71d9c557ca2993e1f
| 3,385
|
py
|
Python
|
python3/koans/about_deleting_objects.py
|
Bege13mot/python_koans_solution
|
a679e685ef3bbc1c0ec29064d5eb6564c98b979e
|
[
"MIT"
] | 1
|
2017-05-21T11:45:30.000Z
|
2017-05-21T11:45:30.000Z
|
python3/koans/about_deleting_objects.py
|
bege13mot/python_koans_solution
|
a679e685ef3bbc1c0ec29064d5eb6564c98b979e
|
[
"MIT"
] | null | null | null |
python3/koans/about_deleting_objects.py
|
bege13mot/python_koans_solution
|
a679e685ef3bbc1c0ec29064d5eb6564c98b979e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutDeletingObjects(Koan):
def test_del_can_remove_slices(self):
lottery_nums = [4, 8, 15, 16, 23, 42]
del lottery_nums[1]
del lottery_nums[2:4]
self.assertEqual([4, 15, 42], lottery_nums)
def test_del_can_remove_entire_lists(self):
lottery_nums = [4, 8, 15, 16, 23, 42]
del lottery_nums
with self.assertRaises(UnboundLocalError): win = lottery_nums
# ====================================================================
class ClosingSale:
def __init__(self):
self.hamsters = 7
self.zebras = 84
def cameras(self):
return 34
def toilet_brushes(self):
return 48
def jellies(self):
return 5
def test_del_can_remove_attributes(self):
crazy_discounts = self.ClosingSale()
del self.ClosingSale.toilet_brushes
del crazy_discounts.hamsters
try:
still_available = crazy_discounts.toilet_brushes()
except AttributeError as e:
err_msg1 = e.args[0]
try:
still_available = crazy_discounts.hamsters
except AttributeError as e:
err_msg2 = e.args[0]
        self.assertRegex(err_msg1, 'ClosingSale')
        self.assertRegex(err_msg2, 'ClosingSale')
# ====================================================================
class ClintEastwood:
def __init__(self):
self._name = None
def get_name(self):
try:
return self._name
except:
return "The man with no name"
def set_name(self, name):
self._name = name
def del_name(self):
del self._name
name = property(get_name, set_name, del_name, \
"Mr Eastwood's current alias")
def test_del_works_with_properties(self):
cowboy = self.ClintEastwood()
cowboy.name = 'Senor Ninguno'
self.assertEqual('Senor Ninguno', cowboy.name)
del cowboy.name
self.assertEqual('The man with no name', cowboy.name)
# ====================================================================
class Prisoner:
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@name.deleter
def name(self):
self._name = 'Number Six'
def test_another_way_to_make_a_deletable_property(self):
citizen = self.Prisoner()
citizen.name = "Patrick"
self.assertEqual('Patrick', citizen.name)
del citizen.name
self.assertEqual('Number Six', citizen.name)
# ====================================================================
class MoreOrganisedClosingSale(ClosingSale):
def __init__(self):
self.last_deletion = None
super().__init__()
def __delattr__(self, attr_name):
self.last_deletion = attr_name
    def tests_del_can_be_overridden(self):
sale = self.MoreOrganisedClosingSale()
self.assertEqual(5, sale.jellies())
del sale.jellies
self.assertEqual('jellies', sale.last_deletion)
| 27.08
| 74
| 0.54062
|
27d52aad4fe733f971a9e76559eb46bd762949be
| 1,145
|
py
|
Python
|
Python27/ExcelUtil.py
|
smujm/MyProjects
|
057da8137ffc0eff0b8d94e4f45494961d523385
|
[
"MIT"
] | 1
|
2020-08-25T10:46:44.000Z
|
2020-08-25T10:46:44.000Z
|
Python27/ExcelUtil.py
|
smujm/MyProjects
|
057da8137ffc0eff0b8d94e4f45494961d523385
|
[
"MIT"
] | null | null | null |
Python27/ExcelUtil.py
|
smujm/MyProjects
|
057da8137ffc0eff0b8d94e4f45494961d523385
|
[
"MIT"
] | 1
|
2021-12-15T09:59:46.000Z
|
2021-12-15T09:59:46.000Z
|
import xlwt
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
'''
Utility class for working with Excel files; it can be reused directly later on.
Invoke it like SpiderUtils.create_excel(...)
'''
class ExcelUtils(object):
@staticmethod
def create_excel(sheet_name, row_titles):
        '''
        Create the excel file and a sheet, and write their first (title) row.
        :param sheet_name: the name of the sheet in the excel file
        :param row_titles: the title row of the excel file
        :return: excel_file, excel_sheet
        '''
f = xlwt.Workbook()
sheet_info = f.add_sheet(sheet_name, cell_overwrite_ok=True)
for i in range(0, len(row_titles)):
sheet_info.write(0, i, row_titles[i])
return f, sheet_info
@staticmethod
def write_excel(excel_file, excel_sheet, count, data, excel_name):
        '''
        Write one row of data into the excel file. This is a static method.
        Note: do not hard-code anything here, so the method stays reusable.
        :param excel_file: an excel file (workbook) object
        :param excel_sheet: an excel_sheet object
        :param count: the row number in the excel file
        :param data: a single record of data to write
        :param excel_name: the excel file name
        :return: None
        '''
for j in range(len(data)):
excel_sheet.write(count, j, data[j])
excel_file.save(excel_name)
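# Illustrative usage sketch (the sheet name, titles, data and file name below
# are made up; they are not part of the original module):
if __name__ == '__main__':
    demo_file, demo_sheet = ExcelUtils.create_excel('demo', ['title', 'url'])
    ExcelUtils.write_excel(demo_file, demo_sheet, 1,
                           ['example', 'http://example.com'], 'demo.xls')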
| 24.361702
| 70
| 0.630568
|
1adfc03246cfc78791ce17c8ef2496423cda9652
| 626
|
py
|
Python
|
blog/models.py
|
PocketsOfAir/django-girls-tutorial
|
c39bab2c97935872fb5a8017b584ed781dcf6cdc
|
[
"MIT"
] | null | null | null |
blog/models.py
|
PocketsOfAir/django-girls-tutorial
|
c39bab2c97935872fb5a8017b584ed781dcf6cdc
|
[
"MIT"
] | null | null | null |
blog/models.py
|
PocketsOfAir/django-girls-tutorial
|
c39bab2c97935872fb5a8017b584ed781dcf6cdc
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True,
null=True)
def display_date(self):
return self.published_date if self.published_date else self.created_date
def publish(self):
        self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
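# Usage sketch (e.g. from a Django shell; `user` is an existing auth.User):
#   post = Post.objects.create(author=user, title='Hello', text='...')
#   post.display_date()   # falls back to created_date until published
#   post.publish()        # stamps published_date with the current time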
| 28.454545
| 80
| 0.666134
|
574f219a79474a92990540e463db929b95047711
| 22,193
|
py
|
Python
|
freezer_api/storage/elastic.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | null | null | null |
freezer_api/storage/elastic.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | null | null | null |
freezer_api/storage/elastic.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Hewlett-Packard
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import elasticsearch
import logging
import os
from freezer_api.common import _i18n
from freezer_api.common import exceptions as freezer_api_exc
from freezer_api.common import utils
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class TypeManager(object):
def __init__(self, es, doc_type, index):
self.es = es
self.index = index
self.doc_type = doc_type
@staticmethod
def get_base_search_filter(user_id, search=None):
search = search or {}
user_id_filter = {"term": {"user_id": user_id}}
base_filter = [user_id_filter]
match_list = [{"match": m} for m in search.get('match', [])]
match_not_list = [{"match": m} for m in search.get('match_not', [])]
base_filter.append({"query": {"bool": {"must": match_list,
"must_not": match_not_list}}})
return base_filter
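    # For example (sketch): user_id='u1' with search={'match': [{'status': 'ok'}]}
    # makes get_base_search_filter return:
    #   [{"term": {"user_id": "u1"}},
    #    {"query": {"bool": {"must": [{"match": {"status": "ok"}}],
    #                        "must_not": []}}}]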
@staticmethod
def get_search_query(user_id, doc_id, search=None):
search = search or {}
try:
base_filter = TypeManager.get_base_search_filter(user_id, search)
query_filter = {"filter": {"bool": {"must": base_filter}}}
return {'query': {'filtered': query_filter}}
except Exception:
raise freezer_api_exc.StorageEngineError(
message=_i18n._('search operation failed: query not valid'))
def get(self, user_id, doc_id):
try:
res = self.es.get(index=self.index,
doc_type=self.doc_type,
id=doc_id)
doc = res['_source']
except elasticsearch.TransportError:
raise freezer_api_exc.DocumentNotFound(
message=_i18n._('No document found with ID %s') % doc_id)
except Exception as e:
raise freezer_api_exc.StorageEngineError(
message=_i18n._('Get operation failed: %s') % e)
if doc['user_id'] != user_id:
raise freezer_api_exc.AccessForbidden(
_i18n._("Document access forbidden"))
if '_version' in res:
doc['_version'] = res['_version']
return doc
def search(self, user_id, doc_id=None, search=None, offset=0, limit=10):
search = search or {}
query_dsl = self.get_search_query(user_id, doc_id, search)
try:
res = self.es.search(index=self.index, doc_type=self.doc_type,
size=limit, from_=offset, body=query_dsl)
except elasticsearch.ConnectionError:
raise freezer_api_exc.StorageEngineError(
message=_i18n._('unable to connect to db server'))
except Exception as e:
raise freezer_api_exc.StorageEngineError(
message=_i18n._('search operation failed: %s') % e)
hit_list = res['hits']['hits']
return [x['_source'] for x in hit_list]
def insert(self, doc, doc_id=None):
try:
# remove _version from the document
doc.pop('_version', None)
res = self.es.index(index=self.index, doc_type=self.doc_type,
body=doc, id=doc_id)
created = res['created']
version = res['_version']
self.es.indices.refresh(index=self.index)
except elasticsearch.TransportError as e:
if e.status_code == 409:
raise freezer_api_exc.DocumentExists(message=e.error)
raise freezer_api_exc.StorageEngineError(
message=_i18n._('index operation failed %s') % e)
except Exception as e:
raise freezer_api_exc.StorageEngineError(
message=_i18n._('index operation failed %s') % e)
return (created, version)
def delete(self, user_id, doc_id):
query_dsl = self.get_search_query(user_id, doc_id)
try:
results = self.es.search(index=self.index,
doc_type=self.doc_type,
body=query_dsl)
results = results['hits']['hits']
except Exception as e:
raise freezer_api_exc.StorageEngineError(
message=_i18n._('Scan operation failed: %s') % e)
        deleted_id = None
        for res in results:
            deleted_id = res.get('_id')
        try:
            self.es.delete(index=self.index, doc_type=self.doc_type,
                           id=deleted_id)
            self.es.indices.refresh(index=self.index)
        except Exception as e:
            raise freezer_api_exc.StorageEngineError(
                message=_i18n._('Delete operation failed: %s') % e)
        return deleted_id
class BackupTypeManager(TypeManager):
def __init__(self, es, doc_type, index='freezer'):
TypeManager.__init__(self, es, doc_type, index=index)
@staticmethod
def get_search_query(user_id, doc_id, search=None):
search = search or {}
base_filter = TypeManager.get_base_search_filter(user_id, search)
if doc_id is not None:
base_filter.append({"term": {"backup_id": doc_id}})
if 'time_after' in search:
base_filter.append(
{"range": {"timestamp": {"gte": int(search['time_after'])}}}
)
if 'time_before' in search:
base_filter.append(
{"range": {"timestamp": {"lte": int(search['time_before'])}}}
)
query_filter = {"filter": {"bool": {"must": base_filter}}}
return {'query': {'filtered': query_filter}}
class ClientTypeManager(TypeManager):
def __init__(self, es, doc_type, index='freezer'):
TypeManager.__init__(self, es, doc_type, index=index)
@staticmethod
def get_search_query(user_id, doc_id, search=None):
search = search or {}
base_filter = TypeManager.get_base_search_filter(user_id, search)
if doc_id is not None:
base_filter.append({"term": {"client.client_id": doc_id}})
query_filter = {"filter": {"bool": {"must": base_filter}}}
return {'query': {'filtered': query_filter}}
class JobTypeManager(TypeManager):
def __init__(self, es, doc_type, index='freezer'):
TypeManager.__init__(self, es, doc_type, index=index)
@staticmethod
def get_search_query(user_id, doc_id, search=None):
search = search or {}
base_filter = TypeManager.get_base_search_filter(user_id, search)
if doc_id is not None:
base_filter.append({"term": {"job_id": doc_id}})
query_filter = {"filter": {"bool": {"must": base_filter}}}
return {'query': {'filtered': query_filter}}
def update(self, job_id, job_update_doc):
# remove _version from the document
job_update_doc.pop('_version', 0)
update_doc = {"doc": job_update_doc}
try:
res = self.es.update(index=self.index, doc_type=self.doc_type,
id=job_id, body=update_doc)
version = res['_version']
self.es.indices.refresh(index=self.index)
except elasticsearch.TransportError as e:
if e.status_code == 409:
raise freezer_api_exc.DocumentExists(message=e.error)
raise freezer_api_exc.DocumentNotFound(
message=_i18n._('Unable to find job to update '
'with id %(id)s. %(e)s') % {'id': job_id,
'e': e})
except Exception:
raise freezer_api_exc.StorageEngineError(
message=_i18n._('Unable to update job with id %s') % job_id)
return version
class ActionTypeManager(TypeManager):
def __init__(self, es, doc_type, index='freezer'):
TypeManager.__init__(self, es, doc_type, index=index)
@staticmethod
def get_search_query(user_id, doc_id, search=None):
search = search or {}
base_filter = TypeManager.get_base_search_filter(user_id, search)
if doc_id is not None:
base_filter.append({"term": {"action_id": doc_id}})
query_filter = {"filter": {"bool": {"must": base_filter}}}
return {'query': {'filtered': query_filter}}
def update(self, action_id, action_update_doc):
# remove _version from the document
action_update_doc.pop('_version', 0)
update_doc = {"doc": action_update_doc}
try:
res = self.es.update(index=self.index, doc_type=self.doc_type,
id=action_id, body=update_doc)
version = res['_version']
self.es.indices.refresh(index=self.index)
except elasticsearch.TransportError as e:
if e.status_code == 409:
raise freezer_api_exc.DocumentExists(message=e.error)
raise freezer_api_exc.DocumentNotFound(
message=_i18n._('Unable to find action to update '
'with id %s') % action_id)
except Exception:
raise freezer_api_exc.StorageEngineError(
message=_i18n._(
'Unable to update action with id %s') % action_id)
return version
class SessionTypeManager(TypeManager):
def __init__(self, es, doc_type, index='freezer'):
TypeManager.__init__(self, es, doc_type, index=index)
@staticmethod
def get_search_query(user_id, doc_id, search=None):
search = search or {}
base_filter = TypeManager.get_base_search_filter(user_id, search)
if doc_id is not None:
base_filter.append({"term": {"session_id": doc_id}})
query_filter = {"filter": {"bool": {"must": base_filter}}}
return {'query': {'filtered': query_filter}}
def update(self, session_id, session_update_doc):
# remove _version from the document
session_update_doc.pop('_version', 0)
update_doc = {"doc": session_update_doc}
try:
res = self.es.update(index=self.index, doc_type=self.doc_type,
id=session_id, body=update_doc)
version = res['_version']
self.es.indices.refresh(index=self.index)
except elasticsearch.TransportError as e:
if e.status_code == 409:
raise freezer_api_exc.DocumentExists(message=e.error)
raise freezer_api_exc.DocumentNotFound(
message=_i18n._('Unable to update session '
'%(id)s %(e)s') % {'id': session_id, 'e': e}
)
except Exception:
raise freezer_api_exc.StorageEngineError(
message=_i18n._(
'Unable to update session with id %s') % session_id)
return version
class ElasticSearchEngine(object):
_OPTS = [
cfg.ListOpt('hosts',
default=['http://127.0.0.1:9200'],
help='specify the storage hosts'),
cfg.StrOpt('index',
default='freezer',
help='specify the name of the elasticsearch index'),
cfg.IntOpt('timeout',
default=60,
help='specify the connection timeout'),
cfg.IntOpt('retries',
default=20,
help='number of retries to allow before raising and error'),
cfg.BoolOpt('use_ssl',
default=False,
help='explicitly turn on SSL'),
cfg.BoolOpt('verify_certs',
default=False,
help='turn on SSL certs verification'),
cfg.StrOpt('ca_certs',
help='path to CA certs on disk'),
cfg.IntOpt('number_of_replicas',
default=0,
help='Number of replicas for elk cluster. Default is 0. '
'Use 0 for no replicas. This should be set to (number '
                         'of nodes in the ES cluster - 1).')
]
def __init__(self, backend):
"""backend: name of the section in the config file to load
elasticsearch opts
"""
self.index = None
self.es = None
self.backup_manager = None
self.client_manager = None
self.job_manager = None
self.action_manager = None
self.session_manager = None
# register elasticsearch opts
CONF.register_opts(self._OPTS, group=backend)
self.conf = dict(CONF.get(backend))
self.backend = backend
self._validate_opts()
self.init(**self.conf)
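    # Instantiation sketch (the section name 'elasticsearch' is illustrative):
    # ElasticSearchEngine(backend='elasticsearch') registers the opts above
    # under that config section, validates them, then connects via init().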
def _validate_opts(self):
        if not ('hosts' in self.conf or 'endpoint' in self.conf):
raise ValueError("Couldn't find hosts in {0} section".format(
self.backend)
)
if self.conf.get('ca_certs'):
if not os.path.isfile(self.conf.get('ca_certs')):
raise Exception("File not found: ca_certs file ({0}) not "
"found".format(self.conf.get('ca_certs')))
def get_opts(self):
return self._OPTS
def init(self, index='freezer', **kwargs):
self.index = index
self.es = elasticsearch.Elasticsearch(**kwargs)
logging.info('Storage backend: Elasticsearch '
'at %s' % kwargs['hosts'])
self.backup_manager = BackupTypeManager(self.es, 'backups')
self.client_manager = ClientTypeManager(self.es, 'clients')
self.job_manager = JobTypeManager(self.es, 'jobs')
self.action_manager = ActionTypeManager(self.es, 'actions')
self.session_manager = SessionTypeManager(self.es, 'sessions')
def get_backup(self, user_id, backup_id):
return self.backup_manager.get(user_id, backup_id)
def search_backup(self, user_id, offset=0, limit=10, search=None):
search = search or {}
return self.backup_manager.search(user_id,
search=search,
offset=offset,
limit=limit)
def add_backup(self, user_id, user_name, doc):
# raises if data is malformed (HTTP_400) or already present (HTTP_409)
backup_metadata_doc = utils.BackupMetadataDoc(user_id, user_name, doc)
if not backup_metadata_doc.is_valid():
raise freezer_api_exc.BadDataFormat(
message=_i18n._('Bad Data Format'))
backup_id = backup_metadata_doc.backup_id
self.backup_manager.insert(backup_metadata_doc.serialize(), backup_id)
return backup_id
def delete_backup(self, user_id, backup_id):
return self.backup_manager.delete(user_id, backup_id)
def get_client(self, user_id, client_id=None,
offset=0, limit=10, search=None):
search = search or {}
return self.client_manager.search(user_id,
client_id,
search=search,
offset=offset,
limit=limit)
def add_client(self, user_id, doc):
client_doc = utils.ClientDoc.create(doc, user_id)
client_id = client_doc['client']['client_id']
existing = self.client_manager.search(user_id, client_id)
if existing:
raise freezer_api_exc.DocumentExists(
message=_i18n._(
'Client already registered with ID %s') % client_id)
self.client_manager.insert(client_doc)
logging.info('Client registered, client_id: %s' % client_id)
return client_id
def delete_client(self, user_id, client_id):
return self.client_manager.delete(user_id, client_id)
def get_job(self, user_id, job_id):
return self.job_manager.get(user_id, job_id)
def search_job(self, user_id, offset=0, limit=10, search=None):
search = search or {}
return self.job_manager.search(user_id,
search=search,
offset=offset,
limit=limit)
def add_job(self, user_id, doc):
jobdoc = utils.JobDoc.create(doc, user_id)
job_id = jobdoc['job_id']
self.job_manager.insert(jobdoc, job_id)
logging.info('Job registered, job id: %s' % job_id)
return job_id
def delete_job(self, user_id, job_id):
return self.job_manager.delete(user_id, job_id)
def update_job(self, user_id, job_id, patch_doc):
valid_patch = utils.JobDoc.create_patch(patch_doc)
# check that document exists
assert (self.job_manager.get(user_id, job_id))
version = self.job_manager.update(job_id, valid_patch)
logging.info('Job %(id)s updated to version %(version)s' %
{'id': job_id, 'version': version})
return version
def replace_job(self, user_id, job_id, doc):
# check that no document exists with
# same job_id and different user_id
try:
self.job_manager.get(user_id, job_id)
except freezer_api_exc.DocumentNotFound:
pass
valid_doc = utils.JobDoc.update(doc, user_id, job_id)
(created, version) = self.job_manager.insert(valid_doc, job_id)
if created:
logging.info('Job %s created' % job_id)
else:
logging.info(
'Job %(id)s replaced with version %(version)s' %
{'id': job_id, 'version': version})
return version
def get_action(self, user_id, action_id):
return self.action_manager.get(user_id, action_id)
def search_action(self, user_id, offset=0, limit=10, search=None):
search = search or {}
return self.action_manager.search(user_id,
search=search,
offset=offset,
limit=limit)
def add_action(self, user_id, doc):
actiondoc = utils.ActionDoc.create(doc, user_id)
action_id = actiondoc['action_id']
self.action_manager.insert(actiondoc, action_id)
logging.info('Action registered, action id: %s' % action_id)
return action_id
def delete_action(self, user_id, action_id):
return self.action_manager.delete(user_id, action_id)
def update_action(self, user_id, action_id, patch_doc):
valid_patch = utils.ActionDoc.create_patch(patch_doc)
# check that document exists
assert (self.action_manager.get(user_id, action_id))
version = self.action_manager.update(action_id, valid_patch)
logging.info(
'Action %(id)s updated to version %(version)s' %
{'id': action_id, 'version': version})
return version
def replace_action(self, user_id, action_id, doc):
# check that no document exists with
# same action_id and different user_id
try:
self.action_manager.get(user_id, action_id)
except freezer_api_exc.DocumentNotFound:
pass
valid_doc = utils.ActionDoc.update(doc, user_id, action_id)
(created, version) = self.action_manager.insert(valid_doc, action_id)
if created:
logging.info('Action %s created' % action_id)
else:
logging.info(
'Action %(id)s replaced with version %(version)s'
% {'id': action_id, 'version': version})
return version
def get_session(self, user_id, session_id):
return self.session_manager.get(user_id, session_id)
def search_session(self, user_id, offset=0, limit=10, search=None):
search = search or {}
return self.session_manager.search(user_id,
search=search,
offset=offset,
limit=limit)
def add_session(self, user_id, doc):
session_doc = utils.SessionDoc.create(doc, user_id)
session_id = session_doc['session_id']
self.session_manager.insert(session_doc, session_id)
logging.info(
'Session registered, session id: %s' % session_id)
return session_id
def delete_session(self, user_id, session_id):
return self.session_manager.delete(user_id, session_id)
def update_session(self, user_id, session_id, patch_doc):
valid_patch = utils.SessionDoc.create_patch(patch_doc)
# check that document exists
assert (self.session_manager.get(user_id, session_id))
version = self.session_manager.update(session_id, valid_patch)
logging.info(
'Session %(id)s updated to version %(version)s' %
{'id': session_id, 'version': version})
return version
def replace_session(self, user_id, session_id, doc):
# check that no document exists with
# same session_id and different user_id
try:
self.session_manager.get(user_id, session_id)
except freezer_api_exc.DocumentNotFound:
pass
valid_doc = utils.SessionDoc.update(doc, user_id, session_id)
(created, version) = self.session_manager.insert(valid_doc, session_id)
if created:
logging.info('Session %s created' % session_id)
else:
logging.info(
'Session %(id)s replaced with version %(version)s'
% {'id': session_id, 'version': version})
return version
| 40.20471
| 79
| 0.590907
|
11c485d983cbd4ac979681cd348f4bf8705b8b3d
| 944
|
py
|
Python
|
day06/part2/orbits.py
|
FernandoBuenoLima/advent-of-code-2019
|
33acbcf541f00682856b9dcfc54155bdad07d1ae
|
[
"Unlicense"
] | null | null | null |
day06/part2/orbits.py
|
FernandoBuenoLima/advent-of-code-2019
|
33acbcf541f00682856b9dcfc54155bdad07d1ae
|
[
"Unlicense"
] | null | null | null |
day06/part2/orbits.py
|
FernandoBuenoLima/advent-of-code-2019
|
33acbcf541f00682856b9dcfc54155bdad07d1ae
|
[
"Unlicense"
] | null | null | null |
from tree import *
def pathfind(origin, target):
path = [origin]
return doPathfindRecursive(path, target)
def doPathfindRecursive(path, target):
current = path[-1]
if current == target:
return len(path)-1
nodes = []
for node in current.children:
if node not in path:
nodes.append(node)
    # guard against the root node, whose parent may be None
    if current.parent is not None and current.parent not in path:
nodes.append(current.parent)
for node in nodes:
newPath = path.copy()
newPath.append(node)
ret = doPathfindRecursive(newPath, target)
if ret > -1:
return ret
return -1
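# Worked example (sketch): with orbits COM)B, B)C, C)D, D)YOU and C)SAN,
# YOU.parent is D and SAN.parent is C, so pathfind(D, C) walks D -> C and
# returns 1, i.e. one orbital transfer.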
orbits = []
with open("input.txt") as file:
for line in file:
orbits.append(line.strip())
tree = Tree()
for orbit in orbits:
orbitNames = orbit.split(')')
tree.addOrbit(orbitNames[0], orbitNames[1])
you = tree.getNode("YOU")
san = tree.getNode("SAN")
print(pathfind(you.parent, san.parent))
| 20.977778
| 50
| 0.603814
|
b1254341ad1fe65722bc75976d4a7c25bad27bc1
| 417
|
py
|
Python
|
ResidenciAppServidor/wsgi.py
|
ResidenciApp/ResidenciAppServidor
|
7cc8c66bc07d198be37232fdc74d64227d14ce3d
|
[
"MIT"
] | 2
|
2019-10-31T03:21:07.000Z
|
2019-12-11T16:25:33.000Z
|
ResidenciAppServidor/wsgi.py
|
ResidenciApp/ResidenciAppServidor
|
7cc8c66bc07d198be37232fdc74d64227d14ce3d
|
[
"MIT"
] | 6
|
2021-03-19T02:43:26.000Z
|
2022-02-10T10:42:00.000Z
|
ResidenciAppServidor/wsgi.py
|
lmbaeza/ResidenciAppServidor
|
7cc8c66bc07d198be37232fdc74d64227d14ce3d
|
[
"MIT"
] | null | null | null |
"""
WSGI config for ResidenciAppServidor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ResidenciAppServidor.settings')
application = get_wsgi_application()
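# Serving sketch (the command below is illustrative, not part of the project):
#   gunicorn ResidenciAppServidor.wsgi:application
# Any WSGI server can be pointed at this module's `application` callable.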
| 24.529412
| 80
| 0.798561
|
4ad2c19ecd2d9be68519abd42ba8d88043792a0c
| 2,673
|
py
|
Python
|
OnToology/mock/__init__.py
|
runzbuzz/auton
|
d30a15d8573321f78831cf1456ef51e735772381
|
[
"Apache-2.0"
] | 54
|
2015-05-20T16:12:12.000Z
|
2022-03-28T10:48:06.000Z
|
OnToology/mock/__init__.py
|
runzbuzz/auton
|
d30a15d8573321f78831cf1456ef51e735772381
|
[
"Apache-2.0"
] | 360
|
2015-05-14T00:14:01.000Z
|
2022-03-24T16:05:53.000Z
|
OnToology/mock/__init__.py
|
runzbuzz/auton
|
d30a15d8573321f78831cf1456ef51e735772381
|
[
"Apache-2.0"
] | 18
|
2015-07-08T10:58:02.000Z
|
2022-03-24T12:25:50.000Z
|
from .commit import *
from .fork import *
from .org import *
from .pull import *
from .repo import *
from .tree import *
from .user import *
from .milestone import *
user = "ahmad88me"
repo_name = "ontoology-auto-test-no-res"
repo_name_with_res = "ontoology-auto-test-with-res"
repo = user + "/" + repo_name
repo_with_res = user + "/" + repo_name_with_res
ontology_name = "alo.owl"
branch = "master"
mock_dict_success = {
"/repos/%s" % repo: {
"GET": {
"status": 200,
"body": get_repo_dict(repo)
}
},
"/repos/%s" % repo_with_res: {
"GET": {
"status": 200,
"body": get_repo_dict(repo_with_res)
}
},
"/user": {
"GET": {
"status": 200,
"body": get_auth_user_dict(user),
}
},
"/users/%s" % user: {
"GET": {
"status": 200,
"body": get_user_dict(user),
}
},
"/repos/%s/commits" % repo: {
"GET": {
"status": 200,
"body": get_commits_dict(repo),
}
},
"/repos/%s/git/trees/6dcb09b5b57875f334f61aebed695e2e4193db5e" % repo: {
"GET": {
"status": 200,
"body": get_tree_dict()
}
},
"/repos/%s/forks" % repo:{
"POST": {
"status": 202,
"body": get_fork_dict(repo)
}
},
"/repos/%s/pulls" % repo: {
"GET":{
"status": 200,
"body": get_pulls_dict(repo, branch="master") # regardless of the branch. It always pass
},
"POST": {
"status": 201,
"body": get_pulls_dict(repo, branch="master")[0]
}
},
"/repos/%s/collaborators/%s" % (repo,user) : {
# "GET": {
# "status": 204,
# "body": ""
# },
"GET": {
"status": 404,
"body": ""
},
"PUT": {
"status": 201,
"body": get_add_collaborator_dict(repo)
}
},
"/repos/%s/contents/OnToology/%s/documentation/.htaccess" % (repo_with_res, ontology_name): {
"GET": {
"status": 200,
"body": get_file_content_dict(repo_with_res, branch, "OnToology/%s/documentation/.htaccess" % ontology_name)
},
"PUT": {
"status": 200,
"body": get_update_content_dict(repo_with_res, branch, "OnToology/%s/documentation/.htaccess" % ontology_name)
}
},
"/user/repository_invitations/%s" % "1" :{
"PATCH": {
"status": 204,
"body": ""
}
}
}
mock_dict = {
'success': mock_dict_success
}
| 24.981308
| 122
| 0.479237
|
d73950f8604bd848018e63f064a7ec004ce5dd66
| 4,777
|
py
|
Python
|
Lib/site-packages/django/contrib/gis/gdal/datasource.py
|
ashutoshsuman99/Web-Blog-D19
|
a01a0ccc40e8823110c01ebe4f43d9351df57295
|
[
"bzip2-1.0.6"
] | 123
|
2015-01-15T06:56:45.000Z
|
2022-03-19T22:18:55.000Z
|
Lib/site-packages/django/contrib/gis/gdal/datasource.py
|
ashutoshsuman99/Web-Blog-D19
|
a01a0ccc40e8823110c01ebe4f43d9351df57295
|
[
"bzip2-1.0.6"
] | 21
|
2015-03-25T18:00:33.000Z
|
2019-08-12T17:11:10.000Z
|
Lib/site-packages/django/contrib/gis/gdal/datasource.py
|
ashutoshsuman99/Web-Blog-D19
|
a01a0ccc40e8823110c01ebe4f43d9351df57295
|
[
"bzip2-1.0.6"
] | 72
|
2015-01-14T16:29:47.000Z
|
2021-10-09T16:31:47.000Z
|
"""
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
            # Returns the value of the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
from ctypes import byref
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
"Wraps an OGR Data Source object."
def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
# The write flag.
if write:
self._write = 1
else:
self._write = 0
# See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
self.encoding = encoding
Driver.ensure_registered()
if isinstance(ds_input, six.string_types):
# The data source driver is a void pointer.
ds_driver = Driver.ptr_type()
try:
# OGROpen will auto-detect the data source type.
ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
except GDALException:
# Making the error message more clear rather than something
# like "Invalid pointer returned from OGROpen".
raise GDALException('Could not open the datasource at "%s"' % ds_input)
elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
ds = ds_input
else:
raise GDALException('Invalid data source input type: %s' % type(ds_input))
if ds:
self.ptr = ds
self.driver = Driver(ds_driver)
else:
# Raise an exception if the returned pointer is NULL
raise GDALException('Invalid data source file "%s"' % ds_input)
def __del__(self):
"Destroys this DataStructure object."
if self._ptr and capi:
capi.destroy_ds(self._ptr)
def __iter__(self):
"Allows for iteration over the layers in a data source."
for i in range(self.layer_count):
yield self[i]
def __getitem__(self, index):
"Allows use of the index [] operator to get a layer at the index."
if isinstance(index, six.string_types):
l = capi.get_layer_by_name(self.ptr, force_bytes(index))
if not l:
raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
elif isinstance(index, int):
if index < 0 or index >= self.layer_count:
raise OGRIndexError('index out of range')
l = capi.get_layer(self._ptr, index)
else:
raise TypeError('Invalid index type: %s' % type(index))
return Layer(l, self)
def __len__(self):
"Returns the number of layers within the data source."
return self.layer_count
def __str__(self):
"Returns OGR GetName and Driver for the Data Source."
return '%s (%s)' % (self.name, str(self.driver))
@property
def layer_count(self):
"Returns the number of layers in the data source."
return capi.get_layer_count(self._ptr)
@property
def name(self):
"Returns the name of the data source."
name = capi.get_ds_name(self._ptr)
return force_text(name, self.encoding, strings_only=True)
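# A minimal usage sketch (hedged; '/path/to/data.shp' is a placeholder path):
#     ds = DataSource('/path/to/data.shp')
#     print(ds.name, ds.layer_count)
#     layer = ds[0]           # by index
#     layer = ds['my_layer']  # or by layer name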
| 36.746154
| 92
| 0.637639
|
ff4767b0dbcef30d212db3f38884dfb67bfd9ace
| 953
|
py
|
Python
|
Python_ch4/Ch4_5_Sentinels.py
|
ninhnguyen01/Python_Book
|
e5e372f1895b06e908cd0dd07dc68a260c34d7ad
|
[
"Apache-2.0"
] | null | null | null |
Python_ch4/Ch4_5_Sentinels.py
|
ninhnguyen01/Python_Book
|
e5e372f1895b06e908cd0dd07dc68a260c34d7ad
|
[
"Apache-2.0"
] | null | null | null |
Python_ch4/Ch4_5_Sentinels.py
|
ninhnguyen01/Python_Book
|
e5e372f1895b06e908cd0dd07dc68a260c34d7ad
|
[
"Apache-2.0"
] | null | null | null |
# Sentinels (Title)
# Reading
# This program displays property taxes.
TAX_FACTOR = 0.0065
# Get the first lot number.
print('Enter the property lot number or enter 0 to end.')
lot = int(input('Lot number: '))
# Continue processing as long as the user does not enter lot number
# 0.
while lot != 0:
# Get the property value.
value = float(input('Enter the property value: '))
# Calculate the property's tax.
tax = value * TAX_FACTOR
# Display the tax.
print(f'Property tax: ${tax:,.2f}')
# Get the next lot number.
print('Enter the next lot number or enter 0 to end.')
lot = int(input('Lot number: '))
# End
# Checkpoint
# 4.18 What is a sentinel?
# A: A special value that marks the end of a sequence of items.
# 4.19 Why should you take care to choose a distinctive value as a
# sentinel?
# A: So that it will not be mistaken as a regular value in the
# sequence.
# End
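# A variant sketch (an assumption, not from the book): an empty string can
# also serve as a distinctive sentinel, since no real lot number is blank.
#     lot = input('Lot number (blank to end): ')
#     while lot != '':
#         ...
#         lot = input('Lot number (blank to end): ')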
| 19.44898
| 69
| 0.650577
|
609a681d2e6a78ad2de7ecca45fcb7f868689aa3
| 40,842
|
py
|
Python
|
pytorch_lightning/utilities/cli.py
|
VedPatwardhan/pytorch-lightning
|
623dc974f56505cfdb6a7c62ad75780229e101de
|
[
"Apache-2.0"
] | 2
|
2022-01-24T12:40:51.000Z
|
2022-01-25T02:26:32.000Z
|
pytorch_lightning/utilities/cli.py
|
VedPatwardhan/pytorch-lightning
|
623dc974f56505cfdb6a7c62ad75780229e101de
|
[
"Apache-2.0"
] | 1
|
2021-09-20T00:24:38.000Z
|
2021-09-20T00:24:38.000Z
|
pytorch_lightning/utilities/cli.py
|
VedPatwardhan/pytorch-lightning
|
623dc974f56505cfdb6a7c62ad75780229e101de
|
[
"Apache-2.0"
] | 2
|
2022-02-11T08:26:13.000Z
|
2022-03-21T03:48:34.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for LightningCLI."""
import inspect
import os
import sys
from functools import partial, update_wrapper
from types import MethodType, ModuleType
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from unittest import mock
import torch
import yaml
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning import Callback, LightningDataModule, LightningModule, seed_everything, Trainer
from pytorch_lightning.utilities import _JSONARGPARSE_AVAILABLE, rank_zero_warn, warnings
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.types import LRSchedulerType, LRSchedulerTypeTuple, LRSchedulerTypeUnion
if _JSONARGPARSE_AVAILABLE:
from jsonargparse import ActionConfigFile, ArgumentParser, class_from_function, Namespace, set_config_read_mode
from jsonargparse.optionals import import_docstring_parse
set_config_read_mode(fsspec_enabled=True)
else:
locals()["ArgumentParser"] = object
locals()["Namespace"] = object
class _Registry(dict):
def __call__(self, cls: Type, key: Optional[str] = None, override: bool = False) -> Type:
"""Registers a class mapped to a name.
Args:
cls: the class to be mapped.
key: the name that identifies the provided class.
override: Whether to override an existing key.
"""
if key is None:
key = cls.__name__
elif not isinstance(key, str):
raise TypeError(f"`key` must be a str, found {key}")
if key in self and not override:
raise MisconfigurationException(f"'{key}' is already present in the registry. HINT: Use `override=True`.")
self[key] = cls
return cls
def register_classes(self, module: ModuleType, base_cls: Type, override: bool = False) -> None:
"""This function is an utility to register all classes from a module."""
for _, cls in inspect.getmembers(module, predicate=inspect.isclass):
if issubclass(cls, base_cls) and cls != base_cls:
self(cls=cls, override=override)
@property
def names(self) -> List[str]:
"""Returns the registered names."""
return list(self.keys())
@property
def classes(self) -> Tuple[Type, ...]:
"""Returns the registered classes."""
return tuple(self.values())
def __str__(self) -> str:
return f"Registered objects: {self.names}"
OPTIMIZER_REGISTRY = _Registry()
OPTIMIZER_REGISTRY.register_classes(torch.optim, Optimizer)
LR_SCHEDULER_REGISTRY = _Registry()
LR_SCHEDULER_REGISTRY.register_classes(torch.optim.lr_scheduler, torch.optim.lr_scheduler._LRScheduler)
class ReduceLROnPlateau(torch.optim.lr_scheduler.ReduceLROnPlateau):
def __init__(self, optimizer: Optimizer, monitor: str, *args: Any, **kwargs: Any) -> None:
super().__init__(optimizer, *args, **kwargs)
self.monitor = monitor
LR_SCHEDULER_REGISTRY(cls=ReduceLROnPlateau)
CALLBACK_REGISTRY = _Registry()
CALLBACK_REGISTRY.register_classes(pl.callbacks, pl.callbacks.Callback)
MODEL_REGISTRY = _Registry()
DATAMODULE_REGISTRY = _Registry()
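# A minimal registration sketch (hypothetical `MyModel`; the decorator form
# works because `_Registry.__call__` returns the class):
#
#     @MODEL_REGISTRY
#     class MyModel(pl.LightningModule):
#         ...
#
# Once registered, `--model=MyModel` can select it on the command line when
# `LightningCLI` is constructed without an explicit `model_class`.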
class LightningArgumentParser(ArgumentParser):
"""Extension of jsonargparse's ArgumentParser for pytorch-lightning."""
# use class attribute because `parse_args` is only called on the main parser
_choices: Dict[str, Tuple[Tuple[Type, ...], bool]] = {}
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize argument parser that supports configuration file input.
For full details of accepted arguments see `ArgumentParser.__init__
<https://jsonargparse.readthedocs.io/en/stable/index.html#jsonargparse.ArgumentParser.__init__>`_.
"""
if not _JSONARGPARSE_AVAILABLE:
raise ModuleNotFoundError(
"`jsonargparse` is not installed but it is required for the CLI."
" Install it with `pip install -U jsonargparse[signatures]`."
)
super().__init__(*args, **kwargs)
self.add_argument(
"--config", action=ActionConfigFile, help="Path to a configuration file in json or yaml format."
)
self.callback_keys: List[str] = []
# separate optimizers and lr schedulers to know which were added
self._optimizers: Dict[str, Tuple[Union[Type, Tuple[Type, ...]], str]] = {}
self._lr_schedulers: Dict[str, Tuple[Union[Type, Tuple[Type, ...]], str]] = {}
def add_lightning_class_args(
self,
lightning_class: Union[
Callable[..., Union[Trainer, LightningModule, LightningDataModule, Callback]],
Type[Trainer],
Type[LightningModule],
Type[LightningDataModule],
Type[Callback],
],
nested_key: str,
subclass_mode: bool = False,
required: bool = True,
) -> List[str]:
"""Adds arguments from a lightning class to a nested key of the parser.
Args:
lightning_class: A callable or any subclass of {Trainer, LightningModule, LightningDataModule, Callback}.
nested_key: Name of the nested namespace to store arguments.
subclass_mode: Whether allow any subclass of the given class.
required: Whether the argument group is required.
Returns:
A list with the names of the class arguments added.
"""
if callable(lightning_class) and not isinstance(lightning_class, type):
lightning_class = class_from_function(lightning_class)
if isinstance(lightning_class, type) and issubclass(
lightning_class, (Trainer, LightningModule, LightningDataModule, Callback)
):
if issubclass(lightning_class, Callback):
self.callback_keys.append(nested_key)
if subclass_mode:
return self.add_subclass_arguments(lightning_class, nested_key, fail_untyped=False, required=required)
return self.add_class_arguments(
lightning_class,
nested_key,
fail_untyped=False,
instantiate=not issubclass(lightning_class, Trainer),
sub_configs=True,
)
raise MisconfigurationException(
f"Cannot add arguments from: {lightning_class}. You should provide either a callable or a subclass of: "
"Trainer, LightningModule, LightningDataModule, or Callback."
)
def add_optimizer_args(
self,
optimizer_class: Union[Type[Optimizer], Tuple[Type[Optimizer], ...]],
nested_key: str = "optimizer",
link_to: str = "AUTOMATIC",
) -> None:
"""Adds arguments from an optimizer class to a nested key of the parser.
Args:
optimizer_class: Any subclass of :class:`torch.optim.Optimizer`.
nested_key: Name of the nested namespace to store arguments.
link_to: Dot notation of a parser key to set arguments or AUTOMATIC.
"""
if isinstance(optimizer_class, tuple):
assert all(issubclass(o, Optimizer) for o in optimizer_class)
else:
assert issubclass(optimizer_class, Optimizer)
kwargs = {"instantiate": False, "fail_untyped": False, "skip": {"params"}}
if isinstance(optimizer_class, tuple):
self.add_subclass_arguments(optimizer_class, nested_key, **kwargs)
self.set_choices(nested_key, optimizer_class)
else:
self.add_class_arguments(optimizer_class, nested_key, sub_configs=True, **kwargs)
self._optimizers[nested_key] = (optimizer_class, link_to)
def add_lr_scheduler_args(
self,
lr_scheduler_class: Union[LRSchedulerType, Tuple[LRSchedulerType, ...]],
nested_key: str = "lr_scheduler",
link_to: str = "AUTOMATIC",
) -> None:
"""Adds arguments from a learning rate scheduler class to a nested key of the parser.
Args:
lr_scheduler_class: Any subclass of ``torch.optim.lr_scheduler.{_LRScheduler, ReduceLROnPlateau}``.
nested_key: Name of the nested namespace to store arguments.
link_to: Dot notation of a parser key to set arguments or AUTOMATIC.
"""
if isinstance(lr_scheduler_class, tuple):
assert all(issubclass(o, LRSchedulerTypeTuple) for o in lr_scheduler_class)
else:
assert issubclass(lr_scheduler_class, LRSchedulerTypeTuple)
kwargs = {"instantiate": False, "fail_untyped": False, "skip": {"optimizer"}}
if isinstance(lr_scheduler_class, tuple):
self.add_subclass_arguments(lr_scheduler_class, nested_key, **kwargs)
self.set_choices(nested_key, lr_scheduler_class)
else:
self.add_class_arguments(lr_scheduler_class, nested_key, sub_configs=True, **kwargs)
self._lr_schedulers[nested_key] = (lr_scheduler_class, link_to)
def parse_args(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
argv = sys.argv
for k, v in self._choices.items():
if not any(arg.startswith(f"--{k}") for arg in argv):
# the key wasn't passed - maybe defined in a config, maybe it's optional
continue
classes, is_list = v
# knowing whether the argument is a list type automatically would be too complex
if is_list:
argv = self._convert_argv_issue_85(classes, k, argv)
else:
argv = self._convert_argv_issue_84(classes, k, argv)
self._choices.clear()
with mock.patch("sys.argv", argv):
return super().parse_args(*args, **kwargs)
def set_choices(self, nested_key: str, classes: Tuple[Type, ...], is_list: bool = False) -> None:
"""Adds support for shorthand notation for a particular nested key.
Args:
nested_key: The key whose choices will be set.
classes: A tuple of classes to choose from.
is_list: Whether the argument is a ``List[object]`` type.
"""
self._choices[nested_key] = (classes, is_list)
@staticmethod
def _convert_argv_issue_84(classes: Tuple[Type, ...], nested_key: str, argv: List[str]) -> List[str]:
"""Placeholder for https://github.com/omni-us/jsonargparse/issues/84.
Adds support for shorthand notation for ``object`` arguments.
"""
passed_args, clean_argv = {}, []
argv_key = f"--{nested_key}"
# get the argv args for this nested key
i = 0
while i < len(argv):
arg = argv[i]
if arg.startswith(argv_key):
if "=" in arg:
key, value = arg.split("=")
else:
key = arg
i += 1
value = argv[i]
passed_args[key] = value
else:
clean_argv.append(arg)
i += 1
# the user requested a help message
help_key = argv_key + ".help"
if help_key in passed_args:
argv_class = passed_args[help_key]
if "." in argv_class:
# user passed the class path directly
class_path = argv_class
else:
# convert shorthand format to the classpath
for cls in classes:
if cls.__name__ == argv_class:
class_path = _class_path_from_class(cls)
break
else:
raise ValueError(f"Could not generate get the class_path for {repr(argv_class)}")
return clean_argv + [help_key, class_path]
# generate the associated config file
argv_class = passed_args.pop(argv_key, "")
if not argv_class:
# the user passed a config as a str
class_path = passed_args[f"{argv_key}.class_path"]
init_args_key = f"{argv_key}.init_args"
init_args = {k[len(init_args_key) + 1 :]: v for k, v in passed_args.items() if k.startswith(init_args_key)}
config = str({"class_path": class_path, "init_args": init_args})
elif argv_class.startswith("{"):
# the user passed a config as a dict
config = argv_class
else:
# the user passed the shorthand format
init_args = {k[len(argv_key) + 1 :]: v for k, v in passed_args.items()} # +1 to account for the period
for cls in classes:
if cls.__name__ == argv_class:
config = str(_global_add_class_path(cls, init_args))
break
else:
raise ValueError(f"Could not generate a config for {repr(argv_class)}")
return clean_argv + [argv_key, config]
@staticmethod
def _convert_argv_issue_85(classes: Tuple[Type, ...], nested_key: str, argv: List[str]) -> List[str]:
"""Placeholder for https://github.com/omni-us/jsonargparse/issues/85.
Adds support for shorthand notation for ``List[object]`` arguments.
"""
passed_args, clean_argv = [], []
passed_configs = {}
argv_key = f"--{nested_key}"
# get the argv args for this nested key
i = 0
while i < len(argv):
arg = argv[i]
if arg.startswith(argv_key):
if "=" in arg:
key, value = arg.split("=")
else:
key = arg
i += 1
value = argv[i]
if "class_path" in value:
# the user passed a config as a dict
passed_configs[key] = yaml.safe_load(value)
else:
passed_args.append((key, value))
else:
clean_argv.append(arg)
i += 1
# generate the associated config file
config = []
i, n = 0, len(passed_args)
while i < n - 1:
ki, vi = passed_args[i]
# convert class name to class path
for cls in classes:
if cls.__name__ == vi:
cls_type = cls
break
else:
raise ValueError(f"Could not generate a config for {repr(vi)}")
config.append(_global_add_class_path(cls_type))
# get any init args
j = i + 1 # in case the j-loop doesn't run
for j in range(i + 1, n):
kj, vj = passed_args[j]
if ki == kj:
break
if kj.startswith(ki):
init_arg_name = kj.split(".")[-1]
config[-1]["init_args"][init_arg_name] = vj
i = j
# update at the end to preserve the order
for k, v in passed_configs.items():
config.extend(v)
if not config:
return clean_argv
return clean_argv + [argv_key, str(config)]
class SaveConfigCallback(Callback):
"""Saves a LightningCLI config to the log_dir when training starts.
Args:
parser: The parser object used to parse the configuration.
config: The parsed configuration that will be saved.
config_filename: Filename for the config file.
overwrite: Whether to overwrite an existing config file.
multifile: When input is multiple config files, saved config preserves this structure.
Raises:
RuntimeError: If the config file already exists in the directory to avoid overwriting a previous run
"""
def __init__(
self,
parser: LightningArgumentParser,
config: Namespace,
config_filename: str,
overwrite: bool = False,
multifile: bool = False,
) -> None:
self.parser = parser
self.config = config
self.config_filename = config_filename
self.overwrite = overwrite
self.multifile = multifile
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: Optional[str] = None) -> None:
        # save the config in `setup` because (1) we want it to be saved regardless of the trainer
        # function run and (2) we want to save it before processes are spawned
log_dir = trainer.log_dir # this broadcasts the directory
assert log_dir is not None
config_path = os.path.join(log_dir, self.config_filename)
fs = get_filesystem(log_dir)
if not self.overwrite:
# check if the file exists on rank 0
file_exists = fs.isfile(config_path) if trainer.is_global_zero else False
# broadcast whether to fail to all ranks
file_exists = trainer.strategy.broadcast(file_exists)
if file_exists:
raise RuntimeError(
f"{self.__class__.__name__} expected {config_path} to NOT exist. Aborting to avoid overwriting"
" results of a previous run. You can delete the previous config file,"
" set `LightningCLI(save_config_callback=None)` to disable config saving,"
" or set `LightningCLI(save_config_overwrite=True)` to overwrite the config file."
)
# save the file on rank 0
if trainer.is_global_zero:
# save only on rank zero to avoid race conditions on DDP.
# the `log_dir` needs to be created as we rely on the logger to do it usually
# but it hasn't logged anything at this point
fs.makedirs(log_dir, exist_ok=True)
self.parser.save(
self.config, config_path, skip_none=False, overwrite=self.overwrite, multifile=self.multifile
)
def __reduce__(self) -> Tuple[Type["SaveConfigCallback"], Tuple, Dict]:
# `ArgumentParser` is un-pickleable. Drop it
return self.__class__, (None, self.config, self.config_filename), {}
class LightningCLI:
"""Implementation of a configurable command line tool for pytorch-lightning."""
def __init__(
self,
model_class: Optional[Union[Type[LightningModule], Callable[..., LightningModule]]] = None,
datamodule_class: Optional[Union[Type[LightningDataModule], Callable[..., LightningDataModule]]] = None,
save_config_callback: Optional[Type[SaveConfigCallback]] = SaveConfigCallback,
save_config_filename: str = "config.yaml",
save_config_overwrite: bool = False,
save_config_multifile: bool = False,
trainer_class: Union[Type[Trainer], Callable[..., Trainer]] = Trainer,
trainer_defaults: Optional[Dict[str, Any]] = None,
seed_everything_default: Optional[int] = None,
description: str = "pytorch-lightning trainer command line tool",
env_prefix: str = "PL",
env_parse: bool = False,
parser_kwargs: Optional[Union[Dict[str, Any], Dict[str, Dict[str, Any]]]] = None,
subclass_mode_model: bool = False,
subclass_mode_data: bool = False,
run: bool = True,
) -> None:
"""Receives as input pytorch-lightning classes (or callables which return pytorch-lightning classes), which
are called / instantiated using a parsed configuration file and / or command line args.
Parsing of configuration from environment variables can be enabled by setting ``env_parse=True``.
A full configuration yaml would be parsed from ``PL_CONFIG`` if set.
        Individual settings are likewise parsed from variables named, for example, ``PL_TRAINER__MAX_EPOCHS``.
For more info, read :ref:`the CLI docs <common/lightning_cli:LightningCLI>`.
.. warning:: ``LightningCLI`` is in beta and subject to change.
Args:
model_class: An optional :class:`~pytorch_lightning.core.lightning.LightningModule` class to train on or a
callable which returns a :class:`~pytorch_lightning.core.lightning.LightningModule` instance when
called. If ``None``, you can pass a registered model with ``--model=MyModel``.
datamodule_class: An optional :class:`~pytorch_lightning.core.datamodule.LightningDataModule` class or a
callable which returns a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` instance when
called. If ``None``, you can pass a registered datamodule with ``--data=MyDataModule``.
save_config_callback: A callback class to save the training config.
save_config_filename: Filename for the config file.
save_config_overwrite: Whether to overwrite an existing config file.
save_config_multifile: When input is multiple config files, saved config preserves this structure.
trainer_class: An optional subclass of the :class:`~pytorch_lightning.trainer.trainer.Trainer` class or a
callable which returns a :class:`~pytorch_lightning.trainer.trainer.Trainer` instance when called.
trainer_defaults: Set to override Trainer defaults or add persistent callbacks. The callbacks added through
this argument will not be configurable from a configuration file and will always be present for
this particular CLI. Alternatively, configurable callbacks can be added as explained in
:ref:`the CLI docs <common/lightning_cli:Configurable callbacks>`.
seed_everything_default: Default value for the :func:`~pytorch_lightning.utilities.seed.seed_everything`
seed argument.
description: Description of the tool shown when running ``--help``.
env_prefix: Prefix for environment variables.
env_parse: Whether environment variable parsing is enabled.
parser_kwargs: Additional arguments to instantiate each ``LightningArgumentParser``.
subclass_mode_model: Whether model can be any `subclass
<https://jsonargparse.readthedocs.io/en/stable/#class-type-and-sub-classes>`_
of the given class.
subclass_mode_data: Whether datamodule can be any `subclass
<https://jsonargparse.readthedocs.io/en/stable/#class-type-and-sub-classes>`_
of the given class.
run: Whether subcommands should be added to run a :class:`~pytorch_lightning.trainer.trainer.Trainer`
method. If set to ``False``, the trainer and model classes will be instantiated only.
"""
self.save_config_callback = save_config_callback
self.save_config_filename = save_config_filename
self.save_config_overwrite = save_config_overwrite
self.save_config_multifile = save_config_multifile
self.trainer_class = trainer_class
self.trainer_defaults = trainer_defaults or {}
self.seed_everything_default = seed_everything_default
self.model_class = model_class
# used to differentiate between the original value and the processed value
self._model_class = model_class or LightningModule
self.subclass_mode_model = (model_class is None) or subclass_mode_model
self.datamodule_class = datamodule_class
# used to differentiate between the original value and the processed value
self._datamodule_class = datamodule_class or LightningDataModule
self.subclass_mode_data = (datamodule_class is None) or subclass_mode_data
main_kwargs, subparser_kwargs = self._setup_parser_kwargs(
parser_kwargs or {}, # type: ignore # github.com/python/mypy/issues/6463
{"description": description, "env_prefix": env_prefix, "default_env": env_parse},
)
self.setup_parser(run, main_kwargs, subparser_kwargs)
self.parse_arguments(self.parser)
self.subcommand = self.config["subcommand"] if run else None
seed = self._get(self.config, "seed_everything")
if seed is not None:
seed_everything(seed, workers=True)
self.before_instantiate_classes()
self.instantiate_classes()
if self.subcommand is not None:
self._run_subcommand(self.subcommand)
def _setup_parser_kwargs(
self, kwargs: Dict[str, Any], defaults: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
if kwargs.keys() & self.subcommands().keys():
# `kwargs` contains arguments per subcommand
return defaults, kwargs
main_kwargs = defaults
main_kwargs.update(kwargs)
return main_kwargs, {}
def init_parser(self, **kwargs: Any) -> LightningArgumentParser:
"""Method that instantiates the argument parser."""
return LightningArgumentParser(**kwargs)
def setup_parser(
self, add_subcommands: bool, main_kwargs: Dict[str, Any], subparser_kwargs: Dict[str, Any]
) -> None:
"""Initialize and setup the parser, subcommands, and arguments."""
self.parser = self.init_parser(**main_kwargs)
if add_subcommands:
self._subcommand_method_arguments: Dict[str, List[str]] = {}
self._add_subcommands(self.parser, **subparser_kwargs)
else:
self._add_arguments(self.parser)
def add_default_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
"""Adds default arguments to the parser."""
parser.add_argument(
"--seed_everything",
type=Optional[int],
default=self.seed_everything_default,
help="Set to an int to run seed_everything with this value before classes instantiation",
)
def add_core_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
"""Adds arguments from the core classes to the parser."""
parser.add_lightning_class_args(self.trainer_class, "trainer")
parser.set_choices("trainer.callbacks", CALLBACK_REGISTRY.classes, is_list=True)
trainer_defaults = {"trainer." + k: v for k, v in self.trainer_defaults.items() if k != "callbacks"}
parser.set_defaults(trainer_defaults)
parser.add_lightning_class_args(self._model_class, "model", subclass_mode=self.subclass_mode_model)
if self.model_class is None and len(MODEL_REGISTRY):
# did not pass a model and there are models registered
parser.set_choices("model", MODEL_REGISTRY.classes)
if self.datamodule_class is not None:
parser.add_lightning_class_args(self._datamodule_class, "data", subclass_mode=self.subclass_mode_data)
elif len(DATAMODULE_REGISTRY):
# this should not be required because the user might want to use the `LightningModule` dataloaders
parser.add_lightning_class_args(
self._datamodule_class, "data", subclass_mode=self.subclass_mode_data, required=False
)
parser.set_choices("data", DATAMODULE_REGISTRY.classes)
def _add_arguments(self, parser: LightningArgumentParser) -> None:
# default + core + custom arguments
self.add_default_arguments_to_parser(parser)
self.add_core_arguments_to_parser(parser)
self.add_arguments_to_parser(parser)
# add default optimizer args if necessary
if not parser._optimizers: # already added by the user in `add_arguments_to_parser`
parser.add_optimizer_args(OPTIMIZER_REGISTRY.classes)
if not parser._lr_schedulers: # already added by the user in `add_arguments_to_parser`
parser.add_lr_scheduler_args(LR_SCHEDULER_REGISTRY.classes)
self.link_optimizers_and_lr_schedulers(parser)
def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
"""Implement to add extra arguments to the parser or link arguments.
Args:
parser: The parser object to which arguments can be added
"""
@staticmethod
def subcommands() -> Dict[str, Set[str]]:
"""Defines the list of available subcommands and the arguments to skip."""
return {
"fit": {"model", "train_dataloaders", "val_dataloaders", "datamodule"},
"validate": {"model", "dataloaders", "datamodule"},
"test": {"model", "dataloaders", "datamodule"},
"predict": {"model", "dataloaders", "datamodule"},
"tune": {"model", "train_dataloaders", "val_dataloaders", "datamodule"},
}
def _add_subcommands(self, parser: LightningArgumentParser, **kwargs: Any) -> None:
"""Adds subcommands to the input parser."""
parser_subcommands = parser.add_subcommands()
# the user might have passed a builder function
trainer_class = (
self.trainer_class if isinstance(self.trainer_class, type) else class_from_function(self.trainer_class)
)
# register all subcommands in separate subcommand parsers under the main parser
for subcommand in self.subcommands():
subcommand_parser = self._prepare_subcommand_parser(trainer_class, subcommand, **kwargs.get(subcommand, {}))
fn = getattr(trainer_class, subcommand)
# extract the first line description in the docstring for the subcommand help message
description = _get_short_description(fn)
parser_subcommands.add_subcommand(subcommand, subcommand_parser, help=description)
def _prepare_subcommand_parser(self, klass: Type, subcommand: str, **kwargs: Any) -> LightningArgumentParser:
parser = self.init_parser(**kwargs)
self._add_arguments(parser)
# subcommand arguments
skip = self.subcommands()[subcommand]
added = parser.add_method_arguments(klass, subcommand, skip=skip)
# need to save which arguments were added to pass them to the method later
self._subcommand_method_arguments[subcommand] = added
return parser
@staticmethod
def link_optimizers_and_lr_schedulers(parser: LightningArgumentParser) -> None:
"""Creates argument links for optimizers and learning rate schedulers that specified a ``link_to``."""
optimizers_and_lr_schedulers = {**parser._optimizers, **parser._lr_schedulers}
for key, (class_type, link_to) in optimizers_and_lr_schedulers.items():
if link_to == "AUTOMATIC":
continue
if isinstance(class_type, tuple):
parser.link_arguments(key, link_to)
else:
add_class_path = _add_class_path_generator(class_type)
parser.link_arguments(key, link_to, compute_fn=add_class_path)
def parse_arguments(self, parser: LightningArgumentParser) -> None:
"""Parses command line arguments and stores it in ``self.config``."""
self.config = parser.parse_args()
def before_instantiate_classes(self) -> None:
"""Implement to run some code before instantiating the classes."""
def instantiate_classes(self) -> None:
"""Instantiates the classes and sets their attributes."""
self.config_init = self.parser.instantiate_classes(self.config)
self.datamodule = self._get(self.config_init, "data")
self.model = self._get(self.config_init, "model")
self._add_configure_optimizers_method_to_model(self.subcommand)
self.trainer = self.instantiate_trainer()
def instantiate_trainer(self, **kwargs: Any) -> Trainer:
"""Instantiates the trainer.
Args:
kwargs: Any custom trainer arguments.
"""
extra_callbacks = [self._get(self.config_init, c) for c in self._parser(self.subcommand).callback_keys]
trainer_config = {**self._get(self.config_init, "trainer"), **kwargs}
return self._instantiate_trainer(trainer_config, extra_callbacks)
def _instantiate_trainer(self, config: Dict[str, Any], callbacks: List[Callback]) -> Trainer:
config["callbacks"] = config["callbacks"] or []
config["callbacks"].extend(callbacks)
if "callbacks" in self.trainer_defaults:
if isinstance(self.trainer_defaults["callbacks"], list):
config["callbacks"].extend(self.trainer_defaults["callbacks"])
else:
config["callbacks"].append(self.trainer_defaults["callbacks"])
if self.save_config_callback and not config["fast_dev_run"]:
config_callback = self.save_config_callback(
self._parser(self.subcommand),
self.config.get(str(self.subcommand), self.config),
self.save_config_filename,
overwrite=self.save_config_overwrite,
multifile=self.save_config_multifile,
)
config["callbacks"].append(config_callback)
return self.trainer_class(**config)
def _parser(self, subcommand: Optional[str]) -> LightningArgumentParser:
if subcommand is None:
return self.parser
# return the subcommand parser for the subcommand passed
action_subcommand = self.parser._subcommands_action
return action_subcommand._name_parser_map[subcommand]
@staticmethod
def configure_optimizers(
lightning_module: LightningModule, optimizer: Optimizer, lr_scheduler: Optional[LRSchedulerTypeUnion] = None
) -> Any:
"""Override to customize the :meth:`~pytorch_lightning.core.lightning.LightningModule.configure_optimizers`
method.
Args:
lightning_module: A reference to the model.
optimizer: The optimizer.
lr_scheduler: The learning rate scheduler (if used).
"""
if lr_scheduler is None:
return optimizer
if isinstance(lr_scheduler, ReduceLROnPlateau):
return {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": lr_scheduler, "monitor": lr_scheduler.monitor},
}
return [optimizer], [lr_scheduler]
def _add_configure_optimizers_method_to_model(self, subcommand: Optional[str]) -> None:
"""Overrides the model's :meth:`~pytorch_lightning.core.lightning.LightningModule.configure_optimizers`
method if a single optimizer and optionally a scheduler argument groups are added to the parser as
'AUTOMATIC'."""
parser = self._parser(subcommand)
def get_automatic(
class_type: Union[Type, Tuple[Type, ...]], register: Dict[str, Tuple[Union[Type, Tuple[Type, ...]], str]]
) -> List[str]:
automatic = []
for key, (base_class, link_to) in register.items():
if not isinstance(base_class, tuple):
base_class = (base_class,)
if link_to == "AUTOMATIC" and any(issubclass(c, class_type) for c in base_class):
automatic.append(key)
return automatic
optimizers = get_automatic(Optimizer, parser._optimizers)
lr_schedulers = get_automatic(LRSchedulerTypeTuple, parser._lr_schedulers)
if len(optimizers) == 0:
return
if len(optimizers) > 1 or len(lr_schedulers) > 1:
raise MisconfigurationException(
f"`{self.__class__.__name__}.add_configure_optimizers_method_to_model` expects at most one optimizer "
f"and one lr_scheduler to be 'AUTOMATIC', but found {optimizers+lr_schedulers}. In this case the user "
"is expected to link the argument groups and implement `configure_optimizers`, see "
"https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_cli.html"
"#optimizers-and-learning-rate-schedulers"
)
optimizer_class = parser._optimizers[optimizers[0]][0]
optimizer_init = self._get(self.config_init, optimizers[0])
if not isinstance(optimizer_class, tuple):
optimizer_init = _global_add_class_path(optimizer_class, optimizer_init)
if not optimizer_init:
# optimizers were registered automatically but not passed by the user
return
lr_scheduler_init = None
if lr_schedulers:
lr_scheduler_class = parser._lr_schedulers[lr_schedulers[0]][0]
lr_scheduler_init = self._get(self.config_init, lr_schedulers[0])
if not isinstance(lr_scheduler_class, tuple):
lr_scheduler_init = _global_add_class_path(lr_scheduler_class, lr_scheduler_init)
if is_overridden("configure_optimizers", self.model):
warnings._warn(
f"`{self.model.__class__.__name__}.configure_optimizers` will be overridden by "
f"`{self.__class__.__name__}.configure_optimizers`."
)
optimizer = instantiate_class(self.model.parameters(), optimizer_init)
lr_scheduler = instantiate_class(optimizer, lr_scheduler_init) if lr_scheduler_init else None
fn = partial(self.configure_optimizers, optimizer=optimizer, lr_scheduler=lr_scheduler)
update_wrapper(fn, self.model.configure_optimizers) # necessary for `is_overridden`
# override the existing method
self.model.configure_optimizers = MethodType(fn, self.model)
def _get(self, config: Dict[str, Any], key: str, default: Optional[Any] = None) -> Any:
"""Utility to get a config value which might be inside a subcommand."""
return config.get(str(self.subcommand), config).get(key, default)
def _run_subcommand(self, subcommand: str) -> None:
"""Run the chosen subcommand."""
before_fn = getattr(self, f"before_{subcommand}", None)
if callable(before_fn):
before_fn()
default = getattr(self.trainer, subcommand)
fn = getattr(self, subcommand, default)
fn_kwargs = self._prepare_subcommand_kwargs(subcommand)
fn(**fn_kwargs)
after_fn = getattr(self, f"after_{subcommand}", None)
if callable(after_fn):
after_fn()
def _prepare_subcommand_kwargs(self, subcommand: str) -> Dict[str, Any]:
"""Prepares the keyword arguments to pass to the subcommand to run."""
fn_kwargs = {
k: v for k, v in self.config_init[subcommand].items() if k in self._subcommand_method_arguments[subcommand]
}
fn_kwargs["model"] = self.model
if self.datamodule is not None:
fn_kwargs["datamodule"] = self.datamodule
return fn_kwargs
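# A minimal end-to-end sketch (hedged; `MyModel` and `MyDataModule` are
# hypothetical user classes, and jsonargparse must be installed):
#
#     cli = LightningCLI(MyModel, MyDataModule)
#
# invoked e.g. as: python train.py fit --model.lr=0.01 --trainer.max_epochs=3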
def _class_path_from_class(class_type: Type) -> str:
return class_type.__module__ + "." + class_type.__name__
def _global_add_class_path(
class_type: Type, init_args: Optional[Union[Namespace, Dict[str, Any]]] = None
) -> Dict[str, Any]:
if isinstance(init_args, Namespace):
init_args = init_args.as_dict()
return {"class_path": _class_path_from_class(class_type), "init_args": init_args or {}}
def _add_class_path_generator(class_type: Type) -> Callable[[Namespace], Dict[str, Any]]:
def add_class_path(init_args: Namespace) -> Dict[str, Any]:
return _global_add_class_path(class_type, init_args)
return add_class_path
def instantiate_class(args: Union[Any, Tuple[Any, ...]], init: Dict[str, Any]) -> Any:
"""Instantiates a class with the given args and init.
Args:
args: Positional arguments required for instantiation.
init: Dict of the form {"class_path":...,"init_args":...}.
Returns:
The instantiated class object.
"""
kwargs = init.get("init_args", {})
if not isinstance(args, tuple):
args = (args,)
class_module, class_name = init["class_path"].rsplit(".", 1)
module = __import__(class_module, fromlist=[class_name])
args_class = getattr(module, class_name)
return args_class(*args, **kwargs)
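# Example (a sketch; torch.optim.SGD is a real class, `model` is hypothetical):
#     init = {"class_path": "torch.optim.SGD", "init_args": {"lr": 0.1}}
#     optimizer = instantiate_class(model.parameters(), init)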
def _get_short_description(component: object) -> Optional[str]:
parse = import_docstring_parse("LightningCLI(run=True)")
try:
docstring = parse(component.__doc__)
return docstring.short_description
except ValueError:
rank_zero_warn(f"Failed parsing docstring for {component}")
| 46.729977
| 120
| 0.650825
|
0c38aa566eb888aa5c27830c3785593d1f14d406
| 699
|
py
|
Python
|
Visualization/us_counties.py
|
monocilindro/qgis-earthengine-examples
|
82aea8926d34ed3f4ad4a4a345ddbd225819d28f
|
[
"MIT"
] | 646
|
2019-12-03T06:09:03.000Z
|
2022-03-28T03:37:08.000Z
|
Visualization/us_counties.py
|
csaybar/qgis-earthengine-examples
|
ba8942683834d2847ff3246bdd1859b36e50fe44
|
[
"MIT"
] | 10
|
2019-12-30T03:42:44.000Z
|
2021-05-22T07:34:07.000Z
|
Visualization/us_counties.py
|
csaybar/qgis-earthengine-examples
|
ba8942683834d2847ff3246bdd1859b36e50fe44
|
[
"MIT"
] | 219
|
2019-12-06T02:20:53.000Z
|
2022-03-30T15:14:27.000Z
|
import ee
from ee_plugin import Map
dataset = ee.FeatureCollection('TIGER/2018/Counties')
visParams = {
'palette': ['purple', 'blue', 'green', 'yellow', 'orange', 'red'],
'min': 0,
'max': 50,
'opacity': 0.8,
}
# Turn the strings into numbers
dataset = dataset.map(lambda f: f.set('STATEFP', ee.Number.parse(f.get('STATEFP'))))
image = ee.Image().float().paint(dataset, 'STATEFP')
countyOutlines = ee.Image().float().paint(**{
'featureCollection': dataset,
'color': 'black',
'width': 1
})
Map.setCenter(-99.844, 37.649, 5)
Map.addLayer(image, visParams, 'TIGER/2018/Counties')
Map.addLayer(countyOutlines, {}, 'county outlines')
# Map.addLayer(dataset, {}, 'for Inspector', False)
| 27.96
| 84
| 0.668097
|
c36ea07e41d4d16b6fe239c9707805d1c68ad5da
| 2,923
|
py
|
Python
|
apps/browser/firefox.py
|
AndreasArvidsson/andreas-talon
|
240218a385e450188a28f628af48d53d56d091d5
|
[
"MIT"
] | 11
|
2021-08-22T19:41:23.000Z
|
2022-02-24T11:39:42.000Z
|
apps/browser/firefox.py
|
AndreasArvidsson/talon-user
|
c8945d94a8c3c53083b2f75b9e585500d9ae8845
|
[
"MIT"
] | null | null | null |
apps/browser/firefox.py
|
AndreasArvidsson/talon-user
|
c8945d94a8c3c53083b2f75b9e585500d9ae8845
|
[
"MIT"
] | 2
|
2022-01-28T04:59:31.000Z
|
2022-03-04T21:28:59.000Z
|
from talon import Module, Context, actions, app
key = actions.key
ctx = Context()
mod = Module()
apps = mod.apps
apps.firefox = "app.name: Firefox"
apps.firefox = "app.name: firefox"
apps.firefox = """
os: windows
and app.name: Firefox
os: windows
and app.exe: firefox.exe
"""
apps.firefox = """
os: mac
and app.bundle: org.mozilla.firefox
"""
ctx.matches = r"""
app: firefox
"""
@ctx.action_class("browser")
class BrowserActions:
def go(url: str):
actions.browser.focus_address()
actions.sleep("50ms")
actions.insert(url)
key("enter")
    def focus_search(): actions.browser.focus_address()
def submit_form(): key("enter")
def bookmark(): key("ctrl-d")
def bookmark_tabs(): key("ctrl-shift-d")
def bookmarks(): key("ctrl-shift-O")
def bookmarks_bar():
key("alt-v")
actions.sleep("50ms")
key("t")
actions.sleep("50ms")
key("b")
def focus_address(): key("ctrl-l")
def go_blank(): key("ctrl-n")
def go_home(): key("alt-home")
def open_private_window(): key("ctrl-shift-p")
def reload(): key("ctrl-r")
def reload_hard(): key("ctrl-shift-r")
def show_clear_cache(): key("ctrl-shift-delete")
def show_downloads(): key("ctrl-j")
def show_extensions(): key("ctrl-shift-a")
def show_history(): key("ctrl-h")
def toggle_dev_tools(): key("ctrl-shift-i")
@ctx.action_class("app")
class AppActions:
def tab_detach():
key("escape ctrl-alt-M")
def preferences():
actions.user.browser_open("about:preferences")
@ctx.action_class("user")
class UserActions:
def tab_jump(number: int):
if number < 9:
key(f"ctrl-{number}")
def tab_final(): key("ctrl-9")
def tab_back(): key("escape ctrl-alt-N")
def browser_open(url: str):
actions.browser.focus_address()
actions.sleep("50ms")
actions.insert(url)
key("alt-enter")
# ----- Scroll -----
def scroll_up(): key("ctrl-alt-h")
def scroll_down(): key("ctrl-alt-j")
def scroll_left(): key("ctrl-alt-k")
def scroll_right(): key("ctrl-alt-l")
def scroll_up_page(): key("pageup")
def scroll_down_page(): key("pagedown")
def scroll_up_half_page(): key("alt-pageup")
def scroll_down_half_page(): key("alt-pagedown")
# ----- LINUX -----
ctx_linux = Context()
ctx_linux.matches = r"""
os: linux
app: firefox
"""
@ctx_linux.action_class("user")
class UserActionsLinux:
def tab_final(): key("alt-9")
def tab_jump(number: int):
if number < 9:
key(f"alt-{number}")
| 26.098214
| 56
| 0.571331
|
743bff7eb823fe626dfca8d7a42af91dbe16ff58
| 1,719
|
py
|
Python
|
16.Clustering/16.5.MeanShift.py
|
radiumweilei/chinahadoop-ml-2
|
ea886610a6ccb278afeff759bf2dc8a30ef3f275
|
[
"Apache-2.0"
] | null | null | null |
16.Clustering/16.5.MeanShift.py
|
radiumweilei/chinahadoop-ml-2
|
ea886610a6ccb278afeff759bf2dc8a30ef3f275
|
[
"Apache-2.0"
] | null | null | null |
16.Clustering/16.5.MeanShift.py
|
radiumweilei/chinahadoop-ml-2
|
ea886610a6ccb278afeff759bf2dc8a30ef3f275
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as ds
import matplotlib.colors
from sklearn.cluster import MeanShift
from sklearn.metrics import euclidean_distances
if __name__ == "__main__":
N = 1000
centers = [[1, 2], [-1, -1], [1, -1], [-1, 1]]
data, y = ds.make_blobs(N, n_features=2, centers=centers, cluster_std=[0.5, 0.25, 0.7, 0.5], random_state=0)
matplotlib.rcParams['font.sans-serif'] = [u'SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False
plt.figure(figsize=(10, 9), facecolor='w')
m = euclidean_distances(data, squared=True)
bw = np.median(m)
print(bw)
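    # An alternative bandwidth heuristic (a hedged sketch; estimate_bandwidth
    # is scikit-learn's built-in helper, quantile=0.2 is an assumed setting):
    # from sklearn.cluster import estimate_bandwidth
    # bw = estimate_bandwidth(data, quantile=0.2)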
for i, mul in enumerate(np.linspace(0.1, 0.4, 4)):
band_width = mul * bw
model = MeanShift(bin_seeding=True, bandwidth=band_width)
ms = model.fit(data)
centers = ms.cluster_centers_
y_hat = ms.labels_
n_clusters = np.unique(y_hat).size
        print('Bandwidth:', mul, band_width, 'Number of clusters:', n_clusters)
plt.subplot(2, 2, i + 1)
        plt.title(u'Bandwidth: %.2f, number of clusters: %d' % (band_width, n_clusters))
clrs = []
for c in np.linspace(16711680, 255, n_clusters):
clrs.append('#%06x' % int(c))
# clrs = plt.cm.Spectral(np.linspace(0, 1, n_clusters))
print(clrs)
for k, clr in enumerate(clrs):
cur = (y_hat == k)
plt.scatter(data[cur, 0], data[cur, 1], c=clr, edgecolors='none')
plt.scatter(centers[:, 0], centers[:, 1], s=150, c=clrs, marker='*', edgecolors='k')
plt.grid(True)
    plt.tight_layout(pad=2)
    plt.suptitle(u'MeanShift Clustering', fontsize=20)
plt.subplots_adjust(top=0.92)
plt.show()
| 36.574468
| 112
| 0.612565
|
244aa0243d6eb61686a159c057d8e7a049106a40
| 514
|
py
|
Python
|
bin/sample_sparkline.py
|
aayoubi/spyklines
|
23969d4ceabeeb661cbcc607547c8ca964412f86
|
[
"MIT"
] | 2
|
2019-11-06T12:11:43.000Z
|
2020-07-23T09:30:47.000Z
|
bin/sample_sparkline.py
|
aayoubi/spyklines
|
23969d4ceabeeb661cbcc607547c8ca964412f86
|
[
"MIT"
] | 1
|
2020-07-22T16:45:25.000Z
|
2020-07-22T16:45:25.000Z
|
bin/sample_sparkline.py
|
aayoubi/spyklines
|
23969d4ceabeeb661cbcc607547c8ca964412f86
|
[
"MIT"
] | 3
|
2020-02-22T13:10:22.000Z
|
2021-02-15T17:36:14.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from spyklines import SparkLine, TrendLine
if __name__ == '__main__':
number_of_plots = 10
plt.figure(figsize=(6, 4))
for i in range(1, number_of_plots + 1):
ax = plt.subplot(number_of_plots, 1, i)
values = np.random.randint(100, 500, size=100)
sparkline = SparkLine('line-{}'.format(i), values)
sparkline.plot(ax)
trendline = TrendLine('line-{}'.format(i), values)
trendline.plot(ax)
plt.show()
| 28.555556
| 58
| 0.642023
|
93f342036f8c102a6cf7d2898ff9c9990a8e55b5
| 11,764
|
py
|
Python
|
intersight/model/iaas_most_run_tasks_response.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/iaas_most_run_tasks_response.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/iaas_most_run_tasks_response.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.iaas_most_run_tasks_list import IaasMostRunTasksList
from intersight.model.mo_aggregate_transform import MoAggregateTransform
from intersight.model.mo_document_count import MoDocumentCount
from intersight.model.mo_tag_key_summary import MoTagKeySummary
from intersight.model.mo_tag_summary import MoTagSummary
globals()['IaasMostRunTasksList'] = IaasMostRunTasksList
globals()['MoAggregateTransform'] = MoAggregateTransform
globals()['MoDocumentCount'] = MoDocumentCount
globals()['MoTagKeySummary'] = MoTagKeySummary
globals()['MoTagSummary'] = MoTagSummary
class IaasMostRunTasksResponse(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'object_type': (str,), # noqa: E501
'count': (int,), # noqa: E501
'results': ([MoTagKeySummary], none_type,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'iaas.MostRunTasks.List': IaasMostRunTasksList,
'mo.AggregateTransform': MoAggregateTransform,
'mo.DocumentCount': MoDocumentCount,
'mo.TagSummary': MoTagSummary,
}
if not val:
return None
return {'object_type': val}
attribute_map = {
'object_type': 'ObjectType', # noqa: E501
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, object_type, *args, **kwargs): # noqa: E501
"""IaasMostRunTasksResponse - a model defined in OpenAPI
Args:
            object_type (str): A discriminator value to disambiguate the schema of an HTTP GET response body.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            count (int): The total number of 'iaas.MostRunTasks' resources matching the request, across all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter. [optional]  # noqa: E501
results ([MoTagKeySummary], none_type): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
IaasMostRunTasksList,
MoAggregateTransform,
MoDocumentCount,
MoTagSummary,
],
}
avg_line_length: 47.056 | max_line_length: 1,678 | alphanum_fraction: 0.637538

hexsha: 00ff6334d937372140f4f72b15e1494034bddd11 | size: 7,142 | ext: py | lang: Python
path: apps/trade/src/CrossTrader.py | repo: kikei/btc-bot-ai | head: cb118fa1809ebef472a2025be697c9050e948009 | licenses: ["Apache-2.0"]
stars: 1 (2020-02-02T13:53:21.000Z to 2020-02-02T13:53:21.000Z) | issues: null | forks: null
import os
import sys
import concurrent.futures
CWD = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(CWD, '..', 'config'))
from Markets import Markets
import market.MarketConstants as Const
from classes import Tick
def get_executor():
executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
return executor
class CrossTrader(object):
def __init__(self, logger=None):
self.logger = logger
self.markets = Markets(logger)
def get_tick(self):
"""
(self: CrossTrader) -> Tick
"""
bitflyer = self.markets.BitFlyer
quoine = self.markets.Quoine
tick = {}
def on_bitflyer(future):
tick[Tick.BitFlyer] = future.result()
def on_quoine(future):
tick[Tick.Quoine] = future.result()
fs = [
(bitflyer.get_tick, on_bitflyer),
(quoine.get_tick, on_quoine)
]
with get_executor() as executor:
def to_future(fs):
f, callback = fs
future = executor.submit(f)
future.add_done_callback(callback)
return future
futures = [to_future(f) for f in fs]
timeout = 300 # seconds
try:
concurrent.futures.wait(futures, timeout=timeout)
except concurrent.futures.TimeoutError as e:
                self.logger.error(('Fetching ticks timed out, ' +
                                   'timeout={}').format(timeout))
        if Tick.BitFlyer not in tick or tick[Tick.BitFlyer] is None:
            self.logger.error('Failed to fetch the BitFlyer tick')
        if Tick.Quoine not in tick or tick[Tick.Quoine] is None:
            self.logger.error('Failed to fetch the Quoine tick')
return Tick(tick)
def open_position(self, upper, lower, tick, lot):
"""
(
upper: str, lower: str, tick: Tick, lot: float
) -> {
lower: OneTick, upper: OneTick
}
"""
f_short = None
f_long = None
if upper == Tick.BitFlyer:
f_short = lambda: self.open_bitflyer(Const.SHORT,
tick.exchanger(Tick.BitFlyer).ask,
lot)
elif upper == Tick.Quoine:
f_short = lambda: self.open_quoine(Const.SHORT,
tick.exchanger(Tick.Quoine).ask,
lot)
if lower == Tick.BitFlyer:
f_long = lambda: self.open_bitflyer(Const.LONG,
tick.exchanger(Tick.BitFlyer).bid,
lot)
elif lower == Tick.Quoine:
f_long = lambda: self.open_quoine(Const.LONG,
tick.exchanger(Tick.Quoine).bid,
lot)
if f_short is None or \
f_long is None:
self.logger.error('unknown exchanger, upper={}, lower={}'
.format(upper, lower))
return
exchangers = {}
def on_short(future):
exchangers[upper] = future.result()
def on_long(future):
exchangers[lower] = future.result()
fs = [(f_short, on_short), (f_long, on_long)]
with get_executor() as executor:
def to_future(fs):
f, callback = fs
future = executor.submit(f)
future.add_done_callback(callback)
return future
futures = [to_future(f) for f in fs]
timeout = 300 # seconds
try:
concurrent.futures.wait(futures, timeout=timeout)
except concurrent.futures.TimeoutError as e:
                self.logger.error(('Opening positions timed out, ' +
                                   'timeout={}').format(timeout))
        if upper not in exchangers or exchangers[upper] is None:
            self.logger.error(('Failed to open the short position, ' +
                               'exchanger={}, lot={}').format(upper, lot))
            return None
        if lower not in exchangers or exchangers[lower] is None:
            self.logger.error(('Failed to open the long position, ' +
                               'exchanger={}, lot={}').format(lower, lot))
            return None
return exchangers
def close_position(self, position):
name_short = position.short
name_long = position.long
ex_short = position.short_one() # : OnePosition
ex_long = position.long_one() # : OnePosition
f_short = None
f_long = None
if name_short == Tick.BitFlyer:
f_short = lambda: self.close_bitflyer(ex_short)
elif name_short == Tick.Quoine:
f_short = lambda: self.close_quoine(ex_short)
if name_long == Tick.BitFlyer:
f_long = lambda: self.close_bitflyer(ex_long)
elif name_long == Tick.Quoine:
f_long = lambda: self.close_quoine(ex_long)
if f_short is None or \
f_long is None:
            self.logger.error('unknown exchanger, short={}, long={}'
.format(name_short, name_long))
return False
results = {}
def on_short(future):
results[name_short] = future.result()
def on_long(future):
results[name_long] = future.result()
fs = [(f_short, on_short), (f_long, on_long)]
with get_executor() as executor:
def to_future(fs):
f, callback = fs
future = executor.submit(f)
future.add_done_callback(callback)
return future
futures = [to_future(f) for f in fs]
timeout = 300 # seconds
try:
concurrent.futures.wait(futures, timeout=timeout)
except concurrent.futures.TimeoutError as e:
                self.logger.error(('Closing positions timed out, ' +
                                   'timeout={}').format(timeout))
        if name_short not in results or results[name_short] is None:
            self.logger.error('Failed to close the short position, '
                              'exchanger={}'.format(name_short))
            return False
        if name_long not in results or results[name_long] is None:
            self.logger.error('Failed to close the long position, '
                              'exchanger={}'.format(name_long))
            return False
return True
def open_bitflyer(self, side, price, lot):
"""
( side: str, price: float, lot: float ) -> OnePosition
"""
bitflyer = self.markets.BitFlyer
position = bitflyer.open_position(side, price, lot)
return position
def open_quoine(self, side, price, lot):
"""
( side: str, price: float, lot: float ) -> OnePosition
"""
quoine = self.markets.Quoine
position = quoine.open_position(side, price, lot)
return position
def close_bitflyer(self, position):
"""
( position: OnePosition ) -> bool
"""
bitflyer = self.markets.BitFlyer
success = bitflyer.close_position(position)
return success
def close_quoine(self, position):
"""
( position: OnePosition ) -> bool
"""
quoine = self.markets.Quoine
success = quoine.close_position(position)
return success
def is_busy_bitflyer(self):
bitflyer = self.markets.BitFlyer
is_busy = bitflyer.is_busy()
return is_busy
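# --- Illustrative sketch (editorial addition, not part of the original file) ---
# The three methods above repeat one pattern: submit two callables to a
# ThreadPoolExecutor, collect results via add_done_callback, and bound the
# wait with a timeout. A minimal stand-alone version follows; the names are
# hypothetical, and it relies on the `import concurrent.futures` at the top
# of this file. Two details worth knowing: concurrent.futures.wait() signals
# a timeout through its (done, not_done) return value rather than by raising
# TimeoutError, and leaving the `with executor:` block still waits for any
# outstanding futures to finish.
def _parallel_fetch_sketch(fetch_a, fetch_b, timeout=300):
    results = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        futures = {executor.submit(f): key
                   for key, f in (('a', fetch_a), ('b', fetch_b))}
        done, _ = concurrent.futures.wait(futures, timeout=timeout)
        for future in done:
            # Map each completed future back to its label.
            results[futures[future]] = future.result()
    return results  # a missing key means that fetch did not finish in time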
avg_line_length: 30.262712 | max_line_length: 77 | alphanum_fraction: 0.58541

hexsha: a9758c8f089c11fde5b4a0bc128205770cbc7fb9 | size: 3,874 | ext: py | lang: Python
path: azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/application_gateway_request_routing_rule_py3.py | repo: JonathanGailliez/azure-sdk-for-python | head: f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | licenses: ["MIT"]
stars: 1 (2021-09-07T18:36:04.000Z to 2021-09-07T18:36:04.000Z) | issues: 2 (2019-10-02T23:37:38.000Z to 2020-10-02T01:17:31.000Z) | forks: 1 (2018-08-28T14:36:47.000Z to 2018-08-28T14:36:47.000Z)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class ApplicationGatewayRequestRoutingRule(SubResource):
"""Request routing rule of an application gateway.
:param id: Resource ID.
:type id: str
:param rule_type: Rule type. Possible values include: 'Basic',
'PathBasedRouting'
:type rule_type: str or
~azure.mgmt.network.v2018_01_01.models.ApplicationGatewayRequestRoutingRuleType
:param backend_address_pool: Backend address pool resource of the
application gateway.
:type backend_address_pool:
~azure.mgmt.network.v2018_01_01.models.SubResource
    :param backend_http_settings: Backend http settings resource of the
     application gateway.
:type backend_http_settings:
~azure.mgmt.network.v2018_01_01.models.SubResource
:param http_listener: Http listener resource of the application gateway.
:type http_listener: ~azure.mgmt.network.v2018_01_01.models.SubResource
:param url_path_map: URL path map resource of the application gateway.
:type url_path_map: ~azure.mgmt.network.v2018_01_01.models.SubResource
:param redirect_configuration: Redirect configuration resource of the
application gateway.
:type redirect_configuration:
~azure.mgmt.network.v2018_01_01.models.SubResource
:param provisioning_state: Provisioning state of the request routing rule
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'rule_type': {'key': 'properties.ruleType', 'type': 'str'},
'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'},
'http_listener': {'key': 'properties.httpListener', 'type': 'SubResource'},
'url_path_map': {'key': 'properties.urlPathMap', 'type': 'SubResource'},
'redirect_configuration': {'key': 'properties.redirectConfiguration', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, rule_type=None, backend_address_pool=None, backend_http_settings=None, http_listener=None, url_path_map=None, redirect_configuration=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayRequestRoutingRule, self).__init__(id=id, **kwargs)
self.rule_type = rule_type
self.backend_address_pool = backend_address_pool
self.backend_http_settings = backend_http_settings
self.http_listener = http_listener
self.url_path_map = url_path_map
self.redirect_configuration = redirect_configuration
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
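# --- Illustrative usage (editorial addition, not part of the generated file) ---
# Constructing the model directly; the resource IDs below are placeholders,
# and SubResource (imported at the top of this file) accepts an `id` keyword:
#
#   rule = ApplicationGatewayRequestRoutingRule(
#       id='<routing-rule-resource-id>',
#       rule_type='Basic',
#       backend_address_pool=SubResource(id='<backend-pool-resource-id>'),
#       http_listener=SubResource(id='<http-listener-resource-id>'),
#   )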
avg_line_length: 49.037975 | max_line_length: 275 | alphanum_fraction: 0.680692

hexsha: 0d9cfbed35d44d3a806b92669b29e4b9b41b2265 | size: 2,257 | ext: py | lang: Python
path: third_party/gsutil/gslib/addlhelp/anon.py | repo: Martijnve23/catapult | head: 5c63b19d221af6a12889e8727acc85d93892cab7 | licenses: ["BSD-3-Clause"]
stars: 1,894 (2015-04-17T18:29:53.000Z to 2022-03-28T22:41:06.000Z) | issues: 4,640 (2015-07-08T16:19:08.000Z to 2019-12-02T15:01:27.000Z) | forks: 698 (2015-06-02T19:18:35.000Z to 2022-03-29T16:57:15.000Z)
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help text for anonymous access."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
gsutil users can access publicly readable data without obtaining
credentials. For example, the gs://uspto-pair bucket contains a number
of publicly readable objects, so any user can run the following command
without first obtaining credentials:
gsutil ls gs://uspto-pair/applications/0800401*
Users can similarly download objects they find via the above gsutil ls
command.
See "gsutil help acls" for more details about data protection.
<B>Configuring/Using Credentials via Cloud SDK Distribution of gsutil</B>
If a user without credentials attempts to access protected data using gsutil,
they will be prompted to run gcloud init to obtain credentials.
<B>Configuring/Using Credentials via Standalone gsutil Distribution</B>
If a user without credentials attempts to access protected data using gsutil,
they will be prompted to run gsutil config to obtain credentials.
""")
class CommandOptions(HelpProvider):
"""Additional help text for anonymous access."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='anon',
help_name_aliases=['anonymous', 'public'],
help_type='additional_help',
help_one_line_summary='Accessing Public Data Without Credentials',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
avg_line_length: 37.616667 | max_line_length: 79 | alphanum_fraction: 0.770049

hexsha: 5c67e76d41cdd04a6e138c3d14a43cb22e88bc84 | size: 926 | ext: py | lang: Python
path: content/charts/amdahl-efficiency.py | repo: ENCCS/veloxchem-hpc | head: 548c68f90816f4774a7ed9d5c18b1a1afb98f1bf | licenses: ["CC-BY-4.0"]
stars: null | issues: 3 (2022-02-28T14:49:31.000Z to 2022-03-03T10:04:08.000Z) | forks: 3 (2022-02-17T01:18:05.000Z to 2022-03-05T11:59:10.000Z)
#!/usr/bin/env python3
import numpy as np
import plotly.graph_objects as go
serial_fraction = [0.001, 0.01, 0.1, 0.3, 0.5]
workers = np.around(np.geomspace(1, 2048, num=12)).astype(int)
fig = go.Figure()
for i, f in enumerate(serial_fraction):
efficiency = 1 / np.multiply(workers, (f + (1 - f) / workers))
fig.add_trace(
go.Scatter(
name=f"{f*100}%",
x=workers,
y=efficiency,
mode="lines+markers",
marker_symbol=i,
hovertemplate="~%{y:.2%}<extra></extra>",
)
)
fig.update_layout(
title="Efficiency according to Amdahl's law",
hovermode="x unified",
xaxis_title="Number of workers",
yaxis_title="Efficiency",
height=500,
width=600,
xaxis=dict(type="log", dtick=np.log10(2)),
yaxis=dict(tickformat=".0%"),
legend=dict(title="Serial fraction"),
)
fig.write_json("amdahl-efficiency.json")
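# --- Worked check (editorial addition, not part of the original chart script) ---
# The quantity plotted above is Amdahl's-law speedup divided by the worker
# count:
#   speedup(p)    = 1 / (f + (1 - f) / p)
#   efficiency(p) = speedup(p) / p = 1 / (p * (f + (1 - f) / p))
# For a 10% serial fraction (f = 0.1) and p = 64 workers:
#   speedup    = 1 / (0.1 + 0.9 / 64) ~= 8.77
#   efficiency = 8.77 / 64            ~= 0.14  (about 14%)
# which matches the steep efficiency drop the chart shows as f grows.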
avg_line_length: 26.457143 | max_line_length: 66 | alphanum_fraction: 0.605832

hexsha: 910040b14ea98f39ea41cdabfcfbc58fcdf0c978 | size: 1,772 | ext: py | lang: Python
path: stellar/rooms.py | repo: wg4568/PyStellarEngine | head: feec17fa5795fe2aa131bdf9d36b4b5b4f1a2601 | licenses: ["MIT"]
stars: 2 (2017-05-02T01:18:06.000Z to 2017-05-02T10:33:48.000Z) | issues: null | forks: null
class Room:
def __init__(self):
self.game = None
self.background = (0, 0, 0)
self.all_objects = []
self.objects = []
self.fixtures = []
self.active = False
self.size = None
def game_link(self, game):
self.game = game
self.size = self.game.size
def add_object(self, obj):
obj.room_link(self)
self.all_objects.append(obj)
def add_fixture(self, fixture, posn):
self.fixtures.append([fixture, posn])
def activate(self):
self.active = True
self.on_load()
def deactivate(self):
self.active = False
self.on_unload()
def _handle_event(self, event):
self.handle_event(event)
def _logic(self):
        # Materialise the list: self.objects is iterated again in _control()
        # and _draw(), and a Python 3 filter object is single-use.
        self.objects = [obj for obj in self.all_objects if obj.enabled]
# self.objects = sorted(self.objects, key=lambda x: x.layer)
for obj in self.objects:
obj._logic()
self.logic()
def _control(self, buttons, mouse):
for obj in self.objects:
obj._control(buttons, mouse)
self.control(buttons, mouse)
def _draw(self):
self.game.screen.fill(self.background)
for fixture, posn in self.fixtures:
fixture.draw(self, posn)
for obj in self.objects:
obj._draw()
self.draw()
def mouse_pos(self):
return self.game.mousepos
def draw_rect(self, color, dims):
self.game.pygame.draw.rect(self.game.screen, color, dims)
def draw_lines(self, color, lines, width):
self.game.pygame.draw.lines(self.game.screen, color, True, lines, width)
def draw_ellipse(self, color, dims):
self.game.pygame.draw.ellipse(self.game.screen, color, dims)
def draw_blit(self, surf, posn):
self.game.screen.blit(surf, posn)
def logic(self):
pass
def control(self, buttons, mouse):
pass
def draw(self):
pass
def handle_event(self, event):
pass
def on_load(self):
pass
def on_unload(self):
pass
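# --- Illustrative sketch (editorial addition, not part of the original file) ---
# Room is meant to be subclassed by overriding the empty hook methods above
# (logic, control, draw, handle_event, on_load, on_unload) rather than the
# underscore-prefixed internals. The colours and rectangle here are arbitrary.
class _ExampleRoom(Room):
    def on_load(self):
        self.background = (20, 20, 40)

    def draw(self):
        # Runs each frame after fixtures and objects have been drawn.
        self.draw_rect((255, 255, 255), (10, 10, 100, 50))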
avg_line_length: 18.652632 | max_line_length: 74 | alphanum_fraction: 0.692438

hexsha: bd87f03baffa50bd99d559ddd3e1c68341b3c234 | size: 10,709 | ext: py | lang: Python
path: scripts/validate.py | repo: risseraka/english-wordnet | head: 72c8fbcd616937b8bdba3340899bf234eaed77c0 | licenses: ["CC-BY-4.0"]
stars: null | issues: null | forks: null
from wordnet import *
import re
import sys
import glob
import sense_keys
def check_symmetry(wn, fix):
errors = []
for synset in wn.synsets:
for rel in synset.synset_relations:
if rel.rel_type in inverse_synset_rels:
synset2 = wn.synset_by_id(rel.target)
if not synset2:
# This error only happens if the XML validation is not being carried out!
print("Referencing bad synset ID %s from %s" % (rel.target, synset.id))
else:
if not any(r for r in synset2.synset_relations if r.target == synset.id and r.rel_type == inverse_synset_rels[rel.rel_type]):
if fix:
errors.append("python3 scripts/change-relation.py --add --new-relation %s %s %s" % (inverse_synset_rels[rel.rel_type].value, synset2.id, synset.id))
else:
errors.append("No symmetric relation for %s =%s=> %s" % (synset.id, rel.rel_type, synset2.id))
for entry in wn.entries:
for sense in entry.senses:
for rel in sense.sense_relations:
if rel.rel_type in inverse_sense_rels:
sense2 = wn.sense_by_id(rel.target)
if not any(r for r in sense2.sense_relations if r.target == sense.id and r.rel_type == inverse_sense_rels[rel.rel_type]):
if fix:
errors.append("python3 scripts/change-relation.py --add --new-relation %s %s %s" % (inverse_sense_rels[rel.rel_type].value, sense2.id, sense.id))
else:
errors.append("No symmetric relation for %s =%s=> %s" % (sense.id, rel.rel_type, sense2.id))
return errors
def check_transitive(wn, fix):
errors = []
for synset in wn.synsets:
for rel in synset.synset_relations:
if rel.rel_type == SynsetRelType.HYPERNYM:
synset2 = wn.synset_by_id(rel.target)
for rel2 in synset2.synset_relations:
if any(r for r in synset.synset_relations if r.target == rel2.target and r.rel_type == SynsetRelType.HYPERNYM):
if fix:
errors.append("python scripts/change-relation.py --delete %s %s" % (synset.id, rel2.target))
else:
errors.append("Transitive error for %s => %s => %s" %(synset.id, synset2.id, rel2.target))
return errors
def check_no_loops(wn):
hypernyms = {}
for synset in wn.synsets:
hypernyms[synset.id] = set()
for rel in synset.synset_relations:
if rel.rel_type == SynsetRelType.HYPERNYM:
hypernyms[synset.id].add(rel.target)
changed = True
while changed:
changed = False
for synset in wn.synsets:
n_size = len(hypernyms[synset.id])
for c in hypernyms[synset.id]:
hypernyms[synset.id] = hypernyms[synset.id].union(hypernyms.get(c, []))
if len(hypernyms[synset.id]) != n_size:
changed = True
if synset.id in hypernyms[synset.id]:
return ["Loop for %s" % (synset.id)]
return []
def check_not_empty(wn, ss):
    return bool(wn.members_by_id(ss.id))
def check_ili(ss, fix):
errors = 0
if (not ss.ili or ss.ili == "in") and not ss.ili_definition:
if fix:
print("python3 scripts/change-definition.py --ili %s" % ss.id)
else:
print("%s does not have an ILI definition" % ss.id)
errors += 1
return errors
def check_lex_files(wn, fix):
pos_map = {
"nou": PartOfSpeech.NOUN,
"ver": PartOfSpeech.VERB,
"adj": PartOfSpeech.ADJECTIVE,
"adv": PartOfSpeech.ADVERB
}
errors = 0
for f in glob.glob("src/wn-*.xml"):
lexfile = f[7:-4]
lex_pos = pos_map[lexfile[:3]]
swn = parse_wordnet(f)
for synset in swn.synsets:
if synset.lex_name != lexfile:
print("%s declared in %s but listed as %s" % (synset.id, lexfile, synset.lex_name))
errors += 1
if not equal_pos(lex_pos, synset.part_of_speech):
print("%s declared in %s but has wrong POS %s" % (synset.id, lexfile, synset.part_of_speech))
errors += 1
for entry in swn.entries:
if len(entry.senses) == 0:
print("%s is empty in %s" % (entry.id, lexfile))
errors += 1
for sense in entry.senses:
if not sense.sense_key:
print("%s does not have a sense key" % (sense.id))
errors += 1
calc_sense_key = sense_keys.get_sense_key(wn, swn, entry, sense, f)
if sense.sense_key != calc_sense_key:
if fix:
print("sed -i 's/%s/%s/' src/*" % (sense.sense_key, calc_sense_key))
else:
print("%s has declared key %s but should be %s" % (sense.id,
sense.sense_key, calc_sense_key))
errors += 1
return errors
valid_id = re.compile("^ewn-[A-Za-z0-9_\\-.]*$")
valid_sense_id = re.compile("^ewn-[A-Za-z0-9_\\-.]+-[nvars]-[0-9]{8}-[0-9]{2}$")
valid_synset_id = re.compile("^ewn-[0-9]{8}-[nvars]$")
def is_valid_id(xml_id):
return bool(valid_id.match(xml_id))
def is_valid_synset_id(xml_id):
return bool(valid_synset_id.match(xml_id))
def is_valid_sense_id(xml_id):
return bool(valid_sense_id.match(xml_id))
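# Illustrative examples of the ID patterns above (editorial addition):
#   is_valid_id("ewn-example")                     -> True
#   is_valid_synset_id("ewn-00001740-n")           -> True
#   is_valid_sense_id("ewn-example-n-00001740-01") -> True
#   is_valid_synset_id("ewn-1740-n")               -> False (needs 8 digits)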
def main():
wn = parse_wordnet("wn.xml")
if len(sys.argv) > 1 and sys.argv[1] == "--fix":
fix = True
else:
fix = False
errors = 0
errors += check_lex_files(wn, fix)
for entry in wn.entries:
if entry.id[-1:] != entry.lemma.part_of_speech.value:
print("ERROR: Entry ID not same as part of speech %s as %s" % (entry.id, entry.lemma.part_of_speech.value))
errors += 1
if not is_valid_id(entry.id):
if fix:
sys.stderr.write("Cannot be fixed")
sys.exit(-1)
print("ERROR: Invalid ID " + entry.id)
errors += 1
for sense in entry.senses:
if not is_valid_sense_id(sense.id):
if fix:
sys.stderr.write("Cannot be fixed")
sys.exit(-1)
print("ERROR: Invalid ID " + sense.id)
errors += 1
synset = wn.synset_by_id(sense.synset)
if entry.lemma.part_of_speech != synset.part_of_speech:
print("ERROR: Part of speech of entry not the same as synset %s in %s" % (entry.id, synset.id))
errors += 1
for sr in sense.sense_relations:
if sr.rel_type == SenseRelType.PERTAINYM:
ss_source = wn.synset_by_id(sense.synset)
if ((not equal_pos(ss_source.part_of_speech, PartOfSpeech.ADJECTIVE)
and not equal_pos(ss_source.part_of_speech, PartOfSpeech.ADVERB))):
print("ERROR: Pertainyms should be between adjectives %s => %s" % (sense.id, sr.target))
errors += 1
#if sr.target == sense.id:
# print("ERROR: Reflexive sense relation %s" % (sense.id))
# errors += 1
for synset in wn.synsets:
if synset.id[-1:] != synset.part_of_speech.value:
print("ERROR: Synset ID not same as part of speech %s as %s" % (synset.id, synset.part_of_speech.value))
errors += 1
if not is_valid_synset_id(synset.id):
if fix:
sys.stderr.write("Cannot be fixed")
sys.exit(-1)
print("ERROR: Invalid ID " + synset.id)
errors += 1
if not check_not_empty(wn, synset):
print("ERROR: Empty synset " + synset.id)
errors += 1
errors += check_ili(synset, fix)
similars = 0
for sr in synset.synset_relations:
if (sr.rel_type == SynsetRelType.HYPERNYM and
not equal_pos(synset.part_of_speech, wn.synset_by_id(sr.target).part_of_speech)):
print("ERROR: Cross-part-of-speech hypernym %s => %s" % (synset.id, sr.target))
errors += 1
if sr.rel_type == SynsetRelType.SIMILAR:
if (not equal_pos(synset.part_of_speech, PartOfSpeech.VERB) and
not equal_pos(synset.part_of_speech, PartOfSpeech.ADJECTIVE)):
print("ERROR: similar not between verb/adjective %s => %s" % (synset.id, sr.target))
errors += 1
similars += 1
if similars > 1 and synset.part_of_speech == PartOfSpeech.ADJECTIVE_SATELLITE:
print("ERROR: satellite of more than one synset %s" % (synset.id))
errors += 1
if sr.rel_type == SynsetRelType.ANTONYM:
print("ERROR: antonymy should be at the sense level %s => %s" % (synset.id, sr.target))
errors += 1
#if sense.id == sr.target:
# print("ERROR: reflexive synset relation for %s" % (synset.id))
# errors += 1
if synset.part_of_speech == PartOfSpeech.ADJECTIVE_SATELLITE and similars == 0:
print("ERROR: satellite must have at least one similar link %s" % (synset.id))
errors += 1
if len(synset.definitions) == 0:
print("ERROR: synset without definition %s" % (synset.id))
errors += 1
for defn in synset.definitions:
if len(defn.text) == 0:
print("ERROR: empty definition for %s" % (synset.id))
errors += 1
for error in check_symmetry(wn, fix):
if fix:
print(error)
else:
print("ERROR: " + error)
errors += 1
for error in check_transitive(wn, fix):
if fix:
print(error)
else:
print("ERROR: " + error)
errors += 1
for error in check_no_loops(wn):
if fix:
sys.stderr.write("Cannot be fixed")
sys.exit(-1)
else:
print("ERROR: " + error)
errors += 1
if fix:
pass
elif errors > 0:
print("Validation failed. %d errors" % errors)
sys.exit(-1)
else:
print("No validity issues")
if __name__ == "__main__":
main()
avg_line_length: 40.411321 | max_line_length: 176 | alphanum_fraction: 0.53964

hexsha: e7534867323d0a3ae75145b105bff28d83b54d8c | size: 1,549 | ext: py | lang: Python
path: se/commands/prepare_release.py | repo: jggimi/tools | head: b4aa35f986b22ab1b92dae1e0bfad9c4d94b6cf7 | licenses: ["CC0-1.0"]
stars: null | issues: null | forks: null
"""
This module implements the `se prepare-release` command.
"""
import argparse
from pathlib import Path
import se
from se.se_epub import SeEpub
def prepare_release() -> int:
"""
Entry point for `se prepare-release`
"""
parser = argparse.ArgumentParser(description="Calculate work word count, insert release date if not yet set, and update modified date and revision number.")
parser.add_argument("-n", "--no-word-count", dest="word_count", action="store_false", help="don’t calculate word count")
parser.add_argument("-r", "--no-revision", dest="revision", action="store_false", help="don’t increment the revision number")
parser.add_argument("-v", "--verbose", action="store_true", help="increase output verbosity")
parser.add_argument("directories", metavar="DIRECTORY", nargs="+", help="a Standard Ebooks source directory")
args = parser.parse_args()
for directory in args.directories:
directory = Path(directory).resolve()
if args.verbose:
print(f"Processing {directory} ...")
try:
se_epub = SeEpub(directory)
if args.word_count:
if args.verbose:
print("\tUpdating word count and reading ease ...", end="", flush=True)
se_epub.update_word_count()
se_epub.update_flesch_reading_ease()
if args.verbose:
print(" OK")
if args.revision:
if args.verbose:
print("\tUpdating revision number ...", end="", flush=True)
se_epub.set_release_timestamp()
if args.verbose:
print(" OK")
except se.SeException as ex:
se.print_error(ex)
return ex.code
return 0
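# --- Illustrative invocations (editorial addition, not part of the original file) ---
# Based on the argparse definition above:
#   se prepare-release .                         # word count + revision update
#   se prepare-release -v ~/ebooks/some-ebook    # verbose output
#   se prepare-release --no-word-count DIRECTORY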
avg_line_length: 27.660714 | max_line_length: 157 | alphanum_fraction: 0.704971

hexsha: 6bc893d129511ba0641153780336f25efb184b52 | size: 2,140 | ext: py | lang: Python
path: tpDcc/libs/unittests/dccs/maya/result.py | repo: tpDcc/tpDcc-libs-unittest | head: 8897c8feb45b2d474495af8f0370c6c504959873 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains tpDcc-libs-unittest unit test Maya result class
"""
from __future__ import print_function, division, absolute_import
import os
import shutil
import logging
import maya.cmds
from tpDcc.libs.unittests.core import settings, result
from tpDcc.libs.unittests.dccs.maya import scripteditor
class MayaUnitTestResult(result.BaseUnitTestResult, object):
"""
    Customizes the Maya test result so we can do things like doing a file-new
    between each test and suppressing Script Editor output
"""
def startTestRun(self):
"""
Called before any tests are run
"""
super(MayaUnitTestResult, self).startTestRun()
scripteditor.MayaScriptEditorState.suppress_output()
if settings.UnitTestSettings().buffer_output:
# Disable any logging while running tests. By disabling critical, we are disabling logging at all levels
# below critical as well
logging.disable(logging.CRITICAL)
def stopTestRun(self):
"""
Called after all tests are run
"""
if settings.UnitTestSettings().buffer_output:
# Restore logging state
logging.disable(logging.NOTSET)
scripteditor.MayaScriptEditorState.restore_output()
if settings.UnitTestSettings().delete_files and os.path.exists(settings.UnitTestSettings().temp_dir):
shutil.rmtree(settings.UnitTestSettings().temp_dir)
super(MayaUnitTestResult, self).stopTestRun()
def stopTest(self, test):
"""
Called after an individual test is run
@param test: TestCase that just ran
"""
super(MayaUnitTestResult, self).stopTest(test)
if settings.UnitTestSettings().file_new:
maya.cmds.file(f=True, new=True)
def addSuccess(self, test):
"""
Override the base addSuccess method so we can store a list of the successful tests
        @param test: TestCase that successfully ran.
"""
super(MayaUnitTestResult, self).addSuccess(test)
self._successes.append(test)
avg_line_length: 30.140845 | max_line_length: 116 | alphanum_fraction: 0.674299

hexsha: 9adcf7d74bd677a580d9a5ecd94ce95e4efef084 | size: 8,941 | ext: py | lang: Python
path: test/test_dataset.py | repo: alexandrugavril/habitat-api | head: 95106a965355e024dc7ebd109519799a64530660 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import groupby, islice
import pytest
from habitat.core.dataset import Dataset, Episode
def _construct_dataset(num_episodes, num_groups=10):
episodes = []
for i in range(num_episodes):
episode = Episode(
episode_id=str(i),
scene_id="scene_id_" + str(i % num_groups),
start_position=[0, 0, 0],
start_rotation=[0, 0, 0, 1],
)
episodes.append(episode)
dataset = Dataset()
dataset.episodes = episodes
return dataset
def test_scene_ids():
dataset = _construct_dataset(100)
assert dataset.scene_ids == ["scene_id_" + str(ii) for ii in range(10)]
def test_get_scene_episodes():
dataset = _construct_dataset(100)
scene = "scene_id_0"
scene_episodes = dataset.get_scene_episodes(scene)
assert len(scene_episodes) == 10
for ep in scene_episodes:
assert ep.scene_id == scene
def test_filter_episodes():
dataset = _construct_dataset(100)
def filter_fn(episode: Episode) -> bool:
return int(episode.episode_id) % 2 == 0
filtered_dataset = dataset.filter_episodes(filter_fn)
assert len(filtered_dataset.episodes) == 50
for ep in filtered_dataset.episodes:
assert filter_fn(ep)
def test_get_splits_even_split_possible():
dataset = _construct_dataset(100)
splits = dataset.get_splits(10)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 10
def test_get_splits_with_remainder():
dataset = _construct_dataset(100)
splits = dataset.get_splits(11)
assert len(splits) == 11
for split in splits:
assert len(split.episodes) == 9
def test_get_splits_num_episodes_specified():
dataset = _construct_dataset(100)
splits = dataset.get_splits(10, 3, False)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 3
assert len(dataset.episodes) == 100
dataset = _construct_dataset(100)
splits = dataset.get_splits(10, 10)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 10
assert len(dataset.episodes) == 100
dataset = _construct_dataset(100)
splits = dataset.get_splits(10, 3, True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 3
assert len(dataset.episodes) == 30
dataset = _construct_dataset(100)
with pytest.raises(ValueError):
splits = dataset.get_splits(10, 20)
def test_get_splits_collate_scenes():
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, 23, collate_scene_ids=True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 23
prev_ids = set()
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
assert split.episodes[ii - 1].scene_id == ep.scene_id
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, 200, collate_scene_ids=False)
assert len(splits) == 10
for split in splits:
prev_ids = set()
found_not_collated = False
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
if split.episodes[ii - 1].scene_id != ep.scene_id:
found_not_collated = True
break
assert found_not_collated
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, collate_scene_ids=True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 1000
prev_ids = set()
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
assert split.episodes[ii - 1].scene_id == ep.scene_id
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, collate_scene_ids=False)
assert len(splits) == 10
for split in splits:
prev_ids = set()
found_not_collated = False
for ii, ep in enumerate(split.episodes):
if ep.scene_id not in prev_ids:
prev_ids.add(ep.scene_id)
else:
if split.episodes[ii - 1].scene_id != ep.scene_id:
found_not_collated = True
break
assert found_not_collated
def test_get_splits_sort_by_episode_id():
dataset = _construct_dataset(10000)
splits = dataset.get_splits(10, 23, sort_by_episode_id=True)
assert len(splits) == 10
for split in splits:
assert len(split.episodes) == 23
for ii, ep in enumerate(split.episodes):
if ii > 0:
assert ep.episode_id >= split.episodes[ii - 1].episode_id
@pytest.mark.parametrize(
"num_episodes,num_splits",
[(994, 64), (1023, 64), (1024, 64), (1025, 64), (10000, 9), (10000, 10)],
)
def test_get_splits_func(num_episodes: int, num_splits: int):
dataset = _construct_dataset(num_episodes)
splits = dataset.get_splits(num_splits, allow_uneven_splits=True)
assert len(splits) == num_splits
assert sum([len(split.episodes) for split in splits]) == num_episodes
splits = dataset.get_splits(num_splits, allow_uneven_splits=False)
assert len(splits) == num_splits
assert (
sum(map(lambda s: s.num_episodes, splits))
== (num_episodes // num_splits) * num_splits
)
def test_sample_episodes():
dataset = _construct_dataset(1000)
ep_iter = dataset.get_episode_iterator(
num_episode_sample=1000, cycle=False
)
assert len(list(ep_iter)) == 1000
ep_iter = dataset.get_episode_iterator(num_episode_sample=0, cycle=False)
assert len(list(ep_iter)) == 0
with pytest.raises(ValueError):
dataset.get_episode_iterator(num_episode_sample=1001, cycle=False)
ep_iter = dataset.get_episode_iterator(num_episode_sample=100, cycle=True)
ep_id_list = [e.episode_id for e in list(islice(ep_iter, 100))]
assert len(set(ep_id_list)) == 100
next_episode = next(ep_iter)
assert next_episode.episode_id in ep_id_list
ep_iter = dataset.get_episode_iterator(num_episode_sample=0, cycle=False)
with pytest.raises(StopIteration):
next(ep_iter)
def test_iterator_cycle():
dataset = _construct_dataset(100)
ep_iter = dataset.get_episode_iterator(
cycle=True, shuffle=False, group_by_scene=False
)
for i in range(200):
episode = next(ep_iter)
assert episode.episode_id == dataset.episodes[i % 100].episode_id
ep_iter = dataset.get_episode_iterator(cycle=True, num_episode_sample=20)
episodes = list(islice(ep_iter, 20))
for i in range(200):
episode = next(ep_iter)
assert episode.episode_id == episodes[i % 20].episode_id
def test_iterator_shuffle():
dataset = _construct_dataset(100)
episode_iter = dataset.get_episode_iterator(shuffle=True)
first_round_episodes = list(islice(episode_iter, 100))
second_round_episodes = list(islice(episode_iter, 100))
# both rounds should have same episodes but in different order
assert sorted(first_round_episodes) == sorted(second_round_episodes)
assert first_round_episodes != second_round_episodes
# both rounds should be grouped by scenes
first_round_scene_groups = [
k for k, g in groupby(first_round_episodes, key=lambda x: x.scene_id)
]
second_round_scene_groups = [
k for k, g in groupby(second_round_episodes, key=lambda x: x.scene_id)
]
assert len(first_round_scene_groups) == len(second_round_scene_groups)
assert len(first_round_scene_groups) == len(set(first_round_scene_groups))
def test_iterator_scene_switching():
total_ep = 1000
max_repeat = 25
dataset = _construct_dataset(total_ep)
episode_iter = dataset.get_episode_iterator(max_scene_repeat=max_repeat)
episodes = sorted(dataset.episodes, key=lambda x: x.scene_id)
# episodes before max_repeat reached should be identical
for i in range(max_repeat):
episode = next(episode_iter)
assert episode.episode_id == episodes.pop(0).episode_id
remaining_episodes = list(islice(episode_iter, total_ep - max_repeat))
# remaining episodes should be same but in different order
assert len(remaining_episodes) == len(episodes)
assert remaining_episodes != episodes
assert sorted(remaining_episodes) == sorted(episodes)
# next episodes should still be grouped by scene (before next switching)
assert len(set([e.scene_id for e in remaining_episodes[:max_repeat]])) == 1
avg_line_length: 33.867424 | max_line_length: 79 | alphanum_fraction: 0.676546

hexsha: b97548f3857ba8edccb8632a1d7d76b2e47ac195 | size: 1,449 | ext: py | lang: Python
path: networks/modular_downscaling_model/input_modules/InputModule.py | repo: khoehlein/CNNs-for-Wind-Field-Downscaling | head: eb8418d4d893fcb2beb929abb241281b7a9b6a95 | licenses: ["MIT"]
stars: 5 (2021-05-05T06:08:52.000Z to 2022-03-24T04:57:52.000Z) | issues: null | forks: 2 (2021-08-07T05:18:05.000Z to 2022-03-31T03:48:37.000Z)
import torch
import torch.nn as nn
class InputModule(nn.Module):
def __init__(self, module_lr, module_hr=None, module_combined=None):
super(InputModule, self).__init__()
output_channels = self._verify_compatibility(module_lr, module_hr, module_combined)
self.module_lr = module_lr
self.module_hr = module_hr
self.module_combined = module_combined
self.output_channels = output_channels
@staticmethod
def _verify_compatibility(module_lr, module_hr, module_combined):
for m in [module_lr, module_hr, module_combined]:
assert hasattr(m, 'output_channels') or (m is None)
total_output_channels = module_lr.output_channels
if module_hr is not None:
total_output_channels += module_hr.output_channels
if module_combined is not None:
assert hasattr(module_combined, 'input_channels')
assert module_combined.input_channels == total_output_channels
total_output_channels = module_combined.output_channels
return total_output_channels
def forward(self, input_lr, input_hr=None):
output = self.module_lr(input_lr)
if self.module_hr is not None:
assert input_hr is not None
output = torch.cat([output, self.module_hr(input_hr)], dim=1)
if self.module_combined is not None:
output = self.module_combined(output)
return output
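# --- Illustrative sketch (editorial addition, not part of the original file) ---
# The contract InputModule expects from its sub-modules: each must expose
# `output_channels`, and a combined module must also expose a matching
# `input_channels`. The stub below is hypothetical. Note that forward()
# concatenates the two branches along the channel dimension, so they must
# produce matching spatial sizes.
class _StubModule(nn.Module):
    def __init__(self, input_channels, output_channels):
        super(_StubModule, self).__init__()
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)

# Example wiring: an 8-channel low-res branch plus a 4-channel high-res
# branch, concatenated to 12 channels for the combined module:
#   module = InputModule(module_lr=_StubModule(3, 8),
#                        module_hr=_StubModule(1, 4),
#                        module_combined=_StubModule(12, 16))
#   out = module(torch.randn(2, 3, 16, 16), torch.randn(2, 1, 16, 16))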
avg_line_length: 41.4 | max_line_length: 91 | alphanum_fraction: 0.693582

hexsha: 271adf1d45c9c87358e977fcb7ef04f9d7e3b5d5 | size: 701 | ext: py | lang: Python
path: dataset/models/tf/layers/__init__.py | repo: mikhailkin/dataset | head: 7417483fdbe2e3743af4d614cb9036fd5b1375c0 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
""" Custom tf layers and operations """
import numpy as np
import tensorflow as tf
from .core import flatten, flatten2d, maxout, mip, xip, alpha_dropout
from .conv_block import conv_block, upsample
from .conv import conv1d_transpose, conv1d_transpose_nn, conv_transpose, separable_conv, separable_conv_transpose
from .pooling import max_pooling, average_pooling, pooling, \
global_pooling, global_average_pooling, global_max_pooling, \
fractional_pooling
from .roi import roi_pooling_layer, non_max_suppression
from .resize import subpixel_conv, resize_bilinear_additive, resize_nn, resize_bilinear, depth_to_space
from .pyramid import pyramid_pooling, aspp
avg_line_length: 50.071429 | max_line_length: 113 | alphanum_fraction: 0.7903

hexsha: d502d78b2a3b2674d683c381495172e6dfa7e6fa | size: 12,255 | ext: py | lang: Python
path: lib/improver/utilities/solar.py | repo: TomekTrzeciak/improver | head: 74b7bc0d194c30ea7af426d153e5047ccb67f60c | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Utilities to find the relative position of the sun."""
import datetime as dt
import cf_units as unit
import numpy as np
from improver.utilities.spatial import (
lat_lon_determine, transform_grid_to_lat_lon)
from improver.utilities.temporal import iris_time_to_datetime
def calc_solar_declination(day_of_year):
"""
Calculate the Declination for the day of the year.
Calculation equivalent to the calculation defined in
NOAA Earth System Research Lab Low Accuracy Equations
https://www.esrl.noaa.gov/gmd/grad/solcalc/sollinks.html
Args:
day_of_year (int):
Day of the year 0 to 365, 0 = 1st January
Returns:
solar_declination (float):
            Declination in degrees (north-south).
"""
# Declination (degrees):
# = -(axial_tilt)*cos(360./orbital_year * day_of_year - solstice_offset)
if day_of_year < 0 or day_of_year > 365:
msg = ('Day of the year must be between 0 and 365')
raise ValueError(msg)
solar_declination = -23.5 * np.cos(np.radians(0.9856 * day_of_year + 9.3))
return solar_declination
def calc_solar_hour_angle(longitudes, day_of_year, utc_hour):
"""
Calculate the Solar Hour angle for each element of an array of longitudes.
Calculation equivalent to the calculation defined in
NOAA Earth System Research Lab Low Accuracy Equations
https://www.esrl.noaa.gov/gmd/grad/solcalc/sollinks.html
Args:
longitudes (float or numpy.ndarray):
A single Longitude or array of Longitudes
longitudes needs to be between 180.0 and -180.0 degrees
day_of_year (int):
Day of the year 0 to 365, 0 = 1st January
utc_hour (float):
Hour of the day in UTC
Returns:
        solar_hour_angle (float or numpy.ndarray):
            Hour angles in degrees (east-west).
"""
if day_of_year < 0 or day_of_year > 365:
msg = ('Day of the year must be between 0 and 365')
raise ValueError(msg)
if utc_hour < 0.0 or utc_hour > 24.0:
msg = ('Hour must be between 0 and 24.0')
raise ValueError(msg)
thetao = 2*np.pi*day_of_year/365.0
eqt = (0.000075 + 0.001868 * np.cos(thetao) -
0.032077 * np.sin(thetao) - 0.014615 * np.cos(2*thetao) -
0.040849 * np.sin(2*thetao))
    # Longitudinal correction from the Greenwich Meridian
lon_correction = 24.0*longitudes/360.0
# Solar time (hours):
solar_time = utc_hour + lon_correction + eqt*12/np.pi
# Hour angle (degrees):
solar_hour_angle = (solar_time - 12.0) * 15.0
return solar_hour_angle
def calc_solar_elevation(latitudes, longitudes, day_of_year, utc_hour,
return_sine=False):
"""
Calculate the Solar elevation.
Args:
latitudes (float or numpy.ndarray):
A single Latitude or array of Latitudes
latitudes needs to be between -90.0 and 90.0
longitudes (float or numpy.ndarray):
A single Longitude or array of Longitudes
longitudes needs to be between 180.0 and -180.0
day_of_year (int):
Day of the year 0 to 365, 0 = 1st January
utc_hour (float):
Hour of the day in UTC in hours
return_sine (bool):
If True return sine of solar elevation.
Default False.
Returns:
solar_elevation (float or numpy.ndarray):
Solar elevation in degrees for each location.
"""
if np.min(latitudes) < -90.0 or np.max(latitudes) > 90.0:
msg = ('Latitudes must be between -90.0 and 90.0')
raise ValueError(msg)
if day_of_year < 0 or day_of_year > 365:
msg = ('Day of the year must be between 0 and 365')
raise ValueError(msg)
if utc_hour < 0.0 or utc_hour > 24.0:
msg = ('Hour must be between 0 and 24.0')
raise ValueError(msg)
declination = calc_solar_declination(day_of_year)
decl = np.radians(declination)
hour_angle = calc_solar_hour_angle(longitudes, day_of_year, utc_hour)
rad_hours = np.radians(hour_angle)
lats = np.radians(latitudes)
# Calculate solar position:
solar_elevation = ((np.sin(decl) * np.sin(lats) +
np.cos(decl) * np.cos(lats) *
np.cos(rad_hours)))
if not return_sine:
solar_elevation = np.degrees(np.arcsin(solar_elevation))
return solar_elevation
def daynight_terminator(longitudes, day_of_year, utc_hour):
"""
Calculate the Latitude values of the daynight terminator
for the given longitudes.
Args:
longitudes (numpy.ndarray):
Array of longitudes.
longitudes needs to be between 180.0 and -180.0 degrees
day_of_year (int):
Day of the year 0 to 365, 0 = 1st January
utc_hour (float):
Hour of the day in UTC
Returns:
latitudes (numpy.ndarray):
latitudes of the daynight terminator
"""
if day_of_year < 0 or day_of_year > 365:
msg = ('Day of the year must be between 0 and 365')
raise ValueError(msg)
if utc_hour < 0.0 or utc_hour > 24.0:
msg = ('Hour must be between 0 and 24.0')
raise ValueError(msg)
declination = calc_solar_declination(day_of_year)
decl = np.radians(declination)
hour_angle = calc_solar_hour_angle(longitudes, day_of_year, utc_hour)
rad_hour = np.radians(hour_angle)
lats = np.arctan(-np.cos(rad_hour)/np.tan(decl))
lats = np.degrees(lats)
return lats
class DayNightMask(object):
"""
Plugin Class to generate a daynight mask for the provided cube
"""
def __init__(self):
""" Initial the DayNightMask Object """
self.night = 0
self.day = 1
def __repr__(self):
"""Represent the configured plugin instance as a string."""
result = ('<DayNightMask : '
'Day = {}, Night = {}>'.format(self.day, self.night))
return result
def _create_daynight_mask(self, cube):
"""
Create blank daynight mask cube
Args:
cube (iris.cube.Cube):
cube with the times and coordinates required for mask
Returns:
daynight_mask (iris.cube.Cube):
Blank daynight mask cube. The resulting cube will be the
same shape as the time, y, and x coordinate, other coordinates
will be ignored although they might appear as attributes
on the cube as it is extracted from the first slice.
"""
daynight_mask = next(cube.slices([cube.coord('time'),
cube.coord(axis='y'),
cube.coord(axis='x')])).copy()
daynight_mask.long_name = 'day_night_mask'
daynight_mask.standard_name = None
daynight_mask.var_name = None
daynight_mask.units = unit.Unit('1')
daynight_mask.data = np.ones(daynight_mask.data.shape,
dtype='int')*self.night
return daynight_mask
def _daynight_lat_lon_cube(self, mask_cube, day_of_year, utc_hour):
"""
Calculate the daynight mask for the provided Lat Lon cube
Args:
mask_cube (iris.cube.Cube):
daynight mask cube - data initially set to self.night
day_of_year (int):
day of the year 0 to 365, 0 = 1st January
utc_hour (float):
Hour in UTC
Returns:
mask_cube (iris.cube.Cube):
daynight mask cube - daytime set to self.day
"""
lons = mask_cube.coord('longitude').points
lats = mask_cube.coord('latitude').points
terminator_lats = daynight_terminator(lons, day_of_year, utc_hour)
lons_zeros = np.zeros_like(lons)
lats_zeros = np.zeros_like(lats).reshape(len(lats), 1)
lats_on_lon = lats.reshape(len(lats), 1) + lons_zeros
terminator_on_lon = lats_zeros + terminator_lats
dec = calc_solar_declination(day_of_year)
if dec > 0.0:
index = np.where(lats_on_lon >= terminator_on_lon)
else:
index = np.where(lats_on_lon < terminator_on_lon)
mask_cube.data[index] = self.day
return mask_cube
def process(self, cube):
"""
Calculate the daynight mask for the provided cube. Note that only the
hours and minutes of the dtval variable are used. To ensure consistent
behaviour with changes of second or subsecond precision, the second
component is added to the time object. This means that when the hours
and minutes are used, we have correctly rounded to the nearest minute,
e.g.::
dt(2017, 1, 1, 11, 59, 59) -- +59 --> dt(2017, 1, 1, 12, 0, 58)
dt(2017, 1, 1, 12, 0, 1) -- +1 --> dt(2017, 1, 1, 12, 0, 2)
dt(2017, 1, 1, 12, 0, 30) -- +30 --> dt(2017, 1, 1, 12, 1, 0)
Args:
cube (iris.cube.Cube):
input cube
Returns:
daynight_mask (iris.cube.Cube):
daynight mask cube, daytime set to self.day
nighttime set to self.night.
The resulting cube will be the same shape as
the time, y, and x coordinate, other coordinates
will be ignored although they might appear as attributes
on the cube as it is extracted from the first slice.
"""
daynight_mask = self._create_daynight_mask(cube)
dtvalues = iris_time_to_datetime(daynight_mask.coord('time'))
for i, dtval in enumerate(dtvalues):
mask_cube = daynight_mask[i]
day_of_year = (dtval - dt.datetime(dtval.year, 1, 1)).days
dtval = dtval + dt.timedelta(seconds=dtval.second)
utc_hour = (dtval.hour * 60.0 + dtval.minute) / 60.0
trg_crs = lat_lon_determine(mask_cube)
# Grids that are not Lat Lon
if trg_crs is not None:
lats, lons = transform_grid_to_lat_lon(mask_cube)
solar_el = calc_solar_elevation(lats, lons,
day_of_year, utc_hour)
mask_cube.data[np.where(solar_el > 0.0)] = self.day
else:
mask_cube = self._daynight_lat_lon_cube(mask_cube,
day_of_year, utc_hour)
daynight_mask.data[i, ::] = mask_cube.data
return daynight_mask
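# --- Illustrative usage (editorial addition, not part of the original file) ---
# The module-level helpers above can be called standalone (the DayNightMask
# class needs iris cubes). For example, near the June solstice (day 172) at
# noon UTC, 50N 0E:
#   calc_solar_declination(172)                 # close to +23.5 degrees
#   calc_solar_elevation(50.0, 0.0, 172, 12.0)  # roughly 63 degrees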
avg_line_length: 39.660194 | max_line_length: 79 | alphanum_fraction: 0.624806

hexsha: ec75db9ddbb87cfe7c065a95cd2fab10ea0106cd | size: 90 | ext: py | lang: Python
path: boto3_type_annotations_with_docs/boto3_type_annotations/codestar/__init__.py | repo: cowboygneox/boto3_type_annotations | head: 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | licenses: ["MIT"]
stars: 119 (2018-12-01T18:20:57.000Z to 2022-02-02T10:31:29.000Z) | issues: 15 (2018-11-16T00:16:44.000Z to 2021-11-13T03:44:18.000Z) | forks: 11 (2019-05-06T05:26:51.000Z to 2021-09-28T15:27:59.000Z)
from boto3_type_annotations.codestar.client import Client
__all__ = (
'Client'
)
avg_line_length: 15 | max_line_length: 57 | alphanum_fraction: 0.722222

hexsha: 896999f944912a2506b1d30f41698197db819cbc | size: 4,155 | ext: py | lang: Python
path: dateparser/data/date_translation_data/mk.py | repo: Rodp63/dateparser | head: 938a9573234679b603210bd47cc93eb258b1f1df | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
info = {
"name": "mk",
"date_order": "DMY",
"january": [
"јан",
"јануари"
],
"february": [
"фев",
"февруари"
],
"march": [
"мар",
"март"
],
"april": [
"апр",
"април"
],
"may": [
"мај"
],
"june": [
"јун",
"јуни"
],
"july": [
"јул",
"јули"
],
"august": [
"авг",
"август"
],
"september": [
"септ",
"септември"
],
"october": [
"окт",
"октомври"
],
"november": [
"ноем",
"ноември"
],
"december": [
"дек",
"декември"
],
"monday": [
"пон",
"понеделник"
],
"tuesday": [
"вт",
"вто",
"вторник"
],
"wednesday": [
"сре",
"среда"
],
"thursday": [
"чет",
"четврток"
],
"friday": [
"пет",
"петок"
],
"saturday": [
"саб",
"сабота"
],
"sunday": [
"нед",
"недела"
],
"am": [
"претпл",
"претпладне"
],
"pm": [
"попл",
"попладне"
],
"year": [
"год",
"година"
],
"month": [
"мес",
"месец"
],
"week": [
"недела",
"сед"
],
"day": [
"ден"
],
"hour": [
"час"
],
"minute": [
"мин",
"минута"
],
"second": [
"сек",
"секунда"
],
"relative-type": {
"0 day ago": [
"денес"
],
"0 hour ago": [
"часов"
],
"0 minute ago": [
"оваа минута"
],
"0 month ago": [
"овој месец"
],
"0 second ago": [
"сега"
],
"0 week ago": [
"оваа седмица"
],
"0 year ago": [
"оваа година"
],
"1 day ago": [
"вчера"
],
"1 month ago": [
"минатиот месец"
],
"1 week ago": [
"минатата седмица"
],
"1 year ago": [
"минатата година"
],
"in 1 day": [
"утре"
],
"in 1 month": [
"следниот месец"
],
"in 1 week": [
"следната седмица"
],
"in 1 year": [
"следната година"
]
},
"relative-type-regex": {
"\\1 day ago": [
"пред (\\d+) ден",
"пред (\\d+) дена"
],
"\\1 hour ago": [
"пред (\\d+) час",
"пред (\\d+) часа"
],
"\\1 minute ago": [
"пред (\\d+) минута",
"пред (\\d+) минути"
],
"\\1 month ago": [
"пред (\\d+) месец",
"пред (\\d+) месеци"
],
"\\1 second ago": [
"пред (\\d+) секунда",
"пред (\\d+) секунди"
],
"\\1 week ago": [
"пред (\\d+) седмица",
"пред (\\d+) седмици"
],
"\\1 year ago": [
"пред (\\d+) година",
"пред (\\d+) години"
],
"in \\1 day": [
"за (\\d+) ден",
"за (\\d+) дена"
],
"in \\1 hour": [
"за (\\d+) час",
"за (\\d+) часа"
],
"in \\1 minute": [
"за (\\d+) минута",
"за (\\d+) минути"
],
"in \\1 month": [
"за (\\d+) месец",
"за (\\d+) месеци"
],
"in \\1 second": [
"за (\\d+) секунда",
"за (\\d+) секунди"
],
"in \\1 week": [
"за (\\d+) седмица",
"за (\\d+) седмици"
],
"in \\1 year": [
"за (\\d+) година",
"за (\\d+) години"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
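# Illustrative usage (not part of the generated data): this translation table
# lets dateparser handle Macedonian input. Assuming dateparser is installed,
# a sketch of how it is consumed:
#
#     import dateparser
#     dateparser.parse('пред 3 дена', languages=['mk'])   # "3 days ago"
#     dateparser.parse('15 март 2021', languages=['mk'])  # 2021-03-15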
| 17.680851
| 34
| 0.272924
|
b4e565d39038b531292f95fd4096bda7b69a8492
| 14,329
|
py
|
Python
|
brian2/core/base.py
|
Kyzarok/SNNProject
|
14b555e221dbdd5100cb4f6333e49030423462ea
|
[
"BSD-2-Clause"
] | 2
|
2020-03-20T13:30:19.000Z
|
2020-03-20T13:30:57.000Z
|
brian2/core/base.py
|
Kyzarok/SNNProject
|
14b555e221dbdd5100cb4f6333e49030423462ea
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/core/base.py
|
Kyzarok/SNNProject
|
14b555e221dbdd5100cb4f6333e49030423462ea
|
[
"BSD-2-Clause"
] | null | null | null |
'''
All Brian objects should derive from `BrianObject`.
'''
import weakref
import traceback
import os
import sys
from brian2.utils.logger import get_logger
from brian2.core.names import Nameable
from brian2.units.allunits import second
from brian2.units.fundamentalunits import check_units
__all__ = ['BrianObject',
'BrianObjectException',
]
logger = get_logger(__name__)
class BrianObject(Nameable):
'''
    All Brian objects derive from this class; it defines magic tracking and update.
See the documentation for `Network` for an explanation of which
objects get updated in which order.
Parameters
----------
dt : `Quantity`, optional
The time step to be used for the simulation. Cannot be combined with
the `clock` argument.
clock : `Clock`, optional
The update clock to be used. If neither a clock, nor the `dt` argument
is specified, the `defaultclock` will be used.
when : str, optional
In which scheduling slot to simulate the object during a time step.
Defaults to ``'start'``.
order : int, optional
The priority of this object for operations occurring at the same time
step and in the same scheduling slot. Defaults to 0.
name : str, optional
A unique name for the object - one will be assigned automatically if
not provided (of the form ``brianobject_1``, etc.).
Notes
-----
The set of all `BrianObject` objects is stored in ``BrianObject.__instances__()``.
'''
@check_units(dt=second)
def __init__(self, dt=None, clock=None, when='start', order=0, name='brianobject*'):
# Setup traceback information for this object
creation_stack = []
bases = []
for modulename in ['brian2']:
if modulename in sys.modules:
base, _ = os.path.split(sys.modules[modulename].__file__)
bases.append(base)
for fname, linenum, funcname, line in traceback.extract_stack():
if all(base not in fname for base in bases):
s = ' File "{fname}", line {linenum}, in {funcname}\n {line}'.format(fname=fname,
linenum=linenum,
funcname=funcname,
line=line)
creation_stack.append(s)
creation_stack = [''] + creation_stack
#: A string indicating where this object was created (traceback with any parts of Brian code removed)
self._creation_stack = ('Object was created here (most recent call only, full details in '
'debug log):\n'+creation_stack[-1])
self._full_creation_stack = 'Object was created here:\n'+'\n'.join(creation_stack)
if dt is not None and clock is not None:
raise ValueError('Can only specify either a dt or a clock, not both.')
        if not isinstance(when, str):
from brian2.core.clocks import Clock
# Give some helpful error messages for users coming from the alpha
# version
if isinstance(when, Clock):
raise TypeError(("Do not use the 'when' argument for "
"specifying a clock, either provide a "
"timestep for the 'dt' argument or a Clock "
"object for 'clock'."))
if isinstance(when, tuple):
raise TypeError("Use the separate keyword arguments, 'dt' (or "
"'clock'), 'when', and 'order' instead of "
"providing a tuple for 'when'. Only use the "
"'when' argument for the scheduling slot.")
# General error
raise TypeError("The 'when' argument has to be a string "
"specifying the scheduling slot (e.g. 'start').")
Nameable.__init__(self, name)
#: The clock used for simulating this object
self._clock = clock
if clock is None:
from brian2.core.clocks import Clock, defaultclock
if dt is not None:
self._clock = Clock(dt=dt, name=self.name+'_clock*')
else:
self._clock = defaultclock
if getattr(self._clock, '_is_proxy', False):
from brian2.devices.device import get_device
self._clock = get_device().defaultclock
#: Used to remember the `Network` in which this object has been included
#: before, to raise an error if it is included in a new `Network`
self._network = None
#: The ID string determining when the object should be updated in `Network.run`.
self.when = when
#: The order in which objects with the same clock and ``when`` should be updated
self.order = order
self._dependencies = set()
self._contained_objects = []
self._code_objects = []
self._active = True
#: The scope key is used to determine which objects are collected by magic
self._scope_key = self._scope_current_key
logger.diagnostic("Created BrianObject with name {self.name}, "
"clock={self._clock}, "
"when={self.when}, order={self.order}".format(self=self))
#: Global key value for ipython cell restrict magic
_scope_current_key = 0
#: Whether or not `MagicNetwork` is invalidated when a new `BrianObject` of this type is added
invalidates_magic_network = True
#: Whether or not the object should be added to a `MagicNetwork`. Note that
#: all objects in `BrianObject.contained_objects` are automatically added
#: when the parent object is added, therefore e.g. `NeuronGroup` should set
#: `add_to_magic_network` to ``True``, but it should not be set for all the
#: dependent objects such as `StateUpdater`
add_to_magic_network = False
def add_dependency(self, obj):
'''
Add an object to the list of dependencies. Takes care of handling
subgroups correctly (i.e., adds its parent object).
Parameters
----------
obj : `BrianObject`
The object that this object depends on.
'''
from brian2.groups.subgroup import Subgroup
if isinstance(obj, Subgroup):
self._dependencies.add(obj.source.id)
else:
self._dependencies.add(obj.id)
def before_run(self, run_namespace):
'''
Optional method to prepare the object before a run.
TODO
'''
pass
def after_run(self):
'''
Optional method to do work after a run is finished.
Called by `Network.after_run` after the main simulation loop terminated.
'''
pass
def run(self):
for codeobj in self._code_objects:
codeobj()
contained_objects = property(fget=lambda self:self._contained_objects,
doc='''
The list of objects contained within the `BrianObject`.
When a `BrianObject` is added to a `Network`, its contained objects will
be added as well. This allows for compound objects which contain
a mini-network structure.
Note that this attribute cannot be set directly, you need to modify
the underlying list, e.g. ``obj.contained_objects.extend([A, B])``.
''')
code_objects = property(fget=lambda self:self._code_objects,
doc='''
The list of `CodeObject` contained within the `BrianObject`.
TODO: more details.
Note that this attribute cannot be set directly, you need to modify
the underlying list, e.g. ``obj.code_objects.extend([A, B])``.
''')
updaters = property(fget=lambda self:self._updaters,
doc='''
The list of `Updater` that define the runtime behaviour of this object.
TODO: more details.
Note that this attribute cannot be set directly, you need to modify
the underlying list, e.g. ``obj.updaters.extend([A, B])``.
''')
clock = property(fget=lambda self: self._clock,
doc='''
The `Clock` determining when the object should be updated.
Note that this cannot be changed after the object is
created.
''')
def _set_active(self, val):
val = bool(val)
self._active = val
for obj in self.contained_objects:
obj.active = val
active = property(fget=lambda self:self._active,
fset=_set_active,
doc='''
Whether or not the object should be run.
Inactive objects will not have their `update`
method called in `Network.run`. Note that setting or
unsetting the `active` attribute will set or unset
it for all `contained_objects`.
''')
def __repr__(self):
description = ('{classname}(clock={clock}, when={when}, order={order}, name={name})')
return description.format(classname=self.__class__.__name__,
when=self.when,
clock=self._clock,
order=self.order,
name=repr(self.name))
# This is a repeat from Nameable.name, but we want to get the documentation
# here again
name = Nameable.name
def weakproxy_with_fallback(obj):
'''
Attempts to create a `weakproxy` to the object, but falls back to the object if not possible.
'''
try:
return weakref.proxy(obj)
except TypeError:
return obj
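# A minimal sketch of the fallback behaviour (assumptions: a plain class
# instance, which supports weak references, and an int, which does not):
#
#     class _Dummy(object):
#         pass
#
#     p = weakproxy_with_fallback(_Dummy())  # a weakref.proxy
#     n = weakproxy_with_fallback(42)        # ints cannot be weak-referenced -> 42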
def device_override(name):
'''
Decorates a function/method to allow it to be overridden by the current `Device`.
The ``name`` is the function name in the `Device` to use as an override if it exists.
The returned function has an additional attribute ``original_function``
which is a reference to the original, undecorated function.
'''
def device_override_decorator(func):
def device_override_decorated_function(*args, **kwds):
from brian2.devices.device import get_device
curdev = get_device()
if hasattr(curdev, name):
return getattr(curdev, name)(*args, **kwds)
else:
return func(*args, **kwds)
device_override_decorated_function.__doc__ = func.__doc__
device_override_decorated_function.original_function = func
return device_override_decorated_function
return device_override_decorator
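# A hedged sketch of how `device_override` is used (the override name
# 'network_run' is illustrative; the actual names are defined by the Device
# classes):
#
#     class Network(object):
#         @device_override('network_run')
#         def run(self, duration):
#             ...  # default runtime implementation
#
# If the active device defines a `network_run` method it takes precedence;
# otherwise the decorated function itself is called.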
class BrianObjectException(Exception):
'''
High level exception that adds extra Brian-specific information to exceptions
This exception should only be raised at a fairly high level in Brian code to
pass information back to the user. It adds extra information about where a
`BrianObject` was defined to better enable users to locate the source of
problems.
You should use the `brian_object_exception` function to raise this, and
it should only be raised in an ``except`` block handling a prior
exception.
Parameters
----------
message : str
Additional error information to add to the original exception.
brianobj : BrianObject
The object that caused the error to happen.
original_exception : Exception
The original exception that was raised.
'''
def __init__(self, message, brianobj, original_exception):
self._brian_message = message
self._brian_objname = brianobj.name
self._brian_origexc = '\n'.join(traceback.format_exception_only(type(original_exception),
original_exception))
self._brian_origtb = traceback.format_exc()
self._brian_objcreate = brianobj._creation_stack
logger.diagnostic('Error was encountered with object "{objname}":\n{fullstack}'.format(
objname=self._brian_objname,
fullstack=brianobj._full_creation_stack))
def __str__(self):
return ('Original error and traceback:\n{origtb}\n'
'Error encountered with object named "{objname}".\n'
'{objcreate}\n\n'
'{message} {origexc}'
'(See above for original error message and traceback.)'
).format(origtb=self._brian_origtb,
origexc=self._brian_origexc,
objname=self._brian_objname, message=self._brian_message,
objcreate=self._brian_objcreate)
def brian_object_exception(message, brianobj, original_exception):
'''
Returns a `BrianObjectException` derived from the original exception.
Creates a new class derived from the class of the original exception
and `BrianObjectException`. This allows exception handling code to
respond both to the original exception class and `BrianObjectException`.
See `BrianObjectException` for arguments and notes.
'''
DerivedBrianObjectException = type('BrianObjectException',
(BrianObjectException, original_exception.__class__),
{})
new_exception = DerivedBrianObjectException(message, brianobj, original_exception)
# Copy over all exception attributes
for attribute in dir(original_exception):
if attribute.startswith('_'):
continue
try:
setattr(new_exception, attribute, getattr(original_exception, attribute))
except AttributeError: # some attributes cannot be set
pass
return new_exception
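# A minimal usage sketch (hedged; `codeobj` and `brian_obj` are placeholder
# names): raise the derived exception from an except block so the original
# traceback is preserved, as the docstring above requires:
#
#     try:
#         codeobj()
#     except Exception as exc:
#         raise brian_object_exception('Error while running code object.',
#                                      brian_obj, exc)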
| 40.36338
| 109
| 0.592226
|
6a2918be84bf52e5d0f44175cadf2a300aceb11c
| 3,529
|
py
|
Python
|
mltoolkit/mldp/tests/transformers/test_vocab_mapper.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 51
|
2020-09-25T07:05:01.000Z
|
2022-03-17T12:07:40.000Z
|
mltoolkit/mldp/tests/transformers/test_vocab_mapper.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 4
|
2020-10-19T10:00:22.000Z
|
2022-03-14T17:02:47.000Z
|
mltoolkit/mldp/tests/transformers/test_vocab_mapper.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 22
|
2020-09-22T01:06:47.000Z
|
2022-01-26T14:20:09.000Z
|
import unittest
from mltoolkit.mldp.tests.common import read_data_from_csv_file
from mltoolkit.mldp.utils.tools import Vocabulary, DataChunk
from mltoolkit.mldp.steps.transformers.nlp import VocabMapper
from mltoolkit.mldp.steps.readers import CsvReader
import copy
import numpy as np
class TestVocabMapper(unittest.TestCase):
def test_vocabulary_mapper(self):
"""Testing whether the mapper allows to map back and forth field values.
"""
data_path = 'mldp/tests/data/mock_data.csv'
target_fields = ["first_name", "last_name", "email", "gender"]
reader = CsvReader(sep=',')
vocab = Vocabulary(reader)
for target_field in target_fields:
vocab.create(data_source={"data_path": data_path},
data_fnames=target_field)
data = read_data_from_csv_file(data_path)
data_original = copy.deepcopy(data)
mapper_to = VocabMapper({target_field: vocab}, "id")
mapper_back = VocabMapper({target_field: vocab}, "token")
data = mapper_to(data)
data = mapper_back(data)
self.assertTrue((data[target_field] == data_original[target_field])
.all())
def test_vocabulary_mapper_multidim_lists(self):
"""Testing whether the mapper can map multi-dim lists."""
target_field_name = "dummy"
symbols_attr = "id"
data_chunk = DataChunk(**{target_field_name: np.array([
[["one"], ["two"]],
[["three"], ["four", "five", "six"]]
], dtype="object")})
exp_val = np.empty(2, dtype="object")
exp_val[0] = np.array([[1], [2]])
exp_val[1] = np.array([[3], [4, 5, 6]])
expected_output_chunk = DataChunk(**{target_field_name: exp_val})
# creating and populating a vocab
vocab = Vocabulary()
vocab.add_symbol("zero")
vocab.add_symbol("one")
vocab.add_symbol("two")
vocab.add_symbol("three")
vocab.add_symbol("four")
vocab.add_symbol("five")
vocab.add_symbol("six")
mapper = VocabMapper({target_field_name: vocab},
symbols_attr=symbols_attr)
actual_output_chunk = mapper(copy.deepcopy(data_chunk))
self.assertTrue(actual_output_chunk == expected_output_chunk)
def test_vocabulary_mapper_mixed_field_values(self):
"""Testing whether the mapper can map multi-dim mixed field values."""
target_field_name = "dummy"
symbols_attr = "id"
data_chunk = DataChunk(**{target_field_name: np.array([
[["one"], np.array(["two", "one"])],
[["three"], np.array(["four", "five", "six"])]
], dtype="object")})
expected_output_chunk = DataChunk(**{target_field_name: np.array([
[[1], np.array([2, 1])],
[[3], np.array([4, 5, 6])]
], dtype="object")})
# creating and populating a vocab
vocab = Vocabulary()
vocab.add_symbol("zero")
vocab.add_symbol("one")
vocab.add_symbol("two")
vocab.add_symbol("three")
vocab.add_symbol("four")
vocab.add_symbol("five")
vocab.add_symbol("six")
mapper = VocabMapper({target_field_name: vocab},
symbols_attr=symbols_attr)
actual_output_chunk = mapper(data_chunk)
self.assertTrue(actual_output_chunk == expected_output_chunk)
if __name__ == '__main__':
unittest.main()
| 35.29
| 80
| 0.606687
|
a305707db76815514a4ae1891fbee441894578e0
| 650
|
py
|
Python
|
Aula18/ex08.py
|
danicon/MD3-Curso_Python
|
3d419d440d3b28adb5c019268f4b217e7d0ce45a
|
[
"MIT"
] | null | null | null |
Aula18/ex08.py
|
danicon/MD3-Curso_Python
|
3d419d440d3b28adb5c019268f4b217e7d0ce45a
|
[
"MIT"
] | null | null | null |
Aula18/ex08.py
|
danicon/MD3-Curso_Python
|
3d419d440d3b28adb5c019268f4b217e7d0ce45a
|
[
"MIT"
] | null | null | null |
from random import randint
from time import sleep
lista = list()
jogos = list()
print('-'*30)
print(f'{"JOGA NA MEGA SENA":^30}')
print('-'*30)
quant = int(input('How many games do you want me to draw? '))
tot = 1
while tot <= quant:
cont = 0
while True:
num = randint(1, 60)
if num not in lista:
lista.append(num)
cont += 1
if cont >= 6:
break
lista.sort()
jogos.append(lista[:])
lista.clear()
tot += 1
print('-='*3, f'DRAWING {quant} GAMES', '-='*3)
for i, l in enumerate(jogos):
    print(f'Game {i+1}: {l}')
sleep(1)
print('-='*5, '< GOOD LUCK! >', '-='*5)
| 24.074074
| 61
| 0.54
|
5a24594ac470c6c102cca9da652bd195bf6d97c7
| 342
|
py
|
Python
|
src/data/migrations/0004_auto_20210220_2304.py
|
oreoluwa/stocksight
|
764167ae88904f04cea0b01161e2c17da3951355
|
[
"Apache-2.0"
] | null | null | null |
src/data/migrations/0004_auto_20210220_2304.py
|
oreoluwa/stocksight
|
764167ae88904f04cea0b01161e2c17da3951355
|
[
"Apache-2.0"
] | null | null | null |
src/data/migrations/0004_auto_20210220_2304.py
|
oreoluwa/stocksight
|
764167ae88904f04cea0b01161e2c17da3951355
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-02-20 23:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0003_auto_20210220_2251'),
]
operations = [
migrations.RenameModel(
old_name='ArticleAuthors',
new_name='ArticleAuthor',
),
]
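# Illustrative note: RenameModel keeps the underlying table and its rows, so
# this migration is reversible; rolling back to the previous state would use
# the dependency named above, e.g.
#
#     python manage.py migrate data 0003_auto_20210220_2251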
| 19
| 47
| 0.608187
|
c6da5ac62cc1e025e6675ef5710a8b1a8d22c09a
| 2,842
|
py
|
Python
|
test/functional/interface_rpc.py
|
bitcoin-global/bitcoin-global
|
8f8783245ec209ba1ae4b2c0717f9d8f2d5658ea
|
[
"MIT"
] | 3
|
2020-09-23T23:55:28.000Z
|
2021-07-10T03:21:46.000Z
|
test/functional/interface_rpc.py
|
Penny-Admixture/bitcoin-global
|
8f8783245ec209ba1ae4b2c0717f9d8f2d5658ea
|
[
"MIT"
] | 2
|
2020-07-28T08:55:30.000Z
|
2021-04-22T10:57:10.000Z
|
test/functional/interface_rpc.py
|
Penny-Admixture/bitcoin-global
|
8f8783245ec209ba1ae4b2c0717f9d8f2d5658ea
|
[
"MIT"
] | 1
|
2021-06-12T07:04:55.000Z
|
2021-06-12T07:04:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2020 The Bitcoin Global developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args):
try:
fcn(*args)
raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
except JSONRPCException as exc:
assert_equal(exc.error["code"], expected_rpc_code)
assert_equal(exc.http_status, expected_http_status)
class RPCInterfaceTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def test_getrpcinfo(self):
self.log.info("Testing getrpcinfo...")
info = self.nodes[0].getrpcinfo()
assert_equal(len(info['active_commands']), 1)
command = info['active_commands'][0]
assert_equal(command['method'], 'getrpcinfo')
assert_greater_than_or_equal(command['duration'], 0)
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, 'regtest', 'debug.log'))
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
# A basic request that will work fine.
{"method": "getblockcount", "id": 1},
# Request that will fail. The whole batch request should still
# work fine.
{"method": "invalidmethod", "id": 2},
# Another call that should succeed.
{"method": "getbestblockhash", "id": 3},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def test_http_status_codes(self):
self.log.info("Testing HTTP status codes for JSON-RPC requests...")
expect_http_status(404, -32601, self.nodes[0].invalidmethod)
expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
def run_test(self):
self.test_getrpcinfo()
self.test_batch_request()
self.test_http_status_codes()
if __name__ == '__main__':
RPCInterfaceTest().main()
| 36.435897
| 98
| 0.662913
|
b46a4922398bbc6abb0808d485dbdcb7e69e82f9
| 9,338
|
py
|
Python
|
HW2-named-entity-recognition/code_share/utils.py
|
henniekim/named-entity-recognition
|
155fd6d50cc126082b4b90dea12037be273ce24c
|
[
"MIT"
] | null | null | null |
HW2-named-entity-recognition/code_share/utils.py
|
henniekim/named-entity-recognition
|
155fd6d50cc126082b4b90dea12037be273ce24c
|
[
"MIT"
] | null | null | null |
HW2-named-entity-recognition/code_share/utils.py
|
henniekim/named-entity-recognition
|
155fd6d50cc126082b4b90dea12037be273ce24c
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
# shared global variables to be imported from model also
UNK = "$UNK$"
NUM = "$NUM$"
NONE = "O"
class Config():
# general config
dir_output = "results/test/"
dir_model = dir_output + "model.weights/"
path_log = dir_output + "log.txt"
# embeddings
dim_word = 50
dim_char = 50
filename_embedding = "../data/korean_news_100MB_word2vec.txt".format(dim_word)
filename_trimmed = "../data/korean_embedding.trimmed.npz".format(dim_word)
use_pretrained = True
use_chars = True
# dataset
filename_dev = "../data/NER_dev.txt"
filename_test = "../data/NER_test.txt"
filename_train = "../data/NER_train.txt"
max_iter = None # if not None, max number of examples in Dataset
# vocab (created from dataset with build_data.py)
filename_words = "../data/words.txt"
filename_tags = "../data/tags.txt"
filename_chars = "../data/chars.txt"
# training
nepochs = 10
dropout = 0.5
batch_size = 20
lr = 0.005 #learning rate
lr_decay = 0.9
nepoch_no_imprv = 3
# model hyperparameters
hidden_size_char = 25 # lstm on chars
hidden_size_lstm = 100 # lstm on word embeddings
def __init__(self, load=True):
"""Initialize hyperparameters and load vocabs
Args:
load_embeddings: (bool) if True, load embeddings into
np array, else None
"""
# directory for training outputs
if not os.path.exists(self.dir_output):
os.makedirs(self.dir_output)
# load if requested (default)
if load:
"""Loads vocabulary, processing functions and embeddings
Supposes that build_data.py has been run successfully and that
the corresponding files have been created
"""
# 1. vocabulary
self.vocab_words = load_vocab(self.filename_words)
self.vocab_tags = load_vocab(self.filename_tags)
self.vocab_chars = load_vocab(self.filename_chars)
self.nwords = len(self.vocab_words)
self.ntags = len(self.vocab_tags)
self.nchars = len(self.vocab_chars)
# 2. get processing functions that map str -> id
self.processing_word = get_processing_word(self.vocab_words,
self.vocab_chars, chars=self.use_chars)
self.processing_tag = get_processing_word(self.vocab_tags,
allow_unk=False)
# 3. get pre-trained embeddings
data = np.load(self.filename_trimmed)
self.embeddings = data["embeddings"]
class data_read(object):
def __init__(self, filename, processing_word=None, processing_tag=None,
max_iter=None):
        self.filename = filename  # file path
        self.processing_word = processing_word  # function applied to each word
        self.processing_tag = processing_tag  # function applied to each tag
        self.max_iter = max_iter  # maximum number of sentences
        self.length = None
def __iter__(self):
niter = 0
with open(self.filename, encoding='UTF8') as f:
words, tags = [], []
            for line in f:
                line = line.strip()
                if len(line) == 0:
                    if len(words) != 0:
                        niter += 1
                        if self.max_iter is not None and niter > self.max_iter:
                            break
                        yield words, tags
                        words, tags = [], []
                else:
                    ls = line.split(' ')
                    word, tag = ls[0], ls[-1]
                    if self.processing_word is not None:
                        word = self.processing_word(word)
                    if self.processing_tag is not None:
                        tag = self.processing_tag(tag)
                    words += [word]
                    tags += [tag]
            # yield the final sentence when the file does not end with a blank line
            if len(words) != 0:
                yield words, tags
def __len__(self):
"""Iterates once over the corpus to set and store length"""
if self.length is None:
self.length = 0
for _ in self:
self.length += 1
return self.length
def load_vocab(filename):
"""Loads vocab from a file
Args:
filename: (string) the format of the file must be one word per line.
Returns:
d: dict[word] = index
"""
# try:
d = dict()
with open(filename) as f:
for idx, word in enumerate(f):
word = word.strip()
d[word] = idx
# except IOError:
# raise MyIOError(filename)
return d
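# A small sketch of the expected file format and result (a toy vocab file is
# assumed):
#
#     # words.txt contains: "the\ncat\nsat\n"
#     load_vocab('words.txt')  # -> {'the': 0, 'cat': 1, 'sat': 2}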
def write_vocab(vocab, filename):
"""Writes a vocab to a file
Writes one word per line.
Args:
vocab: iterable that yields word
filename: path to vocab file
Returns:
write a word per line
"""
with open(filename, "w") as f:
for i, word in enumerate(vocab):
if i != len(vocab) - 1:
f.write("{}\n".format(word))
else:
f.write(word)
print("- done. {} tokens".format(len(vocab)))
def data_build():
"""Procedure to build data
You MUST RUN this procedure. It iterates over the whole dataset (train,
dev and test) and extract the vocabularies in terms of words, tags, and
characters. Having built the vocabularies it writes them in a file. The
writing of vocabulary in a file assigns an id (the line #) to each word.
It then extract the relevant GloVe vectors and stores them in a np array
such that the i-th entry corresponds to the i-th word in the vocabulary.
Args:
config: (instance of Config) has attributes like hyper-params...
"""
# get config and processing of words
config = Config(load=False)
processing_word = get_processing_word()
# Generators
dev = data_read(config.filename_dev, processing_word)
test = data_read(config.filename_test, processing_word)
train = data_read(config.filename_train, processing_word)
# Build Word and Tag vocab #-> get_
print("Building vocab(data vocabulary)...")
vocab_words = set()
vocab_tags = set()
for dataset in [train, dev, test]:
for words, tags in dataset:
vocab_words.update(words)
vocab_tags.update(tags)
print("- done. {} tokens".format(len(vocab_words)))
print("Building vocab(embedding vocabulary)...")
vocab_embed = set()
with open(config.filename_embedding, encoding='UTF8') as f:
for line in f:
word = line.strip().split(' ')[0]
vocab_embed.add(word)
print("- done. {} tokens".format(len(vocab_embed)))
vocab = vocab_words & vocab_embed
vocab.add(UNK)
vocab.add(NUM)
# Save vocab
print("Writing vocab(# of covered words with pre-trained embedding)...")
write_vocab(vocab, config.filename_words)
print("Writing vocab(# of NEtag)...")
write_vocab(vocab_tags, config.filename_tags)
vocab_for_embed = load_vocab(config.filename_words)
embeddings = np.zeros([len(vocab_for_embed), config.dim_word])
with open(config.filename_embedding, encoding='UTF8') as f:
for line in f:
line = line.strip().split(' ')
word = line[0]
embedding = [float(x) for x in line[1:]]
if word in vocab_for_embed:
word_idx = vocab_for_embed[word]
embeddings[word_idx] = np.asarray(embedding)
np.savez_compressed(config.filename_trimmed, embeddings=embeddings)
# Build and save char vocab
train = data_read(config.filename_train)
vocab_chars = set()
for words, _ in train:
for word in words:
vocab_chars.update(word)
#vocab_chars = get_char_vocab(train)
print("Writing vocab(# of char)...")
write_vocab(vocab_chars, config.filename_chars)
def get_processing_word(vocab_words=None, vocab_chars=None,
chars=False, allow_unk=True):
"""Return lambda function that transform a word (string) into list,
or tuple of (list, id) of int corresponding to the ids of the word and
its corresponding characters.
Args:
vocab: dict[word] = idx
Returns:
f("cat") = ([12, 4, 32], 12345)
= (list of char ids, word id)
"""
def f(word):
# 0. get chars of words
if vocab_chars is not None and chars == True:
char_ids = []
for char in word:
# ignore chars out of vocabulary
if char in vocab_chars:
char_ids += [vocab_chars[char]]
# 1. get id of word
if vocab_words is not None:
if word in vocab_words:
word = vocab_words[word]
else:
if allow_unk:
word = vocab_words[UNK]
else:
raise Exception("Unknow key is not allowed. Check that "\
"your vocab (tags?) is correct")
# 2. return tuple char ids, word id
if vocab_chars is not None and chars == True:
return char_ids, word
else:
return word
return f
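# A hedged usage sketch (the toy vocabularies below are hypothetical):
#
#     vocab_words = {'$UNK$': 0, 'cat': 1}
#     vocab_chars = {'c': 0, 'a': 1, 't': 2}
#     f = get_processing_word(vocab_words, vocab_chars, chars=True)
#     f('cat')  # -> ([0, 1, 2], 1), i.e. (list of char ids, word id)
#     f('dog')  # -> ([], 0); unknown chars are skipped, the word maps to $UNK$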
| 31.761905
| 94
| 0.575926
|
fc83545c3ba9868ecb3d3feb9d9f76704354913f
| 616
|
py
|
Python
|
test/unittests/test_NConc.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/test_NConc.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 6
|
2018-07-24T22:46:28.000Z
|
2018-07-29T19:13:09.000Z
|
test/unittests/test_NConc.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 1
|
2018-07-24T18:22:01.000Z
|
2018-07-24T18:22:01.000Z
|
import numpy as np
from VariableUnittest import VariableUnitTest
from gwlfe.Output.Loading import NConc
class TestNConc(VariableUnitTest):
def test_NConc(self):
z = self.z
np.testing.assert_array_almost_equal(
NConc.NConc_f(z.NRur, z.NUrb, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth, z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2),
NConc.NConc(z.NRur, z.NUrb, z.NitrConc, z.ManNitr, z.ManuredAreas, z.FirstManureMonth, z.LastManureMonth,
z.FirstManureMonth2, z.LastManureMonth2), decimal=7)
| 41.066667
| 119
| 0.686688
|
b30b59bd56ab99999a71fbc517f1f030532b0d07
| 1,353
|
py
|
Python
|
improvement-prediction/recommend_datasets.py
|
VIDA-NYU/prida
|
cb2af13704506abc73d10f5c346ea21f70dd6e65
|
[
"BSD-3-Clause"
] | 1
|
2021-06-12T02:03:54.000Z
|
2021-06-12T02:03:54.000Z
|
improvement-prediction/recommend_datasets.py
|
VIDA-NYU/prida
|
cb2af13704506abc73d10f5c346ea21f70dd6e65
|
[
"BSD-3-Clause"
] | null | null | null |
improvement-prediction/recommend_datasets.py
|
VIDA-NYU/prida
|
cb2af13704506abc73d10f5c346ea21f70dd6e65
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import json
import sys
from recommender import *
if __name__ == '__main__':
"""This script reads a training file with lines in the format
features generated by
generate_data_for_augmentation_learning_spark.py, learns
how to predict relative gains with data augmentation, and
recommends candidate datasets for recommendation based on that
"""
# Reads a parameter file in the format described in the README
if len(sys.argv) == 1:
params = json.load(open('params.json'))
else:
params = json.load(open(sys.argv[1]))
recommender = Recommender(params['learning_data_filename'], store_instances=False)
# Reads machine learning models to predict relative gains and test data
if params['augmentation_models_and_tests_filename']:
models, test_data = recommender.read_models_and_test_data(params['augmentation_models_and_tests_filename'])
else:
models, test_data = recommender.generate_models_and_test_data(params['augmentation_learning_data_filename'],
params['n_splits'])
print('done creating models and corresponding test_data')
for model, data_filename in zip(models, test_data):
recommender.generate_and_evaluate_predicted_gains(model, data_filename)
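# A hedged sketch of what params.json might contain, inferred from the keys
# read above (the file names are placeholders, not real paths):
#
#     {
#         "learning_data_filename": "learning_data.csv",
#         "augmentation_models_and_tests_filename": "",
#         "augmentation_learning_data_filename": "augmentation_data.csv",
#         "n_splits": 5
#     }
#
# An empty "augmentation_models_and_tests_filename" falls through to the
# generate_models_and_test_data branch.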
| 43.645161
| 116
| 0.707317
|
579840e139ea21f653294ebdd84baff90adf7d08
| 473
|
py
|
Python
|
Source_Pages/Upload.py
|
StephenJD/TnT.Cinley
|
1779ef282eeb6d7381f5a6361f14928202d57eb3
|
[
"MIT"
] | null | null | null |
Source_Pages/Upload.py
|
StephenJD/TnT.Cinley
|
1779ef282eeb6d7381f5a6361f14928202d57eb3
|
[
"MIT"
] | null | null | null |
Source_Pages/Upload.py
|
StephenJD/TnT.Cinley
|
1779ef282eeb6d7381f5a6361f14928202d57eb3
|
[
"MIT"
] | null | null | null |
import subprocess
from pathlib import Path
def updateWebsite(webRootPath):
ParmsAdd = ("add", ".")
ParmsCommit = ("commit","-m", "Upload new content")
ParmsPush = ("push", "origin", "master")
Git = "git"
subprocess.run([Git, *ParmsAdd], shell=False, cwd=webRootPath)
subprocess.run([Git, *ParmsCommit], shell=False, cwd=webRootPath)
subprocess.run([Git, *ParmsPush], shell=False, cwd=webRootPath)
webRootPath = Path.cwd().parent
updateWebsite(webRootPath)
| 31.533333
| 67
| 0.710359
|
ff4a42a3394b591146a89061ecbea19fd9977dac
| 155
|
py
|
Python
|
chill/examples/chill/testcases/test_permute.py
|
CompOpt4Apps/Artifact-DataDepSimplify
|
4fa1bf2bda2902fec50a54ee79ae405a554fc9f4
|
[
"MIT"
] | 5
|
2019-05-20T03:35:41.000Z
|
2021-09-16T22:22:13.000Z
|
chill/examples/chill/testcases/test_permute.py
|
CompOpt4Apps/Artifact-DataDepSimplify
|
4fa1bf2bda2902fec50a54ee79ae405a554fc9f4
|
[
"MIT"
] | null | null | null |
chill/examples/chill/testcases/test_permute.py
|
CompOpt4Apps/Artifact-DataDepSimplify
|
4fa1bf2bda2902fec50a54ee79ae405a554fc9f4
|
[
"MIT"
] | null | null | null |
from chill import *
source('mm.c')
procedure('mm')
#format: rose
loop(0)
known('ambn > 0')
known('an > 0')
known('bm > 0')
permute([3,2,1])
print_code()
| 11.923077
| 19
| 0.619355
|
30f9fa7a6050d03395602281f06a3da07c71dded
| 35
|
py
|
Python
|
python/testData/surround/SurroundNewline.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/surround/SurroundNewline.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/surround/SurroundNewline.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
<selection>a = 1
</selection>
a = 2
| 11.666667
| 16
| 0.628571
|
e3c7e78fdd00ec7157fe4a82add968be8ba26df1
| 12,102
|
py
|
Python
|
train.py
|
nyLiao/GPT2-Chinese
|
18af9c9547320ed9f472951a33569729ec829f67
|
[
"MIT"
] | null | null | null |
train.py
|
nyLiao/GPT2-Chinese
|
18af9c9547320ed9f472951a33569729ec829f67
|
[
"MIT"
] | null | null | null |
train.py
|
nyLiao/GPT2-Chinese
|
18af9c9547320ed9f472951a33569729ec829f67
|
[
"MIT"
] | null | null | null |
import transformers
import torch
import os
import json
import random
import numpy as np
import argparse
# from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
# from tokenizations.bpe_tokenizer import get_encoder
def build_files(data_path, tokenized_data_path, num_pieces, full_tokenizer, min_length):
with open(data_path, 'r', encoding='utf8') as f:
print('reading lines')
lines = json.load(f)
        lines = [line.replace('\n', ' [SEP] ') for line in lines]  # use [SEP] for line breaks; it also marks the end of a paragraph
all_len = len(lines)
if not os.path.exists(tokenized_data_path):
os.mkdir(tokenized_data_path)
for i in tqdm(range(num_pieces)):
sublines = lines[all_len // num_pieces * i: all_len // num_pieces * (i + 1)]
if i == num_pieces - 1:
            sublines.extend(lines[all_len // num_pieces * (i + 1):])  # append the leftover examples to the last piece
        sublines = [full_tokenizer.tokenize(line) for line in sublines if len(line) > min_length]  # only keep sentences longer than min_length
sublines = [full_tokenizer.convert_tokens_to_ids(line) for line in sublines]
full_line = []
for subline in sublines:
            full_line.append(full_tokenizer.convert_tokens_to_ids('[MASK]'))  # prepend [MASK] to mark the start of an article
full_line.extend(subline)
            full_line.append(full_tokenizer.convert_tokens_to_ids('[CLS]'))  # append [CLS] to mark the end of an article
with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'w') as f:
for id in full_line:
f.write(str(id) + ' ')
print('finish')
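# Illustrative note on the output format (a sketch, not part of the original
# code): each tokenized_train_{i}.txt holds one long line of space-separated
# token ids, with the [MASK] and [CLS] ids delimiting articles, e.g.
#
#     103 2769 812 ... 101 103 6821 ... 101
#
# (the concrete ids depend on the vocabulary file in use).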
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='which GPUs to use')
    parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
                        help='model configuration file')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='vocabulary file')
    parser.add_argument('--raw_data_path', default='data/train.json', type=str, required=False, help='raw training corpus')
    parser.add_argument('--tokenized_data_path', default='data/tokenized/', type=str, required=False,
                        help='where to store the tokenized corpus')
    parser.add_argument('--raw', action='store_true', help='tokenize the raw data first')
    parser.add_argument('--epochs', default=5, type=int, required=False, help='number of training epochs')
    parser.add_argument('--batch_size', default=8, type=int, required=False, help='training batch size')
    parser.add_argument('--lr', default=1.5e-4, type=float, required=False, help='learning rate')
    parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='number of warm-up steps')
    parser.add_argument('--log_step', default=1, type=int, required=False, help='report the loss every this many steps; set to a multiple of gradient_accumulation')
    parser.add_argument('--stride', default=768, type=int, required=False, help='window stride used when slicing the training data')
    parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='gradient accumulation steps')
    parser.add_argument('--fp16', action='store_true', help='mixed-precision training')
    parser.add_argument('--fp16_opt_level', default='O1', type=str, required=False)
    parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
    parser.add_argument('--num_pieces', default=100, type=int, required=False, help='number of pieces to split the training corpus into')
    parser.add_argument('--min_length', default=128, type=int, required=False, help='minimum article length to include')
    parser.add_argument('--output_dir', default='model/', type=str, required=False, help='model output path')
    parser.add_argument('--pretrained_model', default='', type=str, required=False, help='path of the pretrained model to start training from')
    # parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='Tensorboard path')
    parser.add_argument('--segment', action='store_true', help='tokenize Chinese at word level')
    # parser.add_argument('--bpe_token', action='store_true', help='use a BPE tokenizer for subwords')
    # parser.add_argument('--encoder_json', default="tokenizations/encoder.json", type=str, help="encoder.json")
    # parser.add_argument('--vocab_bpe', default="tokenizations/vocab.bpe", type=str, help="vocab.bpe")
args = parser.parse_args()
print('args:\n' + args.__repr__())
if args.segment:
from tokenizations import tokenization_bert_word_level as tokenization_bert
else:
from tokenizations import tokenization_bert
os.environ["CUDA_VISIBLE_DEVICES"] = args.device # 此处设置程序使用哪些显卡
model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(args.model_config)
print('config:\n' + model_config.to_json_string())
n_ctx = model_config.n_ctx
# if args.bpe_token:
# full_tokenizer = get_encoder(args.encoder_json, args.vocab_bpe)
# else:
# full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)
full_tokenizer = tokenization_bert.BertTokenizer(vocab_file=args.tokenizer_path)
full_tokenizer.max_len = 999999
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('using device:', device)
raw_data_path = args.raw_data_path
tokenized_data_path = args.tokenized_data_path
    raw = args.raw  # whether to build the dataset from scratch
epochs = args.epochs
batch_size = args.batch_size
lr = args.lr
warmup_steps = args.warmup_steps
log_step = args.log_step
stride = args.stride
gradient_accumulation = args.gradient_accumulation
    fp16 = args.fp16  # do not enable on GPUs that lack half-precision support
fp16_opt_level = args.fp16_opt_level
max_grad_norm = args.max_grad_norm
num_pieces = args.num_pieces
min_length = args.min_length
output_dir = args.output_dir
# tb_writer = SummaryWriter(log_dir=args.writer_dir)
assert log_step % gradient_accumulation == 0
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if raw:
print('building files')
build_files(data_path=raw_data_path, tokenized_data_path=tokenized_data_path, num_pieces=num_pieces,
full_tokenizer=full_tokenizer, min_length=min_length)
print('files built')
if not args.pretrained_model:
model = transformers.modeling_gpt2.GPT2LMHeadModel(config=model_config)
else:
model = transformers.modeling_gpt2.GPT2LMHeadModel.from_pretrained(args.pretrained_model)
model.train()
model.to(device)
num_parameters = 0
parameters = model.parameters()
for parameter in parameters:
num_parameters += parameter.numel()
print('number of parameters: {}'.format(num_parameters))
multi_gpu = False
full_len = 0
print('calculating total steps')
for i in tqdm(range(num_pieces)):
with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'r') as f:
full_len += len([int(item) for item in f.read().strip().split()])
total_steps = int(full_len / stride * epochs / batch_size / gradient_accumulation)
print('total steps = {}'.format(total_steps))
optimizer = transformers.AdamW(model.parameters(), lr=lr, correct_bias=True)
scheduler = transformers.WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps,
t_total=total_steps)
if fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = DataParallel(model, device_ids=[int(i) for i in args.device.split(',')])
multi_gpu = True
print('starting training')
overall_step = 0
running_loss = 0
for epoch in range(epochs):
print('epoch {}'.format(epoch + 1))
now = datetime.now()
print('time: {}'.format(now))
x = np.linspace(0, num_pieces - 1, num_pieces, dtype=np.int32)
random.shuffle(x)
piece_num = 0
for i in x:
with open(tokenized_data_path + 'tokenized_train_{}.txt'.format(i), 'r') as f:
line = f.read().strip()
tokens = line.split()
tokens = [int(token) for token in tokens]
start_point = 0
samples = []
while start_point < len(tokens) - n_ctx:
samples.append(tokens[start_point: start_point + n_ctx])
start_point += stride
if start_point < len(tokens):
samples.append(tokens[len(tokens)-n_ctx:])
random.shuffle(samples)
for step in range(len(samples) // batch_size): # drop last
# prepare data
batch = samples[step * batch_size: (step + 1) * batch_size]
batch_inputs = []
for ids in batch:
int_ids = [int(x) for x in ids]
batch_inputs.append(int_ids)
batch_inputs = torch.tensor(batch_inputs).long().to(device)
# forward pass
outputs = model.forward(input_ids=batch_inputs, labels=batch_inputs)
# outputs = model(input_ids=batch_inputs, labels=batch_inputs)
loss, logits = outputs[:2]
# get loss
if multi_gpu:
loss = loss.mean()
if gradient_accumulation > 1:
loss = loss / gradient_accumulation
# loss backward
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
# optimizer step
running_loss += loss.item()
if (overall_step + 1) % gradient_accumulation == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if (overall_step + 1) % log_step == 0:
# tb_writer.add_scalar('loss', loss.item() * gradient_accumulation, overall_step)
print('now time: {}:{}. Step {} of piece {} of epoch {}, loss {}'.format(
datetime.now().hour,
datetime.now().minute,
step + 1,
piece_num,
epoch + 1,
running_loss * gradient_accumulation / (log_step / gradient_accumulation)))
running_loss = 0
overall_step += 1
piece_num += 1
print('saving model for epoch {}'.format(epoch + 1))
if not os.path.exists(output_dir + 'model_epoch{}'.format(epoch + 1)):
os.mkdir(output_dir + 'model_epoch{}'.format(epoch + 1))
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir + 'model_epoch{}'.format(epoch + 1))
# torch.save(scheduler.state_dict(), output_dir + 'model_epoch{}/scheduler.pt'.format(epoch + 1))
# torch.save(optimizer.state_dict(), output_dir + 'model_epoch{}/optimizer.pt'.format(epoch + 1))
print('epoch {} finished'.format(epoch + 1))
then = datetime.now()
print('time: {}'.format(then))
print('time for one epoch: {}'.format(then - now))
print('training finished')
if not os.path.exists(output_dir + 'final_model'):
os.mkdir(output_dir + 'final_model')
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir + 'final_model')
# torch.save(scheduler.state_dict(), output_dir + 'final_model/scheduler.pt')
# torch.save(optimizer.state_dict(), output_dir + 'final_model/optimizer.pt')
if __name__ == '__main__':
main()
| 47.833992
| 123
| 0.640307
|
bc54e69b8ce5534d5d45e11e71108a67c23daa00
| 1,197
|
py
|
Python
|
setup.py
|
naibo-code/nnabla_tensorboard
|
eea69bc18e912ba602a5c62ecaaf21a837764d16
|
[
"MIT"
] | null | null | null |
setup.py
|
naibo-code/nnabla_tensorboard
|
eea69bc18e912ba602a5c62ecaaf21a837764d16
|
[
"MIT"
] | null | null | null |
setup.py
|
naibo-code/nnabla_tensorboard
|
eea69bc18e912ba602a5c62ecaaf21a837764d16
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
version_git = '0.1'
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'numpy',
'protobuf >= 3.8.0',
'six',
]
test_requirements = [
'pytest',
'matplotlib',
'crc32c',
]
setup(
name='nnabla_tensorboard',
version=version_git,
description='nnabla_tensorboard lets you watch Tensors Flow with NNabla',
long_description=history,
author='naibo-code',
author_email='naibo.ha@gmail.com',
url='https://github.com/naibo-code/nnabla_tensorboard',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=requirements,
license='MIT license',
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite='tests',
tests_require=test_requirements
)
| 24.9375
| 77
| 0.643275
|
dbb9d1b84f345b8ab53c8ea83110c1915713935c
| 208
|
py
|
Python
|
ccgowl/simulations/simulation.py
|
cmazzaanthony/ccgowl
|
31e12579449c296d7581c2838b5d7d7873361b48
|
[
"MIT"
] | 8
|
2019-06-20T17:26:15.000Z
|
2020-08-26T04:19:22.000Z
|
ccgowl/simulations/simulation.py
|
cmazzaanthony/ccgowl
|
31e12579449c296d7581c2838b5d7d7873361b48
|
[
"MIT"
] | 3
|
2021-03-25T22:42:42.000Z
|
2021-06-01T23:52:14.000Z
|
ccgowl/simulations/simulation.py
|
cmazzaanthony/ccgowl
|
31e12579449c296d7581c2838b5d7d7873361b48
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Simulation(ABC):
    @abstractmethod
    def run(self, *args, **kwargs):
        pass
    @abstractmethod
    def plot_results(self, *args, **kwargs):
        pass
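# A minimal sketch of a concrete simulation (illustrative; a subclass only has
# to implement both abstract methods to be instantiable):
#
#     class DummySimulation(Simulation):
#         def run(self, *args, **kwargs):
#             self.results = [1, 2, 3]
#
#         def plot_results(self, *args, **kwargs):
#             print(self.results)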
| 16
| 43
| 0.629808
|
40db722767ef8ef35094b1a8122dde7ac921d120
| 6,796
|
py
|
Python
|
molecule/driver/delegated.py
|
micheelengronne/molecule
|
152be703d65606d6bdea7202bc5270ceb108c8ba
|
[
"MIT"
] | 1
|
2019-05-14T13:03:34.000Z
|
2019-05-14T13:03:34.000Z
|
molecule/driver/delegated.py
|
micheelengronne/molecule
|
152be703d65606d6bdea7202bc5270ceb108c8ba
|
[
"MIT"
] | 11
|
2019-05-16T17:10:49.000Z
|
2019-06-13T20:51:15.000Z
|
molecule/driver/delegated.py
|
micheelengronne/molecule
|
152be703d65606d6bdea7202bc5270ceb108c8ba
|
[
"MIT"
] | 1
|
2021-04-26T19:47:39.000Z
|
2021-04-26T19:47:39.000Z
|
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from molecule import logger
from molecule import util
from molecule.driver import base
LOG = logger.get_logger(__name__)
class Delegated(base.Base):
"""
The class responsible for managing delegated instances. Delegated is `not`
the default driver used in Molecule.
    Under this driver, it is the developer's responsibility to implement the
create and destroy playbooks. ``Managed`` is the default behaviour of all
drivers.
.. code-block:: yaml
driver:
name: delegated
However, the developer must adhere to the instance-config API. The
developer's create playbook must provide the following instance-config
data, and the developer's destroy playbook must reset the instance-config.
.. code-block:: yaml
- address: ssh_endpoint
identity_file: ssh_identity_file
instance: instance_name
port: ssh_port_as_string
user: ssh_user
- address: winrm_endpoint
instance: instance_name
port: winrm_port
user: winrm_user
connection: 'winrm'
This article covers how to configure and use WinRM with Ansible:
https://docs.ansible.com/ansible/latest/user_guide/windows_winrm.html
Molecule can also skip the provisioning/deprovisioning steps. It is the
    developer's responsibility to manage the instances, and properly configure
Molecule to connect to said instances.
.. code-block:: yaml
driver:
name: delegated
options:
managed: False
login_cmd_template: 'docker exec -ti {instance} bash'
ansible_connection_options:
ansible_connection: docker
platforms:
- name: instance-docker
.. code-block:: bash
$ docker run \\
-d \\
--name instance-docker \\
--hostname instance-docker \\
-it molecule_local/ubuntu:latest sleep infinity & wait
Use Molecule with delegated instances, which are accessible over ssh.
.. important::
It is the developer's responsibility to configure the ssh config file.
.. code-block:: yaml
driver:
name: delegated
options:
managed: False
login_cmd_template: 'ssh {instance} -F /tmp/ssh-config'
ansible_connection_options:
ansible_connection: ssh
ansible_ssh_common_args: '-F /path/to/ssh-config'
platforms:
- name: instance-vagrant
Provide the files Molecule will preserve post ``destroy`` action.
.. code-block:: yaml
driver:
name: delegated
safe_files:
- foo
And in order to use localhost as molecule's target:
.. code-block:: yaml
driver:
name: delegated
options:
managed: False
ansible_connection_options:
ansible_connection: local
"""
def __init__(self, config):
super(Delegated, self).__init__(config)
self._name = 'delegated'
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def login_cmd_template(self):
if self.managed:
connection_options = ' '.join(self.ssh_connection_options)
return ('ssh {{address}} '
'-l {{user}} '
'-p {{port}} '
'-i {{identity_file}} '
'{}').format(connection_options)
return self.options['login_cmd_template']
@property
def default_safe_files(self):
return []
@property
def default_ssh_connection_options(self):
if self.managed:
return self._get_ssh_connection_options()
return []
def login_options(self, instance_name):
if self.managed:
d = {'instance': instance_name}
return util.merge_dicts(d,
self._get_instance_config(instance_name))
return {'instance': instance_name}
def ansible_connection_options(self, instance_name):
if self.managed:
try:
d = self._get_instance_config(instance_name)
conn_dict = {}
conn_dict['ansible_user'] = d.get('user')
conn_dict['ansible_host'] = d.get('address')
conn_dict['ansible_port'] = d.get('port')
conn_dict['ansible_connection'] = d.get('connection', 'smart')
if d.get('identity_file'):
conn_dict['ansible_private_key_file'] = d.get(
'identity_file')
conn_dict['ansible_ssh_common_args'] = ' '.join(
self.ssh_connection_options)
return conn_dict
except StopIteration:
return {}
except IOError:
# Instance has yet to be provisioned , therefore the
# instance_config is not on disk.
return {}
return self.options['ansible_connection_options']
def _created(self):
if self.managed:
return super(Delegated, self)._created()
return 'unknown'
def _get_instance_config(self, instance_name):
instance_config_dict = util.safe_load_file(
self._config.driver.instance_config)
return next(item for item in instance_config_dict
if item['instance'] == instance_name)
def sanity_checks(self):
        # Note(decentral1se): Cannot implement; driver specifics are unknown.
pass
| 32.516746
| 79
| 0.623161
|
88ac93467503e354f5ac2b086117a04a164f76bd
| 3,439
|
py
|
Python
|
test/functional/mempool_persist.py
|
freelancedeveloper025/titaniumcore
|
7a629d0391d78527bad017ce0b8b02c964a26258
|
[
"MIT"
] | null | null | null |
test/functional/mempool_persist.py
|
freelancedeveloper025/titaniumcore
|
7a629d0391d78527bad017ce0b8b02c964a26258
|
[
"MIT"
] | null | null | null |
test/functional/mempool_persist.py
|
freelancedeveloper025/titaniumcore
|
7a629d0391d78527bad017ce0b8b02c964a26258
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, bitcoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolPersistTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Stop-start node0 and node1. Verify that node0 has the transactions in its mempool and node1 does not.")
self.stop_nodes()
self.start_node(0)
self.start_node(1)
# Give ttmd a second to reload the mempool
time.sleep(1)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
# Give ttmd a second to reload the mempool
time.sleep(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
if __name__ == '__main__':
MempoolPersistTest().main()
| 41.433735
| 127
| 0.698168
|
f66e0dc364fab2c111de4e9ab7b7330b95cabff6
| 10,701
|
py
|
Python
|
pyscf/grad/dhf.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | 2
|
2021-06-30T22:33:35.000Z
|
2021-11-22T18:02:36.000Z
|
pyscf/grad/dhf.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | 36
|
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/grad/dhf.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | 4
|
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Relativistic Dirac-Hartree-Fock
"""
import time
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import _vhf
from pyscf.grad import rhf as rhf_grad
def grad_elec(mf_grad, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
mf = mf_grad.base
mol = mf_grad.mol
if mo_energy is None: mo_energy = mf.mo_energy
if mo_occ is None: mo_occ = mf.mo_occ
if mo_coeff is None: mo_coeff = mf.mo_coeff
log = logger.Logger(mf_grad.stdout, mf_grad.verbose)
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dm0 = mf.make_rdm1(mf.mo_coeff, mf.mo_occ)
n2c = dm0.shape[0] // 2
    t0 = (time.process_time(), time.time())
log.debug('Compute Gradients of NR Hartree-Fock Coulomb repulsion')
vhf = mf_grad.get_veff(mol, dm0)
log.timer('gradients of 2e part', *t0)
dme0 = mf_grad.make_rdm1e(mf.mo_energy, mf.mo_coeff, mf.mo_occ)
if atmlst is None:
atmlst = range(mol.natm)
aoslices = mol.aoslice_2c_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
shl0, shl1, p0, p1 = aoslices[ia]
h1ao = hcore_deriv(ia)
de[k] += numpy.einsum('xij,ji->x', h1ao, dm0).real
# large components
de[k] +=(numpy.einsum('xij,ji->x', vhf[:,p0:p1], dm0[:,p0:p1])
+ numpy.einsum('xji,ji->x', vhf[:,p0:p1].conj(), dm0[p0:p1])).real
de[k] -=(numpy.einsum('xij,ji->x', s1[:,p0:p1], dme0[:,p0:p1])
+ numpy.einsum('xji,ji->x', s1[:,p0:p1].conj(), dme0[p0:p1])).real
# small components
p0 += n2c
p1 += n2c
de[k] +=(numpy.einsum('xij,ji->x', vhf[:,p0:p1], dm0[:,p0:p1])
+ numpy.einsum('xji,ji->x', vhf[:,p0:p1].conj(), dm0[p0:p1])).real
de[k] -=(numpy.einsum('xij,ji->x', s1[:,p0:p1], dme0[:,p0:p1])
+ numpy.einsum('xji,ji->x', s1[:,p0:p1].conj(), dme0[p0:p1])).real
log.debug('gradients of electronic part')
log.debug(str(de))
return de
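# The loop above assembles the standard SCF gradient expression (sketch):
#   dE/dx = Tr[dh/dx . D] + Tr[dVhf/dx . D] - Tr[dS/dx . W]
# where D is the SCF density matrix, Vhf the two-electron potential and W
# (dme0 above) the energy-weighted density matrix; the large- and
# small-component AO blocks are accumulated separately.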
grad_nuc = rhf_grad.grad_nuc
def get_hcore(mol):
n2c = mol.nao_2c()
n4c = n2c * 2
c = lib.param.LIGHT_SPEED
t = mol.intor('int1e_ipkin_spinor', comp=3)
vn = mol.intor('int1e_ipnuc_spinor', comp=3)
wn = mol.intor('int1e_ipspnucsp_spinor', comp=3)
    h1e = numpy.zeros((3,n4c,n4c), numpy.complex128)
h1e[:,:n2c,:n2c] = vn
h1e[:,n2c:,:n2c] = t
h1e[:,:n2c,n2c:] = t
h1e[:,n2c:,n2c:] = wn * (.25/c**2) - t
return -h1e
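# Block structure assembled above for the derivative core Hamiltonian in the
# 4-spinor (restricted-kinetic-balance) basis, before the overall sign flip:
#   h1 = [[ dV_nuc     dT             ],
#         [ dT         dW/(4c^2) - dT ]]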
def get_ovlp(mol):
n2c = mol.nao_2c()
n4c = n2c * 2
c = lib.param.LIGHT_SPEED
s = mol.intor('int1e_ipovlp_spinor', comp=3)
t = mol.intor('int1e_ipkin_spinor', comp=3)
    s1e = numpy.zeros((3,n4c,n4c), numpy.complex128)
s1e[:,:n2c,:n2c] = s
s1e[:,n2c:,n2c:] = t * (.5/c**2)
return -s1e
make_rdm1e = rhf_grad.make_rdm1e
def get_coulomb_hf(mol, dm, level='SSSS'):
'''Dirac-Hartree-Fock Coulomb repulsion'''
if level.upper() == 'LLLL':
logger.info(mol, 'Compute Gradients: (LL|LL)')
vj, vk = _call_vhf1_llll(mol, dm)
#L2SL the response of the large and small components on the large component density
#LS2L the response of the large component on the L+S density
#NOSS just exclude SSSS
#TODO elif level.upper() == 'LS2L':
#TODO logger.info(mol, 'Compute Gradients: (LL|LL) + (SS|dLL)')
#TODO vj, vk = scf.hf.get_vj_vk(pycint.rkb_vhf_coul_grad_ls2l_o1, mol, dm)
#TODO elif level.upper() == 'L2SL':
#TODO logger.info(mol, 'Compute Gradients: (LL|LL) + (dSS|LL)')
#TODO vj, vk = scf.hf.get_vj_vk(pycint.rkb_vhf_coul_grad_l2sl_o1, mol, dm)
#TODO elif level.upper() == 'NOSS':
#TODO logger.info(mol, 'Compute Gradients: (LL|LL) + (dSS|LL) + (SS|dLL)')
#TODO vj, vk = scf.hf.get_vj_vk(pycint.rkb_vhf_coul_grad_xss_o1, mol, dm)
else:
logger.info(mol, 'Compute Gradients: (LL|LL) + (SS|LL) + (SS|SS)')
vj, vk = _call_vhf1(mol, dm)
return -(vj - vk)
get_veff = get_coulomb_hf
class GradientsBasics(rhf_grad.GradientsBasics):
'''
Basic nuclear gradient functions for 4C relativistic methods
'''
def get_hcore(self, mol=None):
if mol is None: mol = self.mol
return get_hcore(mol)
def hcore_generator(self, mol):
aoslices = mol.aoslice_2c_by_atom()
h1 = self.get_hcore(mol)
n2c = mol.nao_2c()
n4c = n2c * 2
c = lib.param.LIGHT_SPEED
def hcore_deriv(atm_id):
shl0, shl1, p0, p1 = aoslices[atm_id]
with mol.with_rinv_at_nucleus(atm_id):
z = -mol.atom_charge(atm_id)
vn = z * mol.intor('int1e_iprinv_spinor', comp=3)
wn = z * mol.intor('int1e_ipsprinvsp_spinor', comp=3)
            v = numpy.zeros((3,n4c,n4c), numpy.complex128)
v[:,:n2c,:n2c] = vn
v[:,n2c:,n2c:] = wn * (.25/c**2)
v[:,p0:p1] += h1[:,p0:p1]
v[:,n2c+p0:n2c+p1] += h1[:,n2c+p0:n2c+p1]
return v + v.conj().transpose(0,2,1)
return hcore_deriv
def get_ovlp(self, mol=None):
if mol is None: mol = self.mol
return get_ovlp(mol)
class Gradients(GradientsBasics):
'''Unrestricted Dirac-Hartree-Fock gradients'''
def __init__(self, scf_method):
GradientsBasics.__init__(self, scf_method)
if scf_method.with_ssss:
self.level = 'SSSS'
else:
#self.level = 'NOSS'
#self.level = 'LLLL'
raise NotImplementedError
self._keys = self._keys.union(['level'])
def get_veff(self, mol, dm):
return get_coulomb_hf(mol, dm, level=self.level)
def make_rdm1e(self, mo_energy=None, mo_coeff=None, mo_occ=None):
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
return make_rdm1e(mo_energy, mo_coeff, mo_occ)
grad_elec = grad_elec
def extra_force(self, atom_id, envs):
'''Hook for extra contributions in analytical gradients.
Contributions like the response of auxiliary basis in density fitting
method, the grid response in DFT numerical integration can be put in
this function.
'''
return 0
def kernel(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
        cput0 = (time.process_time(), time.time())
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
de = self.grad_elec(mo_energy, mo_coeff, mo_occ, atmlst)
self.de = de + self.grad_nuc(atmlst=atmlst)
if self.mol.symmetry:
self.de = self.symmetrize(self.de, atmlst)
logger.timer(self, 'SCF gradients', *cput0)
self._finalize()
return self.de
as_scanner = rhf_grad.as_scanner
Grad = Gradients
from pyscf import scf
scf.dhf.UHF.Gradients = lib.class_as_method(Gradients)
def _call_vhf1_llll(mol, dm):
n2c = dm.shape[0] // 2
dmll = dm[:n2c,:n2c].copy()
    vj = numpy.zeros((3,n2c*2,n2c*2), dtype=numpy.complex128)
    vk = numpy.zeros((3,n2c*2,n2c*2), dtype=numpy.complex128)
vj[:,:n2c,:n2c], vk[:,:n2c,:n2c] = \
_vhf.rdirect_mapdm('int2e_ip1_spinor', 's2kl',
('lk->s1ij', 'jk->s1il'), dmll, 3,
mol._atm, mol._bas, mol._env)
return vj, vk
def _call_vhf1(mol, dm):
c1 = .5 / lib.param.LIGHT_SPEED
n2c = dm.shape[0] // 2
dmll = dm[:n2c,:n2c].copy()
dmls = dm[:n2c,n2c:].copy()
dmsl = dm[n2c:,:n2c].copy()
dmss = dm[n2c:,n2c:].copy()
    vj = numpy.zeros((3,n2c*2,n2c*2), dtype=numpy.complex128)
    vk = numpy.zeros((3,n2c*2,n2c*2), dtype=numpy.complex128)
vj[:,:n2c,:n2c], vk[:,:n2c,:n2c] = \
_vhf.rdirect_mapdm('int2e_ip1_spinor', 's2kl',
('lk->s1ij', 'jk->s1il'), dmll, 3,
mol._atm, mol._bas, mol._env)
vj[:,n2c:,n2c:], vk[:,n2c:,n2c:] = \
_vhf.rdirect_mapdm('int2e_ipspsp1spsp2_spinor', 's2kl',
('lk->s1ij', 'jk->s1il'), dmss, 3,
mol._atm, mol._bas, mol._env) * c1**4
vx = _vhf.rdirect_bindm('int2e_ipspsp1_spinor', 's2kl',
('lk->s1ij', 'jk->s1il'), (dmll, dmsl), 3,
mol._atm, mol._bas, mol._env) * c1**2
vj[:,n2c:,n2c:] += vx[0]
vk[:,n2c:,:n2c] += vx[1]
vx = _vhf.rdirect_bindm('int2e_ip1spsp2_spinor', 's2kl',
('lk->s1ij', 'jk->s1il'), (dmss, dmls), 3,
mol._atm, mol._bas, mol._env) * c1**2
vj[:,:n2c,:n2c] += vx[0]
vk[:,:n2c,n2c:] += vx[1]
return vj, vk
if __name__ == "__main__":
from pyscf import gto
from pyscf import scf
from pyscf import lib
with lib.light_speed(30):
h2o = gto.Mole()
h2o.verbose = 0
h2o.output = None#"out_h2o"
h2o.atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
h2o.basis = {"H": '6-31g',
"O": '6-31g',}
h2o.build()
method = scf.dhf.UHF(h2o).run()
g = method.Gradients().kernel()
print(g)
ms = method.as_scanner()
h2o.set_geom_([["O" , (0. , 0. ,-0.001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]], unit='Ang')
e1 = ms(h2o)
h2o.set_geom_([["O" , (0. , 0. , 0.001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]], unit='Ang')
e2 = ms(h2o)
print(g[0,2], (e2-e1)/0.002*lib.param.BOHR)
| 36.274576
| 83
| 0.572563
|
7c0a50e081ff113d491e1d50233bcf6cb8cde7c4
| 1,659
|
py
|
Python
|
datafactory/models/dict.py
|
righ/datafactory
|
8299df4e29472381ccfe91535fdecf8e97a46d32
|
[
"Apache-2.0"
] | 2
|
2015-07-09T08:49:32.000Z
|
2015-09-04T13:43:40.000Z
|
datafactory/models/dict.py
|
righ/datafactory
|
8299df4e29472381ccfe91535fdecf8e97a46d32
|
[
"Apache-2.0"
] | 1
|
2020-06-06T13:12:39.000Z
|
2020-06-06T13:12:39.000Z
|
datafactory/models/dict.py
|
righ/datafactory
|
8299df4e29472381ccfe91535fdecf8e97a46d32
|
[
"Apache-2.0"
] | 1
|
2020-11-06T08:11:51.000Z
|
2020-11-06T08:11:51.000Z
|
# coding: utf-8
from .base import BaseModel
from ..api import render, special
class DictModel(BaseModel, dict):
"""dict structure that is repeatedly drawn."""
__type = dict
def __init__(self, params, callback=None):
"""define dict-structure model.
:param dict params: elements.
:return: DictModel instance.
"""
self.update(params)
self._length = len(self)
        self._order = list(self.keys())
self._callback = callback
def ordering(self, *order):
"""order in which list-index to generate the elements.
:param immutable-obj order: order of subscripts. (variable-length argument)
:return: self(model).
"""
self._order = list(order)
for subscript in self.keys():
if subscript not in self._order:
self._order.append(subscript)
return self
def __call__(self, key, index):
"""rendering a dict-structure record.
        :param immutable-obj key: element of iterable-object to render container object.
        :param int index: index of iterable-object to render container object.
:return: rendered record.
"""
record = self.__type()
for subscript in self._order:
blueprint = self[subscript]
args = [record, key, index]
value = render.apply(blueprint, args)
subscript = render.apply(subscript, args)
if not (value is special.BLANK or subscript is special.BLANK):
record[subscript] = value
if self._callback:
record = self._callback(record)
return record
| 30.163636
| 88
| 0.605184
|
43e19b2f12dff4e955c6e36e2051b994e821daaf
| 132
|
py
|
Python
|
tests/BaseTest.py
|
jsutch/webhook_as_a_service_demo
|
8c9d9f00b0dab04e0d2819d89007cf18fa8bd57f
|
[
"BSD-2-Clause"
] | null | null | null |
tests/BaseTest.py
|
jsutch/webhook_as_a_service_demo
|
8c9d9f00b0dab04e0d2819d89007cf18fa8bd57f
|
[
"BSD-2-Clause"
] | null | null | null |
tests/BaseTest.py
|
jsutch/webhook_as_a_service_demo
|
8c9d9f00b0dab04e0d2819d89007cf18fa8bd57f
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from app import app
class BaseTest(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
| 14.666667
| 36
| 0.69697
|
dcfa611fc599f557d064ff8843fcd33d39388295
| 19,243
|
py
|
Python
|
django/analysis/tracer.py
|
kantai/passe-framework-prototype
|
51a441b689c54cfd57748129f77fde3c7a08e5c3
|
[
"BSD-3-Clause"
] | 3
|
2016-07-06T16:34:38.000Z
|
2021-02-10T08:06:23.000Z
|
django/analysis/tracer.py
|
kantai/passe-framework-prototype
|
51a441b689c54cfd57748129f77fde3c7a08e5c3
|
[
"BSD-3-Clause"
] | null | null | null |
django/analysis/tracer.py
|
kantai/passe-framework-prototype
|
51a441b689c54cfd57748129f77fde3c7a08e5c3
|
[
"BSD-3-Clause"
] | 2
|
2020-06-22T20:55:48.000Z
|
2021-01-15T18:01:04.000Z
|
import sys, os, gc, inspect, dis, opcode
from django.htoken import get_token
import django.analysis.persisted as persisted
from django.analysis.assertion import *
_caller_cache = dict()
NULL_STATE = 1
BEFORE_VIEW_STATE = 2
IN_VIEW_STATE = 3
ANALYSIS_RUNNING = False
analysis = None
# Analysis needs to pick up the following stuff...
# 1. SQL Statements
# * should be easy enough to grab - watch for my db-proxy hook point
# 2. Current View
# * watch at the "handler" insertion point
# * will also need to construct a list of all views
# 3. Inferrable Assertions
# * use of the current session user - how to get this?
# + uses of foreign key constraint?
# + uses of session.userid
#
# And output a mapping from view to possible SQL queries
# with each SQL query having an attached assertion
# Note, this file is a mess of unorganized functions, globals, and strange
# calling structures. This should probably be fixed but, knowing you, it won't be.
def pause_sql_analysis():
if analysis:
analysis.sql_paused = True
def resume_sql_analysis():
if analysis:
analysis.sql_paused = False
def is_analysis_paused():
if analysis:
return analysis.sql_paused
def is_call_SQL(fmf):
    """
    Returns True if the frame info describes a SQL call,
    otherwise returns False.
    """
    if fmf[2] != "mark_sql_call": # TODO: add, ummm, a check for module too? kthnx.
        return False
    return True
def make_perm_check(perm):
if analysis and (not analysis.sql_paused):
analysis.add_perm(perm)
try:
from __pypy__ import taint as pypyt
except ImportError:
class FooMoo:
def add_taint(self, x, y):
return None
def get_taint(self, x):
return set()
def get_control_taint(self):
return set()
pypyt = FooMoo()
def set_taint(variable, taint):
if variable is None:
return # don't taint None!!!
pypyt.add_taint(variable, taint)
#assert is_tainted(variable)
return variable
def is_tainted(variable):
return len(pypyt.get_taint(variable)) > 0
def get_taint(variable):
return set(pypyt.get_taint(variable))
def get_cf_taint():
return set(pypyt.get_control_taint())
def taint(variable):
if isinstance(variable, tuple):
return tuple(( taint(v) for v in variable ))
if analysis:
analysis.taint_count += 1
taint_m = analysis.taint_count
tainted_v = set_taint(variable, taint_m)
# tainted_v = taintmode.taint(variable, taint_m)
# if not taintmode.tainted(tainted_v) and \
# not (isinstance(variable, bool) or variable == None):
# print "fuh??? %s %s" % (tainted_v, type(tainted_v))
return tainted_v
else:
return variable
def is_analysis_running():
return ANALYSIS_RUNNING
def set_analysis_running(val):
global ANALYSIS_RUNNING
ANALYSIS_RUNNING = val
def in_view():
if analysis:
return True
return False
def mark_sql_call(q,a):
if analysis:
return analysis.mark_sql_call(q,a)
def set_user_id(v):
if analysis:
return analysis.set_user_id(v)
def analysis_view_start(f, a, kw, res_position, alias):
if analysis:
return analysis.analysis_view_start(f, a, kw, res_position, alias)
def analysis_view_refer(res_position, view_name,
referer, referer_position, referer_name):
analysis.add_referer(referer_position)
print "AJAX-REF %s, %s" % (referer, referer_name)
def analysis_view_stop():
analysis.analysis_view_stop()
def add_view_magic(magic):
if analysis and not is_analysis_paused():
analysis.add_view_magic(magic)
def is_view_start(fmf):
    "Returns whether it is a view start!"
    return fmf[2] == "analysis_view_start"
def is_view_done(fmf):
return fmf[2] == "analysis_view_stop"
def key_search_function_default(value, arg):
return arg == value
def ksf_cast(value, arg):
"""
Checks to see if the value is a simple cast of the arg
and vice-versa
"""
# untaint will simplify the casting... not in pypyt!
v = value
a = arg
a_type = type(a)
v_type = type(v)
if v_type == a_type:
return v == a
try:
casted_v = a_type(v)
if casted_v == a:
return True
except TypeError:
pass
except ValueError:
pass
try:
casted_a = v_type(a)
if casted_a == v:
return True
except TypeError:
pass
except ValueError:
pass
return False
def ksf_taints(value, arg):
"""
value is the token value.
arg is the actual SQL argument
"""
if isinstance(value, list):
rval = any([ksf_taints(potent, arg) for potent in value])
return rval
if is_tainted(arg) and is_tainted(value):
val = (len(set.intersection(get_taint(arg), get_taint(value))) > 0)
if val and key_search_function_default(value, arg):
return True
if val and ksf_cast(value, arg):
return True
return False
def ksf_magics(value, arg, query = None, count = None):
if isinstance(value, list):
rval = any([ksf_magics(potent, arg, query, count) for potent in value])
return rval
if key_search_function_default(value,arg):
if isinstance(value, int):
if (value > 10000): # this is my poor man's check for my magic numbers
print "!magic: %s" % (value)
return True
return False
class Analysis:
def __init__(self):
self.tracing_state = NULL_STATE
self.current_user_id = None
self.sql_paused = False
self.taint_count = 0
self.current_view_descriptor = None
self.all_views = {}
self.current_view_magic = []
def add_referer(self, ref_pos):
if self.current_view_descriptor == None:
return
refid = persisted.resolver_position_to_id(ref_pos)
self.current_view_descriptor.referers[refid] = True
def collect_control_flow_taint(self, assertion):
"""
Adds some control-flow inferences to the assertion list "assertion".
"""
z = [] # z will hold the keys and values which have affected control-flow
for taint_value in get_cf_taint():
token = get_token()
for token_key, token_value in token.dict.items():
if isinstance(token_value, list) or isinstance(token_value, tuple):
for item in token_value:
if taint_value in get_taint(item):
z.append((token_key, item))
else:
if taint_value in get_taint(token_value):
z.append ((token_key, token_value ))
# Now that we've collected all the (key, values) which have affected c-f,
# we can check for equality constraints between the values, and otherwise
# bail out to constants. (same process as in the query args)
matches = []
for ix, (token_key, cf_value) in enumerate(z):
if ix + 1 == len(z):
continue
matches.extend([ControlFlowAssert(token_key, a[0]) for a in z[ix+1:] if a[1] == cf_value])
assertion.extend(matches)
# TODO: bailing out to constants. differentiation of constants from key asserts in CFAs.
def mark_sql_call(self, query, args):
if self.current_view_descriptor == None or self.sql_paused:
return
assertion = []
token = get_token()
if len(self.current_view_perms) > 0:
a = PermissionAssert( list(self.current_view_perms),
perm_validity_check(token.dict, self.current_view_perms) )
assertion.append(a)
self.collect_control_flow_taint(assertion)
for counter,arg in enumerate(args):
if arg == self.current_user_id:
assertion.append(ActiveUserAssert(counter))
else:
if isinstance(arg, list):
token_ix = []
for a in arg:
token_ix += [key for key,token_value in token.dict.items() if ksf_taints(token_value, a)]
else:
token_ix = [key for key,token_value in token.dict.items() if ksf_taints(token_value, arg)]
if len(token_ix) < 1:
if isinstance(arg, list):
token_ix = []
for a in arg:
token_ix += [key for key,value in token.dict.items()
if ksf_magics(value, a, query, key)]
else:
token_ix = [key for key,value in token.dict.items()
if ksf_magics(value, arg, query, key)]
if len(token_ix) > 0:
res = token_ix[0] # this could result in an over-constraint, but it hasn't so far in testing.
if len(token_ix) > 1:
# prioritize SQL results...
sql_ixs = [key for key in token_ix if key.startswith('sql')]
if len(sql_ixs) > 0:
res = sql_ixs[0]
assertion.append(TokenAssert(counter, res))
# elif arg in self.current_view_magic:
# assertion.append(NullAssert(counter))
else:
# if isinstance(arg, int) and arg > 10000:
# print "Suspicious magic number : %s " % arg
assertion.append(NullAssert(counter))
# assertion.append(ConstantAssert(counter,
# arg))
self.current_view_descriptor.add_sql(query, assertion)
def set_user_id(self, value):
self.current_user_id = value
def analysis_view_stop(self):
self.current_view_magic = set()
self.current_view_descriptor = None
def add_view_magic(self, magic):
self.current_view_magic.add(magic)
def add_perm(self, perm):
self.current_view_perms.add(perm)
def analysis_view_start(self, view_func, args, kwargs, res_position, alias):
self.current_view_magic = set()
self.current_view_perms = set()
if res_position in self.all_views:
self.current_view_descriptor = self.all_views[res_position]
else:
self.current_view_descriptor = ViewDescriptor(res_position)
self.current_view_descriptor.alias = alias
self.all_views[res_position] = self.current_view_descriptor
def merge_assertions(assertion_list):
"""
assertion_list is a list containing an entry for every
''use'' of a database query.
Each entry is composed of inferred constraints for that particular usage
of the database query.
the output is a list of argument labels and other query constraints.
"""
if len(assertion_list) == 0:
return assertion_list
top_list = assertion_list[0]
out_list = []
# first we deal with "positional" labels
for x in top_list:
if not hasattr(x, "position"):
continue
pos = x.position
possibles = []
for l in assertion_list:
y = [z for z in l if hasattr(z, "position") and z.position == pos]
if len(y) > 0:
possibles.append(y[0])
# now that we have the set of possibilities, what do we do? ahhghg.
is_good = True
for p in possibles:
is_good = (p == x)
if not is_good:
break
if is_good:
if isinstance(x, NullAssert):
continue
out_list.append(x)
# now, more specific, check if they are all TokenAsserts...
elif all([isinstance(p, TokenAssert) for p in possibles]):
ix_lst = list(set([ p.index for p in possibles]))
out_list.append(TokenAssert(pos, ix_lst))
elif any([isinstance(p, TokenAssert) for p in possibles]):
pass
# print "\n".join(["%s" % j for j in possibles])
# now we deal with non-positional labels (i.e., Permissions, CF)
cf_lists = \
[ [z for z in l if isinstance(z, ControlFlowAssert) or isinstance(z, PermissionAssert)]
for l in assertion_list ]
if len(cf_lists) > 0:
top = cf_lists[0]
survivors = [ assertion for assertion in top if
all([assertion in l for l in cf_lists]) ]
for assertion in survivors:
assertion.position = -2 # just to make sorting easier.
out_list.append(assertion)
return sorted(out_list, key = lambda x : x.position)
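# Illustrative example (hypothetical keys; assumes TokenAssert instances with
# different token keys compare unequal): merging the per-use lists
#   [[TokenAssert(0, 'uid')], [TokenAssert(0, 'gid')]]
# collapses to [TokenAssert(0, ['uid', 'gid'])] -- the two uses disagree on
# the exact token key but agree that argument 0 is always token-derived.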
admin_default1 = """SELECT "django_content_type"."id", "django_content_type"."name", "django_content_type"."app_label", "django_content_type"."model" FROM "django_content_type" WHERE "django_content_type"."id" = ?"""
admin_default2 = """SELECT "django_content_type"."id", "django_content_type"."name", "django_content_type"."app_label", "django_content_type"."model" FROM "django_content_type" WHERE ("django_content_type"."app_label" = ? AND "django_content_type"."model" = ? )"""
capture_contenttype = True
class ViewDescriptor:
def __init__(self,resolver_position):
        self.resolver_position = resolver_position
self.alias = None
self.sql_possibilities = {} # sql string -> assertion << list(atoms) >>
self.referers = {}
def add_sql(self,sql, assertion):
if sql in self.sql_possibilities:
self.sql_possibilities[sql].append(assertion)
else:
self.sql_possibilities[sql] = [ assertion ]
def get_merged_queries(self):
rval = {sql : merge_assertions(_assertions)
for sql, _assertions in self.sql_possibilities.items()}
if capture_contenttype:
if not (admin_default1 in rval):
rval[admin_default1] = []
if not (admin_default2 in rval):
rval[admin_default2] = []
return rval
    def __str__(self):
        return str(self.sql_possibilities)
    def make_profile(self):
        print("/tmp/hachi_view_%d.sh" % self.id)
        print("{")
        # 1 -- requires a connection to the socket for communication with zeh router.
        print("/tmp/sock_handler_%d rw" % self.id)
        # 2 -- requires a connection to the db-proxy
        print("/tmp/sock_db_%d rw" % self.id)
        # What else will it need?
        # Read permissions to the python libraries? read permissions to the application space?
        print("/usr/bin/python rix")  # execute python INSIDE of the current APP-ARMOR profile.
        print("}")
def modname(path):
"""Return a plausible module name for the patch."""
for p in sys.path:
if path.startswith(p):
base = path[len(p):]
if base.startswith("/"):
base = base[1:]
name, ext = os.path.splitext(base)
return name.replace("/",".")
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
# unapologetically ripped off from cpython tracer.py
def file_module_function_of(code):
filename = code.co_filename
if filename:
modulename = modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in _caller_cache:
if _caller_cache[code] is not None:
clsname = _caller_cache[code]
else:
_caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
# require len(func) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
_caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def view_start_tracer(frame, event, arg):
assert analysis.tracing_state == BEFORE_VIEW_STATE
if event == "return":
analysis.tracing_state = IN_VIEW_STATE
def quar_tracer(frame, event, arg):
if event == "call":
call_to = file_module_function_of(frame.f_code)
call_from = file_module_function_of(frame.f_back.f_code)
if analysis.tracing_state == NULL_STATE:
if is_view_start(call_to):
analysis.tracing_state = BEFORE_VIEW_STATE
return view_start_tracer
else:
return None
elif analysis.tracing_state == IN_VIEW_STATE:
if is_view_done(call_to):
analysis.tracing_state = NULL_STATE
return None
return None
def start_tracer(callback, args, kwargs):
import threading
global analysis
analysis = Analysis()
try:
sys.settrace(quar_tracer)
threading.settrace(quar_tracer)
try:
return callback(*args, **kwargs)
finally:
sys.settrace(None)
results = persisted.HachiAnalysis()
view_aliases = {}
referers = {}
for view_resolver_position,VD in analysis.all_views.items():
#view_name_module = file_module_function_of(view_func.func_code)[1:]
#print view_name_module
view_id = persisted.resolver_position_to_id(view_resolver_position)
view_aliases[view_id] = VD.alias
view_asserts = VD.get_merged_queries()
results.view_ids.append(view_id)
results.assertions[view_id] = view_asserts
referers[view_id] = VD.referers
persisted.write_hachi_tables(results, view_aliases)
persisted.write_hachi_referers(referers)
persisted.write_sys_path()
persisted.create_spawn_script(results)
    except IOError as err:
sys.settrace(None)
threading.settrace(None)
print ("Cannot run file %r because: %s" % (sys.argv[0], err))
sys.exit(-1)
except SystemExit:
pass
| 37.077071
| 265
| 0.601777
|
9bf6f98ce3066955f5491742cf5f5215544b0cdc
| 815
|
py
|
Python
|
tensorflow/tools/docker/jupyter_notebook_config.py
|
jylinman/tensorflow
|
5248d111c3aeaf9f560cd77bff0f183f38e31e0b
|
[
"Apache-2.0"
] | 23
|
2016-02-04T21:08:43.000Z
|
2022-01-14T13:22:33.000Z
|
tensorflow/tools/docker/jupyter_notebook_config.py
|
jylinman/tensorflow
|
5248d111c3aeaf9f560cd77bff0f183f38e31e0b
|
[
"Apache-2.0"
] | 2
|
2016-05-31T16:38:55.000Z
|
2018-12-30T20:17:05.000Z
|
tensorflow/tools/docker/jupyter_notebook_config.py
|
jylinman/tensorflow
|
5248d111c3aeaf9f560cd77bff0f183f38e31e0b
|
[
"Apache-2.0"
] | 39
|
2016-03-25T05:13:09.000Z
|
2020-06-16T01:30:53.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python2'
| 40.75
| 80
| 0.690798
|
92765ff94970c0ac85172eb056058d9f0bc34036
| 1,484
|
py
|
Python
|
var/spack/repos/builtin/packages/activeharmony/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/activeharmony/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/activeharmony/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Activeharmony(MakefilePackage):
"""Active Harmony: a framework for auto-tuning (the automated search for
values to improve the performance of a target application)."""
homepage = "https://www.dyninst.org/harmony"
url = "https://www.dyninst.org/sites/default/files/downloads/harmony/ah-4.5.tar.gz"
version('4.6.0', sha256='9ce5009cfd8e2f4cf5f3536e1fea9993414fc25920fc90d0a2cb56f044787dbb')
version('4.5', sha256='31d9990c8dd36724d336707d260aa4d976e11eaa899c4c7cc11f80a56cdac684')
patch('fix_logical_bug_in_slave_list_parsing.patch', sha256='3e000616f84de80b262efcae7559d65eed0efcd53e915580dab63b0ffbbb8bf2', when='@4.6.0')
cflags = ['-O3', '-fPIC']
def setup_build_environment(self, spack_env):
spack_env.set('CFLAGS', ' '.join(self.cflags))
@when('@:4.5')
def install(self, spec, prefix):
make("install", 'PREFIX=%s' % prefix)
@when('@4.6.0:')
def install(self, spec, prefix):
make("install")
install_tree("./bin", prefix.bin)
install("./src/harmony.cfg", prefix.bin)
install_tree("./lib", prefix.lib)
install_tree("./libexec", prefix.libexec)
install_tree("./include", prefix.include)
install_tree("./doc", prefix.doc)
| 38.051282
| 146
| 0.694744
|
43951332d3637b548093958124735a45cb0edbc4
| 9,573
|
py
|
Python
|
src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
|
Tobi-Alonso/finn
|
ea73d873e66414590f196dc71c398ba345301c24
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
|
Tobi-Alonso/finn
|
ea73d873e66414590f196dc71c398ba345301c24
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py
|
Tobi-Alonso/finn
|
ea73d873e66414590f196dc71c398ba345301c24
|
[
"BSD-3-Clause"
] | 1
|
2020-05-14T13:50:40.000Z
|
2020-05-14T13:50:40.000Z
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from finn.custom_op.fpgadataflow import HLSCustomOp
class StreamingMaxPool_Batch(HLSCustomOp):
"""Class that corresponds to finn-hlslib StreamingMaxPool_batch function."""
def get_nodeattr_types(self):
my_attrs = {
"ImgDim": ("i", True, 0),
"PoolDim": ("i", True, 0),
"NumChannels": ("i", True, 0),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
def make_shape_compatible_op(self):
pass
def infer_node_datatype(self, model):
pass
def verify_node(self):
info_messages = []
# verify number of attributes
num_of_attr = 6
if len(self.onnx_node.attribute) == num_of_attr:
info_messages.append("The number of attributes is correct")
else:
info_messages.append(
"""The number of attributes is incorrect,
{} should have {} attributes""".format(
self.onnx_node.op_type, num_of_attr
)
)
# verify that "domain" is set to "finn"
domain_value = self.onnx_node.domain
if domain_value == "finn":
info_messages.append("Attribute domain is set correctly")
else:
info_messages.append('Attribute domain should be set to "finn"')
# verify that "backend" is set to "fpgadataflow"
backend_value = self.get_nodeattr("backend")
if backend_value == "fpgadataflow":
info_messages.append("Attribute backend is set correctly")
else:
info_messages.append('Attribute backend should be set to "fpgadataflow"')
# verify that all necessary attributes exist
try:
self.get_nodeattr("code_gen_dir_npysim")
self.get_nodeattr("executable_path")
self.get_nodeattr("ImgDim")
self.get_nodeattr("PoolDim")
self.get_nodeattr("NumChannels")
info_messages.append("All necessary attributes exist")
except Exception:
info_messages.append(
"""The necessary attributes do not exist.
StreamingMaxPool_Batch needs the following attributes:
code_gen_dir_npysim, executable_path, ImgDim, PoolDim, NumChannels"""
)
# verify the number of inputs
if len(self.onnx_node.input) == 1:
info_messages.append("The number of inputs is correct")
else:
info_messages.append("""StreamingMaxPool_Batch needs 1 data input""")
return info_messages
def get_number_output_values(self):
pass
def bram_estimation(self):
pass
def lut_estimation(self):
pass
def global_includes(self):
self.code_gen_dict["$GLOBALS$"] = ['#include "maxpool.h"']
def defines(self, var):
numReps = 2
self.code_gen_dict["$DEFINES$"] = [
"""#define ImgDim {}\n #define PoolDim {}\n
#define NumChannels {}\n #define numReps {}""".format(
self.get_nodeattr("ImgDim"),
self.get_nodeattr("PoolDim"),
self.get_nodeattr("NumChannels"),
numReps,
)
]
def read_npy_data(self):
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
# c++ code to read out an npy file
# and put it in hls::stream in the correct order
self.code_gen_dict["$READNPYDATA$"] = []
input_ind = 0
input_file_names = []
for inputs in node.input:
input_file_names.append("{}/input_{}.npy".format(code_gen_dir, input_ind))
input_ind += 1
input_ind = 0
for input_file in input_file_names:
self.code_gen_dict["$READNPYDATA$"].append(
"""cnpy::NpyArray arr = cnpy::npy_load("{}");\n
float* loaded_data{} = arr.data<float>();""".format(
input_file, input_ind
)
)
self.code_gen_dict["$READNPYDATA$"].append(
"""int num_values = 1; \n
for(int i = 0; i < arr.shape.size(); i++){\n
num_values *= arr.shape[i]; \n }"""
)
self.code_gen_dict["$READNPYDATA$"].append(
"ap_uint<{}> dat;".format(self.get_nodeattr("NumChannels"))
)
self.code_gen_dict["$READNPYDATA$"].append(
"for(int i=0; i < num_values/{}; i++){{".format(
self.get_nodeattr("NumChannels")
)
)
for channel in range(self.get_nodeattr("NumChannels")):
self.code_gen_dict["$READNPYDATA$"].append(
"dat.range({},{}) = loaded_data{}[i+((num_values/{})*{})];".format(
channel,
channel,
input_ind,
self.get_nodeattr("NumChannels"),
channel,
)
)
self.code_gen_dict["$READNPYDATA$"].append("in{} << dat;".format(input_ind))
self.code_gen_dict["$READNPYDATA$"].append("}")
input_ind += 1
def strm_decl(self):
node = self.onnx_node
self.code_gen_dict["$STREAMDECLARATIONS$"] = []
input_ind = 0
for inputs in node.input:
self.code_gen_dict["$STREAMDECLARATIONS$"].append(
'hls::stream<ap_uint<{}>> in{} ("in{}");'.format(
self.get_nodeattr("NumChannels"), input_ind, input_ind
)
)
input_ind += 1
self.code_gen_dict["$STREAMDECLARATIONS$"].append(
'hls::stream<ap_uint<{}>> out ("out");'.format(
self.get_nodeattr("NumChannels")
)
)
def docompute(self):
node = self.onnx_node
self.code_gen_dict["$DOCOMPUTE$"] = [
"{}<ImgDim, PoolDim, NumChannels>(in0, out, numReps);".format(node.op_type)
]
def dataoutstrm(self):
self.code_gen_dict["$DATAOUTSTREAM$"] = [
"ap_uint<{}> out_data;\n std::vector<ap_uint<{}>> out_data_vector;".format(
self.get_nodeattr("NumChannels"), self.get_nodeattr("NumChannels")
)
]
self.code_gen_dict["$DATAOUTSTREAM$"].append("while(out.read_nb(out_data)){")
self.code_gen_dict["$DATAOUTSTREAM$"].append(
"out_data_vector.push_back(out_data);\n}"
)
self.code_gen_dict["$DATAOUTSTREAM$"].append(
"std::vector<float> output_data_vector;"
)
self.code_gen_dict["$DATAOUTSTREAM$"].append(
"""for(std::vector<ap_uint<{}>>::iterator it = out_data_vector.begin();
it != out_data_vector.end(); ++it){{""".format(
self.get_nodeattr("NumChannels")
)
)
self.code_gen_dict["$DATAOUTSTREAM$"].append(
"ap_uint<{}> output_data = *it;".format(self.get_nodeattr("NumChannels"))
)
for channel in range(self.get_nodeattr("NumChannels")):
self.code_gen_dict["$DATAOUTSTREAM$"].append(
"output_data_vector.push_back(output_data.range({},{}));".format(
channel, channel
)
)
self.code_gen_dict["$DATAOUTSTREAM$"].append("}")
def save_as_npy(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_npysim")
numReps = 1
self.code_gen_dict["$SAVEASCNPY$"] = [
"""cnpy::npy_save("{}/output.npy",&output_data_vector[0],
{{{},{},{}}},"w");""".format(
code_gen_dir,
numReps,
self.get_nodeattr("NumChannels"),
int(self.get_nodeattr("ImgDim") / self.get_nodeattr("PoolDim")),
int(self.get_nodeattr("ImgDim") / self.get_nodeattr("PoolDim")),
)
]
def blackboxfunction(self):
pass
def pragmas(self):
pass
| 39.073469
| 88
| 0.583307
|
a1327fbb1a2052550191ec5c2c3ab213e48e187d
| 4,248
|
py
|
Python
|
python/src/nnabla/utils/converter/tensorflow/common.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
python/src/nnabla/utils/converter/tensorflow/common.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
python/src/nnabla/utils/converter/tensorflow/common.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from .refine_graph import RefineGraph
from .refine_parser import RefineParser
def _strip_node_name(name):
if name.startswith("^"):
return name[1:]
else:
return name.split(":")[0]
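# e.g. _strip_node_name("^init_op") -> "init_op";
#      _strip_node_name("conv1:0") -> "conv1"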
def find_out_terminal_node(graph_def, **kwargs):
def add_postfix(names):
return ["{}:0".format(n) for n in names]
unlike_output_types = ["Const", "Assign", "NoOp", "Placeholder"]
terminal_inputs = []
terminal_outputs = []
input_cnt = collections.Counter()
need_add_postfix = kwargs.get("postfix", False)
for node in graph_def.node:
for input in node.input:
input = _strip_node_name(input)
input_cnt[input] += 1
if node.op == 'Placeholder':
strip_name = _strip_node_name(node.name)
terminal_inputs.append(strip_name)
for node in graph_def.node:
if input_cnt[node.name] == 0 and node.op not in unlike_output_types:
terminal_outputs.append(node.name)
if need_add_postfix:
terminal_inputs = add_postfix(terminal_inputs)
terminal_outputs = add_postfix(terminal_outputs)
return terminal_inputs, terminal_outputs
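# Behaviour sketch on a linear graph (hypothetical node names): for
# Placeholder "x" -> Relu "act" -> Identity "y", the function returns
# (["x"], ["y"]) -- or (["x:0"], ["y:0"]) with postfix=True -- since "x" is
# the only Placeholder and "y" is never consumed as another node's input.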
def check_optimization_criteria(nnp, batch_size):
def find_network(nnp, exe):
net = None
for network in nnp.protobuf.network:
if network.name == exe.network_name:
net = network
return net
def get_input_info(exec_info, network):
input_dict = collections.OrderedDict()
for v in exec_info.data_variable:
input_dict[v.variable_name] = []
for v in network.variable:
if v.name in input_dict:
shape = v.shape.dim
input_dict[v.name] = [
x if x > 0 else batch_size for x in shape]
return input_dict
state = {
'NCHW_TO_NHWC': {
'doc': "Convert the NCHW format to NHWC, and remove the extra nodes",
'status': True
}
}
func_list = ['Convolution', 'Deconvolution', 'MaxPooling', 'AveragePooling',
'SumPooling', 'Unpooling', 'Interpolate', 'RandomErase', 'MaxPoolingBackward']
func_cnt = collections.Counter()
exec_info = nnp.protobuf.executor[0]
network = find_network(nnp, exec_info)
input_dict = get_input_info(exec_info, network)
for k, shape in input_dict.items():
if len(shape) != 4:
state['NCHW_TO_NHWC']['status'] = False
break
for func in network.function:
if func.type in func_list:
func_cnt[func.type] += 1
for inp in func.input:
if inp in input_dict and len(func.ListFields()) > 4 \
and hasattr(func.ListFields()[-1][1], 'base_axis') \
and func.ListFields()[-1][1].base_axis != 1:
state['NCHW_TO_NHWC']['status'] = False
break
if len(func_cnt) == 0:
state['NCHW_TO_NHWC']['status'] = False
return state
class OptimizePb:
def __init__(self, graph_def):
self._graph_def = graph_def
def execute(self):
self._refine_graph = RefineGraph(self._graph_def)
self._refine_parser = RefineParser(self._refine_graph)
self._refine_graph.prepare()
self._refine_parser.parse()
return self
def export_graph_def(self):
return self._refine_graph.export_graph_def()
def export_to_file(self, output_file):
self._refine_graph.save_back(output_file)
def get_optimization_rate(self):
return self._refine_graph.export_optimization_rate()
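# Typical call pattern implied by the fluent API above (the file name is a
# placeholder):
#   optimizer = OptimizePb(graph_def).execute()
#   rate = optimizer.get_optimization_rate()
#   optimizer.export_to_file('model_optimized.pb')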
| 33.714286
| 95
| 0.643832
|
a7b585ffb94e503a601d50fb296e98eb0656575c
| 41
|
py
|
Python
|
src/lib/formatter.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/formatter.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/formatter.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
import _sk_fail; _sk_fail._("formatter")
| 20.5
| 40
| 0.780488
|
c892c14afb903836e7c86e4701e0ff68258c111c
| 100
|
py
|
Python
|
tutorial/snippets/apps.py
|
usadamasa/drf-example
|
e72b8a4d78575c3b291ac1650b10ccd66fbbd046
|
[
"MIT"
] | null | null | null |
tutorial/snippets/apps.py
|
usadamasa/drf-example
|
e72b8a4d78575c3b291ac1650b10ccd66fbbd046
|
[
"MIT"
] | null | null | null |
tutorial/snippets/apps.py
|
usadamasa/drf-example
|
e72b8a4d78575c3b291ac1650b10ccd66fbbd046
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class SnippetsConfig(AppConfig):
name = 'tutorial.snippets'
| 16.666667
| 33
| 0.77
|
fdd7b85942f6283a7c89392e3900c34c57cd515d
| 366
|
py
|
Python
|
lecture_01/hw/task02.py
|
AlekseiAfanasev/epam_python_autumn_2020
|
7e954aca381e258ca8348db1770f391c1e2bce44
|
[
"MIT"
] | null | null | null |
lecture_01/hw/task02.py
|
AlekseiAfanasev/epam_python_autumn_2020
|
7e954aca381e258ca8348db1770f391c1e2bce44
|
[
"MIT"
] | null | null | null |
lecture_01/hw/task02.py
|
AlekseiAfanasev/epam_python_autumn_2020
|
7e954aca381e258ca8348db1770f391c1e2bce44
|
[
"MIT"
] | 1
|
2022-01-02T11:47:29.000Z
|
2022-01-02T11:47:29.000Z
|
"""
Given a cell with "it's a fib sequence" from slideshow,
please write function "check_fib", which accepts a Sequence of integers, and
returns whether the given sequence is a Fibonacci sequence
We guarantee, that the given sequence contain >= 0 integers inside.
"""
from typing import Sequence
def check_fibonacci(data: Sequence[int]) -> bool:
...
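# A minimal sketch of one possible implementation (illustrative only -- the
# stub above intentionally leaves the body to the student). It assumes an
# empty input is not a valid Fibonacci sequence and anchors the match at the
# first Fibonacci number >= data[0]; the duplicated 1 at the start of the
# canonical sequence is a known edge case left out of this sketch.
def _check_fibonacci_sketch(data: Sequence[int]) -> bool:
    if not data:
        return False
    a, b = 0, 1
    # advance to the first Fibonacci number that could match data[0]
    while a < data[0]:
        a, b = b, a + b
    # every element must then follow the sequence exactly
    for value in data:
        if value != a:
            return False
        a, b = b, a + b
    return True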
| 26.142857
| 80
| 0.73224
|
144b383d806812a6b5766a7091783c957c604d26
| 2,840
|
py
|
Python
|
parlai/tasks/ms_marco/build.py
|
ysglh/ParlAI
|
e0f16e9168839be12f72d3431b9819cf3d51fe10
|
[
"BSD-3-Clause"
] | 2
|
2017-10-06T09:56:49.000Z
|
2017-10-06T09:57:03.000Z
|
parlai/tasks/ms_marco/build.py
|
ysglh/ParlAI
|
e0f16e9168839be12f72d3431b9819cf3d51fe10
|
[
"BSD-3-Clause"
] | 1
|
2018-03-08T20:44:39.000Z
|
2018-03-08T23:49:29.000Z
|
parlai/tasks/ms_marco/build.py
|
ysglh/ParlAI
|
e0f16e9168839be12f72d3431b9819cf3d51fe10
|
[
"BSD-3-Clause"
] | 2
|
2017-10-06T09:57:04.000Z
|
2018-11-08T13:45:47.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
import gzip
import json
import os
import parlai.core.build_data as build_data
def read_gz(filename, delete_gz=True):
f = gzip.open(filename, 'rb')
lines = [x.decode('utf-8') for x in f.readlines()]
if delete_gz:
os.remove(filename)
return lines
def create_fb_format(outpath, dtype, inpath):
print('building fbformat:' + dtype)
lines = read_gz(inpath)
# save the raw json version for span selection task (default)
fout1 = open(os.path.join(outpath, dtype + '.txt'), 'w')
for line in lines:
fout1.write(line.rstrip("\n") + "\n")
fout1.close()
# save the file for passage selection task
fout2 = open(os.path.join(outpath, dtype + '.passage.txt'), 'w')
for line in lines:
dic = json.loads(line)
lq = dic["query"]
if dtype != "test":
ans = "|".join([d["passage_text"] for d in dic["passages"] if d["is_selected"] == 1])
cands = "|".join([d["passage_text"] for d in dic["passages"] if d["is_selected"] == 0])
cands = ans + "|" + cands
if ans == "": continue # if no true label, skip for now
else: # ground truth for test data is not available yet
ans = ""
cands = "|".join([d["passage_text"] for d in dic["passages"]])
s = '1 ' + lq + '\t' + ans.lstrip("|") + '\t\t' + cands
fout2.write(s + '\n')
fout2.close()
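    # Each line written above follows ParlAI's fbformat, e.g. (hypothetical data):
    #   1 who wrote hamlet?<TAB>shakespeare passage<TAB><TAB>shakespeare passage|other passage
    # i.e. "<turn> <query>", the answer(s), an empty reward field, then the
    # "|"-separated candidate passages.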
def build(opt):
dpath = os.path.join(opt['datapath'], 'MS_MARCO')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data
url = "https://msmarco.blob.core.windows.net/msmarco/"
fname = "train_v1.1.json.gz"
build_data.download(url + fname, dpath, 'train.gz')
fname = "dev_v1.1.json.gz"
build_data.download(url + fname, dpath, 'valid.gz')
fname = "test_public_v1.1.json.gz"
build_data.download(url + fname, dpath, 'test.gz')
create_fb_format(dpath, "train", os.path.join(dpath, 'train.gz'))
create_fb_format(dpath, "valid", os.path.join(dpath, 'valid.gz'))
create_fb_format(dpath, "test", os.path.join(dpath, 'test.gz'))
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
| 35.5
| 99
| 0.615493
|
00cfc6b0427acfcfda03652ce139acc35c381fdd
| 43
|
py
|
Python
|
tasks/R2R-judy/src/__init__.py
|
IMNearth/Curriculum-Learning-For-VLN
|
d2fe1286eb295dc8c63a0c886b35883f32481d85
|
[
"MIT"
] | 8
|
2021-11-09T13:29:19.000Z
|
2022-03-30T04:01:42.000Z
|
tasks/R2R-judy/src/__init__.py
|
IMNearth/Curriculum-Learning-For-VLN
|
d2fe1286eb295dc8c63a0c886b35883f32481d85
|
[
"MIT"
] | 1
|
2022-03-17T14:16:44.000Z
|
2022-03-29T03:16:32.000Z
|
tasks/R2R-judy/src/__init__.py
|
IMNearth/Curriculum-Learning-For-VLN
|
d2fe1286eb295dc8c63a0c886b35883f32481d85
|
[
"MIT"
] | null | null | null |
from . import utils, agent, engine, environ
| 43
| 43
| 0.767442
|
8b529fa33b6514d8cb550373e889bada5d128511
| 2,717
|
py
|
Python
|
venv/Lib/site-packages/nipype/interfaces/slicer/quantification/changequantification.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 585
|
2015-01-12T16:06:47.000Z
|
2022-03-26T14:51:08.000Z
|
nipype/interfaces/slicer/quantification/changequantification.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 2,329
|
2015-01-01T09:56:41.000Z
|
2022-03-30T14:24:49.000Z
|
nipype/interfaces/slicer/quantification/changequantification.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 487
|
2015-01-20T01:04:52.000Z
|
2022-03-21T21:22:47.000Z
|
# -*- coding: utf-8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
SEMLikeCommandLine,
TraitedSpec,
File,
Directory,
traits,
isdefined,
InputMultiPath,
OutputMultiPath,
)
import os
class IntensityDifferenceMetricInputSpec(CommandLineInputSpec):
sensitivityThreshold = traits.Float(
desc="This parameter should be between 0 and 1, and defines how sensitive the metric should be to the intensity changes.",
argstr="--sensitivityThreshold %f",
)
changingBandSize = traits.Int(
desc="How far (in mm) from the boundary of the segmentation should the intensity changes be considered.",
argstr="--changingBandSize %d",
)
baselineVolume = File(
position=-4, desc="Baseline volume to be compared to", exists=True, argstr="%s"
)
baselineSegmentationVolume = File(
position=-3,
desc="Label volume that contains segmentation of the structure of interest in the baseline volume.",
exists=True,
argstr="%s",
)
followupVolume = File(
position=-2,
desc="Followup volume to be compare to the baseline",
exists=True,
argstr="%s",
)
outputVolume = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Output volume to keep the results of change quantification.",
argstr="%s",
)
reportFileName = traits.Either(
traits.Bool,
File(),
hash_files=False,
desc="Report file name",
argstr="--reportFileName %s",
)
class IntensityDifferenceMetricOutputSpec(TraitedSpec):
outputVolume = File(
position=-1,
desc="Output volume to keep the results of change quantification.",
exists=True,
)
reportFileName = File(desc="Report file name", exists=True)
class IntensityDifferenceMetric(SEMLikeCommandLine):
"""title:
Intensity Difference Change Detection (FAST)
category:
Quantification.ChangeQuantification
description:
Quantifies the changes between two spatially aligned images based on the pixel-wise difference of image intensities.
version: 0.1
contributor: Andrey Fedorov
acknowledgements:
"""
input_spec = IntensityDifferenceMetricInputSpec
output_spec = IntensityDifferenceMetricOutputSpec
_cmd = "IntensityDifferenceMetric "
_outputs_filenames = {
"outputVolume": "outputVolume.nii",
"reportFileName": "reportFileName",
}
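# Hypothetical usage sketch (file names are placeholders; running it requires
# the Slicer "IntensityDifferenceMetric" CLI on PATH):
#   idm = IntensityDifferenceMetric()
#   idm.inputs.baselineVolume = 'baseline.nrrd'
#   idm.inputs.baselineSegmentationVolume = 'baseline_label.nrrd'
#   idm.inputs.followupVolume = 'followup.nrrd'
#   idm.inputs.outputVolume = True  # auto-named via _outputs_filenames
#   result = idm.run()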
| 27.444444
| 130
| 0.662127
|
f2cdd9fcb1fdb7f4741692a7075a420e03a53fb0
| 3,108
|
py
|
Python
|
stock-filters/colan_draw_box_lines.py
|
bi3mer/GDMC
|
6c619cbf907d8de17f9bd7b1390849201e977581
|
[
"ISC"
] | null | null | null |
stock-filters/colan_draw_box_lines.py
|
bi3mer/GDMC
|
6c619cbf907d8de17f9bd7b1390849201e977581
|
[
"ISC"
] | null | null | null |
stock-filters/colan_draw_box_lines.py
|
bi3mer/GDMC
|
6c619cbf907d8de17f9bd7b1390849201e977581
|
[
"ISC"
] | null | null | null |
from pymclevel import alphaMaterials, MCSchematic, MCLevel, BoundingBox
from pymclevel.box import Vector
from mcplatform import *
from pprint import pprint
inputs = (
("Replace All", "label"),
("Material", alphaMaterials.CoalBlock),
("Creator: Colan Biemer", "label")
)
def vector_equals(v1, v2):
return v1.x == v2.x and v1.y == v2.y and v1.z == v2.z
def manhattan_distance(start, end):
return abs(end.x - start.x) + abs(end.y - start.y) + abs(end.z - start.z)
def draw_block_at(level, x, y, z, material):
    # set a single block by explicit coordinates
    level.setBlockAt(x, y, z, material.ID)
    level.setBlockDataAt(x, y, z, 0)
def draw_block(level, point, material):
    # set a single block at a Vector point
    level.setBlockAt(point.x, point.y, point.z, material.ID)
    level.setBlockDataAt(point.x, point.y, point.z, 0)
def fill_box(level, origin, size, material):
    final_x = origin.x + size.x
    final_y = origin.y + size.y
    final_z = origin.z + size.z
    for x in range(min(origin.x, final_x), max(origin.x, final_x)):
        for y in range(min(origin.y, final_y), max(origin.y, final_y)):
            for z in range(min(origin.z, final_z), max(origin.z, final_z)):
                draw_block_at(level, x, y, z, material)
def draw_line(level, start, end, material):
directions = [(1,0,0),(-1,0,0),(0,1,0),(0,-1,0),(0,0,1),(0,0,-1),\
(1,1,0),(-1,1,0),(1,-1,0),(-1,-1,0),(0,1,1),(0,-1,1),\
(0,1,-1),(0,-1,-1),(1,0,1),(-1,0,1),(1,0,-1),(-1,0,-1),\
(1,1,1),(-1,1,1),(1,-1,1),(1,1,-1),(-1,-1,1),(-1,1,-1),\
(1,-1,-1),(-1,-1,-1)]
draw_block(level, start, material)
while not vector_equals(start, end):
new_s = start + directions[0]
dist = manhattan_distance(start, end)
for i in range(1, len(directions)):
s = start + directions[i]
d = manhattan_distance(s, end)
if d < dist:
new_s = s
dist = d
start = new_s
draw_block(level, start, material)
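# Example: a line from (0, 0, 0) to (2, 2, 0) takes the diagonal step
# (1, 1, 0) twice, since that candidate shrinks the Manhattan distance fastest
# at each greedy step.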
def draw_box_outline(level, box, material):
point_1 = box.origin
point_2 = Vector(box.origin.x + box.size.x, box.origin.y, box.origin.z)
point_3 = Vector(box.origin.x, box.origin.y + box.size.y, box.origin.z)
point_4 = Vector(box.origin.x, box.origin.y, box.origin.z + box.size.z)
point_5 = Vector(box.origin.x + box.size.x, box.origin.y + box.size.y, box.origin.z)
point_6 = Vector(box.origin.x + box.size.x, box.origin.y, box.origin.z + box.size.z)
point_7 = Vector(box.origin.x, box.origin.y + box.size.y, box.origin.z + box.size.z,)
point_8 = Vector(box.origin.x + box.size.x, box.origin.y + box.size.y, box.origin.z + box.size.z)
draw_line(level, point_1, point_2, material)
draw_line(level, point_1, point_3, material)
draw_line(level, point_1, point_4, material)
draw_line(level, point_2, point_6, material)
draw_line(level, point_4, point_6, material)
draw_line(level, point_3, point_7, material)
draw_line(level, point_4, point_7, material)
draw_line(level, point_7, point_8, material)
draw_line(level, point_6, point_8, material)
draw_line(level, point_8, point_5, material)
draw_line(level, point_5, point_2, material)
draw_line(level, point_5, point_3, material)
def perform(level, box, options):
draw_box_outline(level, box, options["Material"])
| 36.564706
| 98
| 0.665701
|
1663182b6a43b44b1d46291357c2c476fdfb9001
| 477
|
py
|
Python
|
languages/python/design_restricter_class.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-04-09T04:15:24.000Z
|
2021-04-09T04:15:24.000Z
|
languages/python/design_restricter_class.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | null | null | null |
languages/python/design_restricter_class.py
|
Andilyn/learntosolveit
|
fd15345c74ef543e4e26f4691bf91cb6dac568a4
|
[
"BSD-3-Clause"
] | 1
|
2021-07-31T02:45:29.000Z
|
2021-07-31T02:45:29.000Z
|
class RestrictingWrapper(object):
def __init__(self, obj, to_block):
self._obj = obj
self._to_block = to_block
def __getattr__(self, name):
if name in self._to_block:
raise AttributeError(name)
return getattr(self._obj, name)
class Foo(object):
def __init__(self, x, y, z):
self.x, self.y, self.z = x, y, z
f1 = Foo(1, 2, 3)
print(f1.x, f1.y, f1.z)
f2 = RestrictingWrapper(f1, "z")
print(f2.x, f2.y)
print(f2.z)  # raises AttributeError: "z" is blocked by the wrapper
| 22.714286
| 40
| 0.612159
|
60d6182d32a4373baae72b9b41a85dfde4a5e081
| 948
|
py
|
Python
|
xwavecal/utils/basic_utils.py
|
gmbrandt/echelle
|
7e6678cd541ccf025fc187eca7f1344efe85f265
|
[
"MIT"
] | null | null | null |
xwavecal/utils/basic_utils.py
|
gmbrandt/echelle
|
7e6678cd541ccf025fc187eca7f1344efe85f265
|
[
"MIT"
] | null | null | null |
xwavecal/utils/basic_utils.py
|
gmbrandt/echelle
|
7e6678cd541ccf025fc187eca7f1344efe85f265
|
[
"MIT"
] | null | null | null |
import numpy as np


def median_subtract_channels_y(data, num_channels):
    """
    :param data: array_like
                 Input array. Must be 2D.
    :param num_channels: The number of readout channels along axis=0 (rows).
    :return: ndarray
             Input array with each horizontal slice of size
             data.shape[0]/num_channels subtracted by its median.

    Examples
    --------
    >>> import numpy as np
    >>> a = (np.arange(3) * np.ones((3, 3))).T
    >>> a
    array([[0., 0., 0.],
           [1., 1., 1.],
           [2., 2., 2.]])
    >>> median_subtract_channels_y(a, 3)
    array([[0., 0., 0.],
           [0., 0., 0.],
           [0., 0., 0.]])
    """
    # split the rows into num_channels equal slices and take each slice's median
    reshaped = data.reshape(num_channels, data.shape[1], -1)
    medians = np.array([np.median(readout_channel) for readout_channel in reshaped])
    return np.subtract(reshaped, medians.reshape(num_channels, 1, 1)).reshape(data.shape)
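# A quick usage sketch (illustrative values only) with two readout channels,
# to make the per-channel behavior concrete: rows 0-1 form channel 0 and
# rows 2-3 form channel 1, and each half is zeroed by its own median.
if __name__ == '__main__':
    frame = np.array([[1., 1., 1.],
                      [1., 1., 1.],
                      [5., 5., 5.],
                      [5., 5., 5.]])
    print(median_subtract_channels_y(frame, num_channels=2))  # all zeros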
| 32.689655
| 89
| 0.562236
|
e82e997d0e8d9f45fc3b878ca9c2a19e558a16ed
| 5,157
|
py
|
Python
|
openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0047.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0047.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/DML/copy/Opengauss_Function_DML_Copy_Case0047.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 拷贝数据
Case Name : /copy from 对非法字符无容错能力
Description :
1.创建测试表并插入数据
DROP TABLE IF EXISTS TESTZL;
CREATE TABLE TESTZL(
SK INTEGER,ID CHAR(16),NAME VARCHAR(20),SQ_FT INTEGER);
INSERT INTO TESTZL VALUES (001,'SK1','TT',3332);
INSERT INTO TESTZL VALUES (001,'SK1','TT',3332);
INSERT INTO TESTZL VALUES (001,'SK1','TT',3332);
2.构造步骤1表的数据文件,使用copy to把表数据拷贝到文件
touch /opt/openGauss/cluster/dn1/testzl.dat;
COPY TESTZL TO '/opt/openGauss/cluster/dn1/testzl.dat';
3.将步骤2文件部分数据修改为非法字符\0
sed -i 's/SK1/\\0/g' "/opt/openGauss/cluster/dn1/testzl.dat";
4.使用\copy from将步骤3修改后的数据文件导入到表中
\COPY TESTZL FROM '/opt/openGauss/cluster/dn1/pg_copydir/testzl.dat';
5.清理环境
DROP TABLE IF EXISTS TESTZL;
rm -rf /opt/openGauss/cluster/dn1/testzl.dat
Expect :
1.成功
2.构造成功
3.修改成功
4.拷贝失败
invalid byte sequence for encoding "SQL_ASCII"
5.清理环境成功
History :
"""
import os
import unittest

from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro


class CopyFile(unittest.TestCase):
    def setUp(self):
        self.log = Logger()
        self.log.info('----Opengauss_Function_DML_Copy_Case0047 starts----')
        self.pri_node = Node('PrimaryDbUser')
        self.pri_sh = CommonSH('PrimaryDbUser')
        self.constant = Constant()
        self.invalid_char = r'\\0'
        self.t_name = 't_dml_copy_case0047'
        self.copy_path = os.path.join(macro.DB_INSTANCE_PATH,
                                      'dir_dml_copy_case0047')
        self.copy_file = os.path.join(self.copy_path, 'dml_copy_case0047.dat')

    def test_copy_file(self):
        step_txt = '----step1: create the test table and insert data, expect: success----'
        self.log.info(step_txt)
        sql_cmd = f"DROP TABLE IF EXISTS {self.t_name};" \
                  f"CREATE TABLE {self.t_name}(" \
                  f"SK INTEGER,ID CHAR(16),NAME VARCHAR(20),SQ_FT INTEGER);" \
                  f"INSERT INTO {self.t_name} VALUES (001,'SK1','TT',3332);" \
                  f"INSERT INTO {self.t_name} VALUES (001,'SK1','TT',3332);" \
                  f"INSERT INTO {self.t_name} VALUES (001,'SK1','TT',3332);"
        msg = self.pri_sh.execut_db_sql(sql_cmd)
        self.log.info(msg)
        self.assertIn(self.constant.INSERT_SUCCESS_MSG, msg,
                      'step failed: ' + step_txt)

        step_txt = '----step2: dump the table from step 1 to a data file with COPY TO, expect: success----'
        self.log.info(step_txt)
        execute_cmd = f'mkdir {self.copy_path};' \
                      f'touch {self.copy_file};'
        self.log.info(execute_cmd)
        msg = self.pri_node.sh(execute_cmd).result()
        self.log.info(msg)
        self.assertEqual(msg, '', 'step failed: ' + step_txt)
        copy_to_sql = f"COPY {self.t_name} TO '{self.copy_file}';"
        copy_to_msg = self.pri_sh.execut_db_sql(copy_to_sql)
        self.log.info(copy_to_msg)
        self.assertIn('COPY 3', copy_to_msg, 'step failed: ' + step_txt)

        step_txt = r'----step3: replace part of the file data with the illegal character \0, expect: success----'
        self.log.info(step_txt)
        execute_cmd = f"sed -i 's/SK1/{self.invalid_char}/g' " \
                      f"{self.copy_file};" \
                      f"cat {self.copy_file};"
        self.log.info(execute_cmd)
        msg = self.pri_node.sh(execute_cmd).result()
        self.log.info(msg)
        self.assertIn('1\t\\0', msg, 'step failed: ' + step_txt)

        step_txt = r'----step4: import the modified file back into the table with \copy from, expect: copy fails----'
        self.log.info(step_txt)
        copy_from_sql = rf"\COPY {self.t_name} FROM '{self.copy_file}';"
        copy_from_msg = self.pri_sh.execut_db_sql(copy_from_sql)
        self.log.info(copy_from_msg)
        self.assertIn(self.constant.COPY_ENCODING_ERROR_MSG, copy_from_msg,
                      'step failed: ' + step_txt)

    def tearDown(self):
        self.log.info('----step5: clean up the environment----')
        text_1 = '----remove the data file, expect: success----'
        self.log.info(text_1)
        rm_cmd = f'rm -rf {self.copy_path}; '
        self.log.info(rm_cmd)
        rm_msg = self.pri_node.sh(rm_cmd).result()
        self.log.info(rm_msg)
        text_2 = '----drop the table, expect: success----'
        self.log.info(text_2)
        drop_sql = f'DROP TABLE IF EXISTS {self.t_name};'
        drop_msg = self.pri_sh.execut_db_sql(drop_sql)
        self.log.info(drop_msg)
        self.log.info('----Opengauss_Function_DML_Copy_Case0047 finished----')
        self.log.info('----asserting that tearDown succeeded----')
        self.assertEqual(rm_msg, '', 'step failed: ' + text_1)
        self.assertIn(self.constant.DROP_TABLE_SUCCESS, drop_msg,
                      'step failed: ' + text_2)
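# Why step 4 fails: assuming openGauss follows PostgreSQL's COPY text format,
# a backslash followed by octal digits is decoded into a raw byte, so the two
# literal characters '\0' written by sed become a NUL byte on import, which
# the server rejects for every encoding, SQL_ASCII included. A minimal,
# hypothetical pure-Python stand-in for the sed step (path is illustrative):
from pathlib import Path

def corrupt_copy_file(path):
    dump = Path(path)
    # '\\0' is backslash + zero, exactly what sed 's/SK1/\\0/g' writes
    dump.write_text(dump.read_text().replace('SK1', '\\0'))

# corrupt_copy_file('/opt/openGauss/cluster/dn1/testzl.dat')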
| 38.485075
| 84
| 0.632538
|