Dataset columns (type and observed range, per the viewer header):

| column | type | range / values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |

Rows (one per source file) follow as metadata, file content, and per-file statistics.
hexsha: ba8f3e37ec4368570a4ebb46bfcac5258a4c23c5 | size: 1,952 | ext: py | lang: Python
max_stars: path extracting/main.py | repo Das-Boot/eason | head 2d50b3e1738e483284a487eb5a2f273b54f1c3a0 | licenses ["Apache-2.0"] | count 2 | events 2020-12-02T11:21:20.000Z to 2021-08-30T01:58:53.000Z
max_issues: path extracting/main.py | repo Das-Boot/eason | head 2d50b3e1738e483284a487eb5a2f273b54f1c3a0 | licenses ["Apache-2.0"] | count null | events null
max_forks: path extracting/main.py | repo Das-Boot/eason | head 2d50b3e1738e483284a487eb5a2f273b54f1c3a0 | licenses ["Apache-2.0"] | count null | events null
content:
# -*- coding: utf-8 -*-
'''
Author: Zhaoning Li
'''
from model import *
from util import *
import argparse


def initialize(args):
    extractor = ChineseClinicalExtracter(args).slm()
    extractor.load_weights(args.modelPath)
    return extractor


def predict(extractor, predText, args):
    index2tag = {
        0: 'O',
        1: 'B-position',
        2: 'B-quantifier',
        3: 'B-unusual',
        4: 'I-position',
        5: 'I-quantifier',
        6: 'I-unusual'
    }
    predSent = splitText(predText)
    predSent = [list(''.join(s)) for s in predSent]
    maxlen = max([len(i) for i in predSent]) + 2
    predict_generator = DataGenerator(args,
                                      [i for i in range(len(predSent))],
                                      x=predSent,
                                      maxlen=maxlen)
    probas = extractor.predict_generator(
        predict_generator, verbose=1)[:len(predSent)]
    tagResult = decode(extractor, probas)
    return generateResult(predText, tagResult), [[index2tag[i] for i in t] for t in tagResult]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-ep', '--erniePath', type=str, default="your_ernie_path")
    parser.add_argument('-mp', '--modelPath', type=str, default="your_weights_path")
    parser.add_argument('-cuda', '--cuda_devices', type=int, default=0)
    parser.add_argument('-cpu', '--cpu_core', type=int, default=1)
    parser.add_argument('-bs', '--batch_size', type=int, default=16, help="")
    args = parser.parse_args()
    predText = ['胸廓对称。两肺纹理增著,肺实质未见明显异常。两肺门不大,纵隔未见增宽。心影大小、形态、位置未见异常。主动脉迂曲增宽,弓部见钙化影。气管及两侧主支气管通畅。两膈面光滑,两侧肋膈角锐利。两侧肋骨未见异常。胸11、12椎体成形术后改变,内见高密度骨水泥充填。', '对比2017-12-26胸片:右上肺见一类圆形肿块影,边界欠清,边缘不光整,似可见短毛刺,周围胸膜牵拉凹陷;余肺可见弥漫多发斑点、结节影,部分边缘模糊,部分边缘尚清。右胸膜增厚。两肺门稍增著,纵隔未见明显增宽。心影不大,主动脉增宽迂曲,弓部可见钙化影。大气道通畅。右膈上抬,两膈面尚清,两侧肋膈角变钝,右侧为著。——所见大致同前']
    extractor = initialize(args)
    print(predict(extractor, predText, args))
avg_line_length: 38.27451 | max_line_length: 313 | alphanum_fraction: 0.638832
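The helpers used in extracting/main.py above (`splitText`, `DataGenerator`, `decode`, `generateResult`) come from the repository's `model` and `util` modules and are not part of this file. As a rough, standalone sketch of what decoding with the `index2tag` map amounts to, here is a hypothetical BIO span-merging helper (an assumption for illustration, not the repository's implementation):

```python
# Hypothetical BIO decoder: turn per-character tag indices into (text, label) spans.
index2tag = {0: 'O', 1: 'B-position', 2: 'B-quantifier', 3: 'B-unusual',
             4: 'I-position', 5: 'I-quantifier', 6: 'I-unusual'}

def bio_spans(chars, tag_ids):
    spans, start, label = [], None, None
    for i, t in enumerate(tag_ids + [0]):          # trailing 'O' flushes the last span
        tag = index2tag.get(t, 'O')
        if tag.startswith('B-') or tag == 'O' or (label and tag[2:] != label):
            if start is not None:                  # close the span that was open
                spans.append((''.join(chars[start:i]), label))
                start, label = None, None
        if tag.startswith('B-'):                   # open a new span
            start, label = i, tag[2:]
    return spans

print(bio_spans(list("两肺纹理增多"), [1, 4, 3, 6, 6, 6]))
# -> [('两肺', 'position'), ('纹理增多', 'unusual')]
```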
hexsha: 9382d73c1cc3e8a597d5c199732f483ebf444b8e | size: 671 | ext: py | lang: Python
max_stars: path application/main/utility/config_loader/read_yaml.py | repo willuvbb/test_fastapi_template | head 2d0c0b0526c7e490dd85905c03d7537cdc45f01f | licenses ["MIT"] | count 128 | events 2021-05-14T14:31:05.000Z to 2022-03-21T14:46:16.000Z
max_issues: path application/main/utility/config_loader/read_yaml.py | repo willuvbb/test_fastapi_template | head 2d0c0b0526c7e490dd85905c03d7537cdc45f01f | licenses ["MIT"] | count 3 | events 2021-05-15T22:00:07.000Z to 2021-11-16T07:09:31.000Z
max_forks: path application/main/utility/config_loader/read_yaml.py | repo willuvbb/test_fastapi_template | head 2d0c0b0526c7e490dd85905c03d7537cdc45f01f | licenses ["MIT"] | count 20 | events 2021-05-14T16:11:22.000Z to 2022-01-26T12:51:21.000Z
content:
from pathlib import Path

import yaml

from application.main.config import settings
from application.main.utility.config_loader.config_interface import ConfigReaderInterface
from application.main.utility.config_loader.serializer import Struct


class YamlConfigReader(ConfigReaderInterface):

    def __init__(self):
        super(YamlConfigReader, self).__init__()

    def read_config_from_file(self, config_filename: str):
        conf_path = Path(__file__).joinpath(settings.APP_CONFIG.SETTINGS_DIR, config_filename)
        with open(conf_path) as file:
            config = yaml.safe_load(file)
        config_object = Struct(**config)
        return config_object
avg_line_length: 31.952381 | max_line_length: 94 | alphanum_fraction: 0.764531
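`settings` and `Struct` come from the template's other modules and are not included in this row. A minimal self-contained sketch of the same read-YAML-into-attributes pattern, with a stand-in `Struct` whose behaviour is assumed (requires PyYAML):

```python
# Stand-in sketch: the real serializer.Struct is not shown in this row.
import yaml

class Struct:
    def __init__(self, **entries):
        self.__dict__.update(entries)   # expose top-level YAML keys as attributes

def read_config_from_text(text: str) -> Struct:
    return Struct(**yaml.safe_load(text))

cfg = read_config_from_text("model:\n  name: demo\nthreshold: 0.5\n")
print(cfg.threshold, cfg.model["name"])   # 0.5 demo
```

One design note: `Path(__file__).joinpath(...)` in the original only resolves to a sensible location when `settings.APP_CONFIG.SETTINGS_DIR` is an absolute path, since pathlib then discards the preceding module path; with a relative value it would join onto `read_yaml.py` itself.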
hexsha: 8a32c18d700cf38aa3cf4df404a37cc7711b172a | size: 15,724 | ext: py | lang: Python
max_stars: path topi/tests/python/test_topi_pooling.py | repo wix-playground/incubator-tvm | head c9e2cc2c3daa8065257c76fce42d9c22e06ebb54 | licenses ["Apache-2.0"] | count 1 | events 2021-07-07T09:14:22.000Z to 2021-07-07T09:14:22.000Z
max_issues: path topi/tests/python/test_topi_pooling.py | repo wix-playground/incubator-tvm | head c9e2cc2c3daa8065257c76fce42d9c22e06ebb54 | licenses ["Apache-2.0"] | count null | events null
max_forks: path topi/tests/python/test_topi_pooling.py | repo wix-playground/incubator-tvm | head c9e2cc2c3daa8065257c76fce42d9c22e06ebb54 | licenses ["Apache-2.0"] | count 1 | events 2019-12-27T02:52:58.000Z to 2019-12-27T02:52:58.000Z
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for pooling"""
import numpy as np
import tvm
import topi
import topi.testing
import math
from topi.util import get_const_tuple
from common import get_all_backend
def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True):
iw = ih
kw = kh
sw = sh
pt, pl, pb, pr = padding
layout = "NCHW"
A = tvm.placeholder((n, ic, ih, iw), name='A')
B = topi.nn.pool(A, kernel=[kh, kw], stride=[sh, sw], padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
layout="NCHW", count_include_pad=count_include_pad)
B = topi.nn.relu(B)
dtype = A.dtype
bshape = get_const_tuple(B.shape)
ashape = get_const_tuple(A.shape)
if ceil_mode:
assert bshape[2] == int(math.ceil(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pl + pr) / sw) + 1)
else:
assert bshape[2] == int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1)
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
pad_np = np.zeros(shape=(n, ic, ih+pt+pb, iw+pl+pr)).astype(dtype)
no_zero = (range(n), range(ic), (range(pt, ih+pt)), (range(pl, iw+pl)))
pad_np[np.ix_(*no_zero)] = a_np
_, oc, oh, ow = get_const_tuple(B.shape)
b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
if pool_type == 'avg':
for i in range(oh):
for j in range(ow):
if count_include_pad:
b_np[:,:,i,j] = np.mean(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3))
else:
pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))
b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1)
elif pool_type =='max':
for i in range(oh):
for j in range(ow):
b_np[:,:,i,j] = np.max(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3))
b_np = np.maximum(b_np, 0.0)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_pool(B, layout)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in get_all_backend():
check_device(device)
def verify_pool_grad(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True,
add_relu=False):
iw = ih
kw = kh
sw = sh
pt, pl, pb, pr = padding
layout = "NCHW"
A = tvm.placeholder((n, ic, ih, iw), name='A')
B = topi.nn.pool(A, kernel=[kh, kw], stride=[sh, sw], padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
layout="NCHW", count_include_pad=count_include_pad)
dtype = A.dtype
bshape = get_const_tuple(B.shape)
ashape = get_const_tuple(A.shape)
if ceil_mode:
assert bshape[2] == int(math.ceil(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pl + pr) / sw) + 1)
else:
assert bshape[2] == int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1)
OutGrad = tvm.placeholder(bshape, name='OutGrad')
PoolGrad = topi.nn.pool_grad(OutGrad, A, kernel=[kh, kw], stride=[sh, sw], padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
layout="NCHW", count_include_pad=count_include_pad)
if add_relu:
PoolGrad = topi.nn.relu(PoolGrad)
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype)
pool_grad_np = topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw),
strides=(sh, sw), padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
count_include_pad=count_include_pad)
if add_relu:
pool_grad_np = np.maximum(pool_grad_np, 0.)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_pool_grad(PoolGrad)
a = tvm.nd.array(a_np, ctx)
out_grad = tvm.nd.array(out_grad_np, ctx)
pool_grad = tvm.nd.array(np.zeros(get_const_tuple(PoolGrad.shape), dtype=dtype), ctx)
f = tvm.build(s, [A, OutGrad, PoolGrad], device)
f(a, out_grad, pool_grad)
tvm.testing.assert_allclose(pool_grad.asnumpy(), pool_grad_np, rtol=1e-5)
for device in get_all_backend():
check_device(device)
def test_pool():
verify_pool(1, 256, 32, 2, 2, [0, 0, 0, 0], 'avg', False, True)
verify_pool(1, 256, 31, 3, 3, [1, 2, 1, 2], 'avg', False, True)
verify_pool(1, 256, 32, 2, 2, [1, 2, 1, 2], 'avg', False, False)
verify_pool(1, 256, 31, 4, 4, [3, 3, 3, 3], 'avg', False, False)
verify_pool(1, 256, 31, 4, 4, [0, 0, 0, 0], 'avg', False, False)
verify_pool(1, 256, 32, 2, 2, [0, 0, 0, 0], 'max', False)
verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', False)
verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', True)
verify_pool(1, 256, 31, 3, 3, [2, 1, 0, 3], 'avg', False, True)
verify_pool(1, 256, 32, 2, 2, [0, 3, 2, 1], 'avg', False, False)
verify_pool(1, 256, 31, 3, 3, [1, 0, 3, 2], 'max', False)
verify_pool(1, 256, 31, 3, 3, [3, 2, 1, 0], 'max', True)
def test_pool_grad():
verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], 'avg', False, False)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], 'avg', False, True)
verify_pool_grad(1, 256, 31, 3, 3, [1, 2, 1, 2], 'avg', False, True)
verify_pool_grad(1, 256, 32, 2, 2, [1, 2, 1, 2], 'avg', False, False)
verify_pool_grad(1, 256, 31, 4, 4, [2, 2, 2, 2], 'avg', False, False)
verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], 'avg', False, False)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], 'max', False)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', False)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', True)
verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 0, 3], 'avg', False, True)
verify_pool_grad(1, 256, 32, 2, 2, [0, 3, 2, 1], 'avg', False, False)
verify_pool_grad(1, 256, 31, 3, 3, [1, 0, 3, 2], 'max', False)
verify_pool_grad(1, 256, 31, 3, 3, [3, 2, 1, 0], 'max', True)
verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], 'max', False)
verify_pool_grad(1, 256, 32, 1, 2, [1, 1, 1, 1], 'avg', False, False)
verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], 'avg', False, False, add_relu=True)
verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], 'max', False, add_relu=True)
def verify_global_pool(n, c, h, w, pool_type):
A = tvm.placeholder((n, c, h, w), name='A')
B = topi.nn.global_pool(A, pool_type=pool_type)
B = topi.nn.relu(B)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
if pool_type == 'avg':
b_np = np.mean(a_np, axis=(2,3), keepdims=True)
elif pool_type =='max':
b_np = np.max(a_np, axis=(2,3), keepdims=True)
b_np = np.maximum(b_np, 0.0)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_adaptive_pool(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in get_all_backend():
check_device(device)
def test_global_pool():
verify_global_pool(1, 1024, 7, 7, 'avg')
verify_global_pool(4, 1024, 7, 7, 'avg')
verify_global_pool(1, 1024, 7, 7, 'max')
verify_global_pool(4, 1024, 7, 7, 'max')
def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
def start_index(index, odim, idim):
return int(np.floor(index * idim / odim))
def end_index(index, odim, idim):
return int(np.ceil((index + 1) * idim / odim))
np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
n, c, h, w = dshape
oh, ow = out_size
oshape = (n, c) + out_size
np_out = np.zeros(oshape).astype(dtype)
np_op = np.mean if pool_type == "avg" else np.max
for i in range(n):
for j in range(c):
for k in range(oh):
k_start = start_index(k, oh, h)
k_end = end_index(k, oh, h)
k_sl = slice(k_start, k_end)
for l in range(ow):
l_start = start_index(l, ow, w)
l_end = end_index(l, ow, w)
l_sl = slice(l_start, l_end)
np_out[i, j, k, l] = np_op(np_data[i, j, k_sl, l_sl])
data = tvm.placeholder(dshape, name="data", dtype=dtype)
out = topi.nn.adaptive_pool(data, out_size, pool_type, layout)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_adaptive_pool(out)
a = tvm.nd.array(np_data, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(oshape), dtype=out.dtype), ctx)
f = tvm.build(s, [data, out], device)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), np_out, rtol=1e-5)
for device in get_all_backend():
check_device(device)
def test_adaptive_pool():
verify_adaptive_pool((1, 3, 224, 224), (1, 1), "max")
verify_adaptive_pool((1, 3, 224, 224), (1, 1), "avg")
verify_adaptive_pool((1, 14, 56, 78), (34, 13), "max")
verify_adaptive_pool((1, 5, 46, 97), (4, 96), "avg")
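# Illustrative note, not from the original TVM test file: for an input length of 7
# and an output length of 3, the start_index/end_index helpers in
# verify_adaptive_pool above give overlapping windows [0, 3), [2, 5) and [4, 7):
#   start_index(k, 3, 7) = floor(k * 7 / 3)      -> 0, 2, 4
#   end_index(k, 3, 7)   = ceil((k + 1) * 7 / 3) -> 3, 5, 7
# so every input element is covered and adjacent windows may share elements.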
def verify_pool3d(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True):
iz = iw = ih
kz = kw = kh
sz = sw = sh
pf, pt, pl, pk, pb, pr = padding
layout = "NCDHW"
A = tvm.placeholder((n, ic, iz, ih, iw), name='A')
B = topi.nn.pool3d(A, kernel=[kz, kh, kw], stride=[sz, sh, sw], padding=padding,
pool_type=pool_type, ceil_mode=ceil_mode,
layout="NCDHW", count_include_pad=count_include_pad)
B = topi.nn.relu(B)
dtype = A.dtype
bshape = get_const_tuple(B.shape)
ashape = get_const_tuple(A.shape)
if ceil_mode:
assert bshape[2] == int(math.ceil(float(ashape[2] - kz + pf + pk) / sz) + 1)
assert bshape[3] == int(math.ceil(float(ashape[3] - kh + pt + pb) / sh) + 1)
assert bshape[4] == int(math.ceil(float(ashape[4] - kw + pl + pr) / sw) + 1)
else:
assert bshape[2] == int(math.floor(float(ashape[2] - kz + pf + pk) / sz) + 1)
assert bshape[3] == int(math.floor(float(ashape[3] - kh + pt + pb) / sh) + 1)
assert bshape[4] == int(math.floor(float(ashape[4] - kw + pl + pr) / sw) + 1)
a_np = np.random.uniform(low=0.001, size=(n, ic, iz, ih, iw)).astype(dtype)
pad_np = np.zeros(shape=(n, ic, iz+pf+pk, ih+pt+pb, iw+pl+pr)).astype(dtype)
no_zero = (range(n), range(ic), (range(pf, iz+pf)), (range(pt, ih+pt)), (range(pl, iw+pl)))
pad_np[np.ix_(*no_zero)] = a_np
_, oc, oz, oh, ow = get_const_tuple(B.shape)
b_np = np.zeros(shape=(n, oc, oz, oh, ow)).astype(dtype)
if pool_type == 'avg':
for k in range(oz):
for i in range(oh):
for j in range(ow):
if count_include_pad:
b_np[:,:,k,i,j] = np.mean( \
pad_np[:, :, k*sz:k*sz+kz, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3,4))
else:
pad_count = np.sum( \
pad_np[:, :, k*sz:k*sz+kz, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3,4))
b_np[:,:,k,i,j] = np.sum(pad_np[:, :, k*sz:k*sz+kz, i*sh:i*sh+kh, j*sw:j*sw+kw], \
axis=(2,3, 4)) / np.maximum(pad_count, 1)
elif pool_type =='max':
for k in range(oz):
for i in range(oh):
for j in range(ow):
b_np[:,:,k,i,j] = np.max( \
pad_np[:, :, k*sz:k*sz+kz, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3,4))
b_np = np.maximum(b_np, 0.0)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_pool(B, layout)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in get_all_backend():
check_device(device)
def test_pool3d():
verify_pool3d(1, 256, 32, 2, 2, [0, 0, 0, 0, 0, 0], 'avg', False, True)
verify_pool3d(1, 256, 31, 3, 3, [1, 1, 2, 2, 2, 1], 'avg', False, True)
verify_pool3d(1, 256, 32, 2, 2, [1, 1, 2, 2, 2, 1], 'avg', False, False)
verify_pool3d(1, 256, 31, 4, 4, [3, 3, 3, 3, 3, 3], 'avg', False, False)
verify_pool3d(1, 256, 31, 4, 4, [0, 0, 0, 0, 0, 0], 'avg', False, False)
verify_pool3d(1, 256, 32, 2, 2, [0, 0, 0, 0, 0, 0], 'max', False)
verify_pool3d(1, 256, 31, 3, 3, [2, 2, 1, 1, 1, 2], 'max', False)
verify_pool3d(1, 256, 31, 3, 3, [2, 2, 1, 1, 1, 2], 'max', True)
verify_pool3d(1, 256, 31, 3, 3, [2, 1, 0, 5, 4, 3], 'avg', False, True)
verify_pool3d(1, 256, 32, 2, 2, [0, 5, 4, 3, 2, 1], 'avg', False, False)
verify_pool3d(1, 256, 31, 3, 3, [1, 0, 5, 4, 3, 2], 'max', False)
verify_pool3d(1, 256, 31, 3, 3, [3, 2, 1, 0, 5, 4], 'max', True)
if __name__ == "__main__":
test_pool()
test_pool_grad()
test_global_pool()
test_adaptive_pool()
test_pool3d()
avg_line_length: 43.677778 | max_line_length: 123 | alphanum_fraction: 0.565123
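The reference computation in `verify_pool` above (zero padding in `[pt, pl, pb, pr]` layout, floor-mode output size, and the `count_include_pad` switch for average pooling) can be exercised without TVM. A minimal NumPy-only sketch with made-up shapes, mirroring that reference loop rather than any TVM API:

```python
# NumPy-only reference average pooling over NCHW input (illustrative, not TVM).
import numpy as np

def ref_avg_pool_nchw(a, kh, kw, sh, sw, pt, pl, pb, pr, count_include_pad=True):
    n, c, ih, iw = a.shape
    pad = np.zeros((n, c, ih + pt + pb, iw + pl + pr), dtype=a.dtype)
    pad[:, :, pt:pt + ih, pl:pl + iw] = a                   # zero-pad the input
    oh = (ih - kh + pt + pb) // sh + 1                      # floor-mode output size
    ow = (iw - kw + pl + pr) // sw + 1
    out = np.zeros((n, c, oh, ow), dtype=a.dtype)
    for i in range(oh):
        for j in range(ow):
            win = pad[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw]
            if count_include_pad:
                out[:, :, i, j] = win.mean(axis=(2, 3))
            else:                                           # divide by non-pad count only
                cnt = np.maximum((win > 0).sum(axis=(2, 3)), 1)
                out[:, :, i, j] = win.sum(axis=(2, 3)) / cnt
    return out

# Strictly positive values, as in the test, so "win > 0" identifies non-pad entries.
x = np.random.uniform(0.001, 1.0, size=(1, 2, 5, 5)).astype("float32")
print(ref_avg_pool_nchw(x, 3, 3, 2, 2, 1, 1, 1, 1, count_include_pad=False).shape)  # (1, 2, 3, 3)
```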
hexsha: 7220fd8c5f891e1ecce50871566950b878e8a9a8 | size: 3,714 | ext: py | lang: Python
max_stars: path alerta/webhooks/pagerduty.py | repo tester22/alerta | head 0d3c1004bac41540d4a97a255973ea457d540a74 | licenses ["Apache-2.0"] | count 1 | events 2019-02-07T14:37:12.000Z to 2019-02-07T14:37:12.000Z
max_issues: path alerta/webhooks/pagerduty.py | repo francopeapea/alerta | head dce18f0ab93a1feff518289d51261cccf151d3e8 | licenses ["Apache-2.0"] | count null | events null
max_forks: path alerta/webhooks/pagerduty.py | repo francopeapea/alerta | head dce18f0ab93a1feff518289d51261cccf151d3e8 | licenses ["Apache-2.0"] | count null | events null
content:
from flask import request, g, jsonify
from flask_cors import cross_origin

from alerta.auth.utils import permission
from alerta.exceptions import ApiError
from alerta.models.alert import Alert

from . import webhooks


def parse_pagerduty(message):
    try:
        incident_key = message['data']['incident']['incident_key']
        incident_number = message['data']['incident']['incident_number']
        html_url = message['data']['incident']['html_url']
        incident_url = '<a href="%s">#%s</a>' % (html_url, incident_number)

        from alerta.models import status_code

        if message['type'] == 'incident.trigger':
            status = status_code.OPEN
            user = message['data']['incident']['assigned_to_user']['name']
            text = 'Incident %s assigned to %s' % (incident_url, user)
        elif message['type'] == 'incident.acknowledge':
            status = status_code.ACK
            user = message['data']['incident']['assigned_to_user']['name']
            text = 'Incident %s acknowledged by %s' % (incident_url, user)
        elif message['type'] == 'incident.unacknowledge':
            status = status_code.OPEN
            text = 'Incident %s unacknowledged due to timeout' % incident_url
        elif message['type'] == 'incident.resolve':
            status = status_code.CLOSED
            if message['data']['incident']['resolved_by_user']:
                user = message['data']['incident']['resolved_by_user']['name']
            else:
                user = 'n/a'
            text = 'Incident %s resolved by %s' % (incident_url, user)
        elif message['type'] == 'incident.assign':
            status = status_code.ASSIGN
            user = message['data']['incident']['assigned_to_user']['name']
            text = 'Incident %s manually assigned to %s' % (incident_url, user)
        elif message['type'] == 'incident.escalate':
            status = status_code.OPEN
            user = message['data']['incident']['assigned_to_user']['name']
            text = 'Incident %s escalated to %s' % (incident_url, user)
        elif message['type'] == 'incident.delegate':
            status = status_code.OPEN
            user = message['data']['incident']['assigned_to_user']['name']
            text = 'Incident %s reassigned due to escalation to %s' % (incident_url, user)
        else:
            status = status_code.UNKNOWN
            text = message['type']
    except Exception:
        raise ValueError
    return incident_key, status, text


@webhooks.route('/webhooks/pagerduty', methods=['OPTIONS', 'POST'])
@cross_origin()
@permission('write:webhooks')
def pagerduty():
    data = request.json
    updated = False
    if data and 'messages' in data:
        for message in data['messages']:
            try:
                incident_key, status, text = parse_pagerduty(message)
            except ValueError as e:
                raise ApiError(str(e), 400)
            if not incident_key:
                raise ApiError('no incident key in PagerDuty data payload', 400)
            customers = g.get('customers', None)
            try:
                alert = Alert.find_by_id(id=incident_key, customers=customers)
            except Exception as e:
                raise ApiError(str(e), 500)
            if not alert:
                raise ApiError("not found", 404)
            try:
                updated = alert.set_status(status, text)
            except Exception as e:
                raise ApiError(str(e), 500)
    else:
        raise ApiError("no messages in PagerDuty data payload", 400)
    if updated:
        return jsonify(status="ok"), 200
    else:
        raise ApiError("update PagerDuty incident status failed", 500)
avg_line_length: 37.897959 | max_line_length: 90 | alphanum_fraction: 0.591546
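`parse_pagerduty` above reads a small set of fields from the PagerDuty v1 webhook body. The sketch below shows a message with those fields and a simplified stand-in parser; the concrete values are invented and the status strings are placeholders, not alerta's `status_code` constants:

```python
# Illustrative PagerDuty v1 webhook message (field names taken from the lookups
# in parse_pagerduty; values are made up) and a simplified stand-in parser.
sample_message = {
    "type": "incident.acknowledge",
    "data": {
        "incident": {
            "incident_key": "e7a4c72e1b284a31a4e8a2f0f8a9b123",
            "incident_number": 42,
            "html_url": "https://example.pagerduty.com/incidents/PABC123",
            "assigned_to_user": {"name": "Jane Doe"},
        }
    },
}

def parse_minimal(message):
    incident = message["data"]["incident"]
    url = '<a href="%s">#%s</a>' % (incident["html_url"], incident["incident_number"])
    if message["type"] == "incident.acknowledge":
        user = incident["assigned_to_user"]["name"]
        return incident["incident_key"], "ack", "Incident %s acknowledged by %s" % (url, user)
    return incident["incident_key"], "unknown", message["type"]

print(parse_minimal(sample_message))
```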
hexsha: 287c619bb566778e7ac1d33cfe8633cb53e3da13 | size: 65,742 | ext: py | lang: Python
max_stars: path pycqed/measurement/composite_detector_functions.py | repo nuttamas/PycQED_py3 | head 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | licenses ["MIT"] | count null | events null
max_issues: path pycqed/measurement/composite_detector_functions.py | repo nuttamas/PycQED_py3 | head 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | licenses ["MIT"] | count null | events null
max_forks: path pycqed/measurement/composite_detector_functions.py | repo nuttamas/PycQED_py3 | head 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | licenses ["MIT"] | count null | events null
content:
import numpy as np
import time
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement import awg_sweep_functions as awg_swf
from pycqed.measurement import CBox_sweep_functions as CB_swf
from pycqed.measurement import detector_functions as det
from pycqed.analysis import measurement_analysis as ma
from pycqed.measurement.pulse_sequences import fluxing_sequences as fsqs
from pycqed.analysis import analysis_toolbox as a_tools
from qcodes.instrument.parameter import ManualParameter
from pycqed.measurement.waveform_control_CC import QWG_fluxing_seqs as qwfs
import pycqed.analysis.tools.plotting as plt_tools
class SSRO_Fidelity_Detector_CBox(det.Soft_Detector):
'''
Currently only for CBox,
'''
def __init__(self, measurement_name, MC, AWG, CBox,
RO_pulse_length, RO_pulse_delay, RO_trigger_delay,
raw=True, analyze=True, **kw):
self.detector_control = 'soft'
self.name = 'SSRO_Fidelity'
# For an explanation of the difference between the different
# Fidelities look in the analysis script
if raw:
self.value_names = ['F-raw']
self.value_units = [' ']
else:
self.value_names = ['F', 'F corrected']
self.value_units = [' ', ' ']
self.measurement_name = measurement_name
self.NoSamples = kw.get('NoSamples', 8000) # current max of log mode
self.MC = MC
self.CBox = CBox
self.AWG = AWG
self.RO_trigger_delay = RO_trigger_delay
self.RO_pulse_delay = RO_pulse_delay
self.RO_pulse_length = RO_pulse_length
self.i = 0
self.raw = raw # Performs no fits if True
self.analyze = analyze
self.upload = True
def prepare(self, **kw):
self.CBox.set('log_length', self.NoSamples)
self.MC.set_sweep_function(awg_swf.CBox_OffOn(
IF=self.IF,
RO_pulse_delay=self.RO_pulse_delay,
RO_trigger_delay=self.RO_trigger_delay,
RO_pulse_length=self.RO_pulse_length,
AWG=self.AWG, CBox=self.CBox,
upload=self.upload))
self.MC.set_detector_function(
det.CBox_alternating_shots_det(self.CBox, self.AWG))
def acquire_data_point(self, *args, **kw):
self.i += 1
self.MC.run(name=self.measurement_name+'_'+str(self.i))
if self.analyze:
ana = ma.SSRO_Analysis(label=self.measurement_name,
no_fits=self.raw, close_file=True)
# Arbitrary choice, does not take the definition into account
if self.raw:
return ana.F_raw
else:
return ana.F_raw, ana.F_corrected
class SSRO_Fidelity_Detector_Tek(det.Soft_Detector):
'''
For Qcodes. Readout with CBox, UHFLI, DDM, pulse generation with 5014
'''
def __init__(self, measurement_name, MC, AWG, acquisition_instr,
pulse_pars, RO_pars, raw=True, analyze=True, upload=True,
IF=None, weight_function_I=0, weight_function_Q=1,
optimized_weights=False, one_weight_function_UHFQC=False,
wait=0.0, close_fig=True, SSB=False,
nr_averages=1024, integration_length=1e-6,
nr_shots=4094, **kw):
self.detector_control = 'soft'
self.name = 'SSRO_Fidelity'
# For an explanation of the difference between the different
# Fidelities look in the analysis script
if raw:
self.value_names = ['F_a', 'theta']
self.value_units = [' ', 'rad']
else:
self.value_names = ['F_a', 'F_d', 'SNR']
self.value_units = [' ', ' ', ' ']
self.measurement_name = measurement_name
self.MC = MC
self.acquisition_instr = acquisition_instr
self.AWG = AWG
self.pulse_pars = pulse_pars
self.RO_pars = RO_pars
self.optimized_weights = optimized_weights
self.i = 0
self.raw = raw # Performs no fits if True
self.analyze = analyze
self.upload = upload
self.wait = wait
self.close_fig = close_fig
self.SSB = SSB
self.IF = IF
self.nr_shots = nr_shots
if 'CBox' in str(self.acquisition_instr):
self.CBox = self.acquisition_instr
elif 'UHFQC' in str(self.acquisition_instr):
self.UHFQC = self.acquisition_instr
elif 'DDM' in str(self.acquisition_instr):
self.DDM = self.acquisition_instr
self.nr_averages = nr_averages
self.integration_length = integration_length
self.weight_function_I = weight_function_I
self.weight_function_Q = weight_function_Q
self.one_weight_function_UHFQC = one_weight_function_UHFQC
def prepare(self, **kw):
if not self.optimized_weights:
self.soft_rotate = True
self.MC.set_sweep_function(awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars,
upload=self.upload))
self.MC.set_sweep_points(np.arange(self.nr_shots))
if 'CBox' in str(self.acquisition_instr):
self.MC.set_detector_function(
det.CBox_integration_logging_det(
self.acquisition_instr,
self.AWG,
integration_length=self.integration_length))
self.CBox = self.acquisition_instr
if self.SSB:
raise ValueError(
'SSB is only possible in CBox with optimized weights')
else:
self.CBox.lin_trans_coeffs([1, 0, 0, 1])
self.CBox.demodulation_mode('double')
if self.IF == None:
raise ValueError(
'IF has to be provided when not using optimized weights')
else:
self.CBox.upload_standard_weights(IF=self.IF)
elif 'UHFQC' in str(self.acquisition_instr):
self.MC.set_detector_function(
det.UHFQC_integration_logging_det(
self.acquisition_instr, self.AWG,
channels=[
self.weight_function_I, self.weight_function_Q],
integration_length=self.integration_length,
nr_shots=min(self.nr_shots, 4094)))
if self.SSB:
self.UHFQC.prepare_SSB_weight_and_rotation(
IF=self.IF, weight_function_I=self.weight_function_I,
weight_function_Q=self.weight_function_Q)
else:
if self.IF == None:
raise ValueError(
'IF has to be provided when not using optimized weights')
else:
self.UHFQC.prepare_DSB_weight_and_rotation(
IF=self.IF,
weight_function_I=self.weight_function_I,
weight_function_Q=self.weight_function_Q)
elif 'DDM' in str(self.acquisition_instr):
self.MC.set_detector_function(
det.DDM_integration_logging_det(
self.acquisition_instr, self.AWG,
channels=[
self.weight_function_I, self.weight_function_Q],
integration_length=self.integration_length,
nr_shots=min(self.nr_shots, 8000)))
if self.SSB:
self.DDM.prepare_SSB_weight_and_rotation(
IF=self.IF, weight_function_I=self.weight_function_I,
weight_function_Q=self.weight_function_Q)
#not yet implemented
# else:
# if self.IF == None:
# raise ValueError(
# 'IF has to be provided when not using optimized weights')
# else:
# self.UHFQC.prepare_DSB_weight_and_rotation(
# IF=self.IF,
# weight_function_I=self.weight_function_I,
# weight_function_Q=self.weight_function_Q)
def acquire_data_point(self, *args, **kw):
self.time_start = time.time()
if self.optimized_weights:
self.soft_rotate = False
if 'CBox' in str(self.acquisition_instr):
self.CBox.nr_averages(int(self.nr_averages))
if self.SSB:
self.CBox.lin_trans_coeffs([1, 1, -1, 1])
# self.CBox.demodulation_mode(1)
self.CBox.demodulation_mode('single')
else:
self.CBox.lin_trans_coeffs([1, 0, 0, 1])
# self.CBox.demodulation_mode(0)
self.CBox.demodulation_mode('double')
self.nr_samples = 512
self.CBox.nr_samples.set(self.nr_samples)
SWF = awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars,
pulse_comb='OffOff',
nr_samples=self.nr_samples)
SWF.prepare()
self.CBox.acquisition_mode('idle')
self.AWG.start()
self.CBox.acquisition_mode('input averaging')
inp_avg_res = self.CBox.get_input_avg_results()
transient0_I = inp_avg_res[0]
transient0_Q = inp_avg_res[1]
SWF = awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars,
pulse_comb='OnOn',
nr_samples=self.nr_samples)
SWF.prepare()
self.CBox.acquisition_mode('idle')
self.CBox.acquisition_mode('input averaging')
self.AWG.start()
inp_avg_res = self.CBox.get_input_avg_results()
self.CBox.acquisition_mode('idle')
transient1_I = inp_avg_res[0]
transient1_Q = inp_avg_res[1]
optimized_weights_I = (transient1_I-transient0_I)
optimized_weights_I = optimized_weights_I - \
np.mean(optimized_weights_I)
weight_scale_factor = 127./np.max(np.abs(optimized_weights_I))
optimized_weights_I = np.floor(
weight_scale_factor*optimized_weights_I).astype(int)
optimized_weights_Q = (transient1_Q-transient0_Q)
optimized_weights_Q = optimized_weights_Q - \
np.mean(optimized_weights_Q)
weight_scale_factor = 127./np.max(np.abs(optimized_weights_Q))
optimized_weights_Q = np.floor(
weight_scale_factor*optimized_weights_Q).astype(int)
self.CBox.sig0_integration_weights.set(optimized_weights_I)
if self.SSB:
self.CBox.sig1_integration_weights.set(
optimized_weights_Q) # disabling the Q quadrature
else:
self.CBox.sig1_integration_weights.set(
np.multiply(optimized_weights_Q, 0)) # disabling the Q quadrature
self.MC.set_sweep_function(awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars))
self.MC.set_sweep_points(np.arange(self.nr_shots))
self.MC.set_detector_function(
det.CBox_integration_logging_det(self.CBox, self.AWG, integration_length=self.integration_length))
elif 'UHFQC' in str(self.acquisition_instr):
self.nr_samples = 4096
self.channels=[
self.weight_function_I, self.weight_function_Q]
#copy pasted from input average prepare
self.AWG.stop()
self.nr_sweep_points = self.nr_samples
self.UHFQC.acquisition_initialize(samples=self.nr_samples, averages=self.nr_averages, channels=self.channels, mode='iavg')
#prepare sweep
SWF = awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars,
pulse_comb='OffOff',
nr_samples=self.nr_samples)
SWF.prepare()
#get values detector
self.UHFQC.acquisition_arm()
# starting AWG
if self.AWG is not None:
self.AWG.start()
data_raw=self.UHFQC.acquisition_poll(samples=self.nr_sweep_points,
arm=False, acquisition_time=0.01)
data = np.array([data_raw[key] for key in data_raw.keys()])
#calculating transients
transient0_I = data[0]
transient0_Q = data[1]
self.AWG.stop()
SWF = awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars,
pulse_comb='OnOn',
nr_samples=self.nr_samples)
SWF.prepare()
# get values detector
self.UHFQC.acquisition_arm()
# starting AWG
if self.AWG is not None:
self.AWG.start()
data_raw=self.UHFQC.acquisition_poll(samples=self.nr_sweep_points,
arm=False, acquisition_time=0.01)
data = np.array([data_raw[key] for key in data_raw.keys()])
#calculating transients
transient1_I = data[0]
transient1_Q = data[1]
optimized_weights_I = (transient1_I-transient0_I)
optimized_weights_I = optimized_weights_I - \
np.mean(optimized_weights_I)
weight_scale_factor = 1./np.max(np.abs(optimized_weights_I))
optimized_weights_I = np.array(
weight_scale_factor*optimized_weights_I)
optimized_weights_Q = (transient1_Q-transient0_Q)
optimized_weights_Q = optimized_weights_Q - \
np.mean(optimized_weights_Q)
weight_scale_factor = 1./np.max(np.abs(optimized_weights_Q))
optimized_weights_Q = np.array(
weight_scale_factor*optimized_weights_Q)
self.UHFQC.set('qas_0_integration_weights_{}_real'.format(self.weight_function_I), np.array(optimized_weights_I))
if self.SSB:
self.UHFQC.set('qas_0_integration_weights_{}_imag'.format(self.weight_function_I), np.array(optimized_weights_Q))
self.UHFQC.set('qas_0_rotations_{}'.format(self.weight_function_I), 1.0 - 1.0j)
if not self.one_weight_function_UHFQC:
self.UHFQC.set('qas_0_integration_weights_{}_real'.format(self.weight_function_Q), np.array(optimized_weights_I))
self.UHFQC.set('qas_0_integration_weights_{}_imag'.format(self.weight_function_Q), np.array(optimized_weights_Q))
self.UHFQC.set('qas_0_rotations_{}'.format(self.weight_function_Q), 1.0 + 1.0j)
else:
# disabling the other weight functions
self.UHFQC.set('qas_0_integration_weights_{}_imag'.format(self.weight_function_I), 0*np.array(optimized_weights_Q))
self.UHFQC.set('qas_0_rotations_{}'.format(self.weight_function_I), 1.0 + 0.0j)
if not self.one_weight_function_UHFQC:
self.UHFQC.set('qas_0_integration_weights_{}_real'.format(self.weight_function_Q), 0*np.array(optimized_weights_I))
self.UHFQC.set('qas_0_integration_weights_{}_imag'.format(self.weight_function_Q), 0*np.array(optimized_weights_Q))
self.UHFQC.set('qas_0_rotations_{}'.format(self.weight_function_Q), 0.0 + 0.0j)
# reading out weights as check
self.UHFQC.get('qas_0_integration_weights_{}_real()'.format(self.weight_function_I))
self.UHFQC.get('qas_0_integration_weights_{}_imag()'.format(self.weight_function_I))
self.UHFQC.get('qas_0_integration_weights_{}_real()'.format(self.weight_function_Q))
self.UHFQC.get('qas_0_integration_weights_{}_imag()'.format(self.weight_function_Q))
self.MC.set_sweep_function(awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars))
self.MC.set_sweep_points(np.arange(self.nr_shots))
self.MC.set_detector_function(
det.UHFQC_integration_logging_det(self.UHFQC, self.AWG,
channels=[
self.weight_function_I, self.weight_function_Q],
integration_length=self.integration_length, nr_shots=min(self.nr_shots, 4094)))
self.i += 1
self.MC.run(name=self.measurement_name+'_'+str(self.i))
if self.analyze:
ana = ma.SSRO_Analysis(rotate=self.soft_rotate,
label=self.measurement_name,
no_fits=self.raw, close_file=False,
close_fig=True, auto=True)
if self.optimized_weights:
# data_group = self.MC.data_object.create_group('Transients Data')
dset = ana.g.create_dataset('Transients', (self.nr_samples, 4),
maxshape=(self.nr_samples, 4))
dset[:, 0] = transient0_I
dset[:, 1] = transient0_Q
dset[:, 2] = transient1_I
dset[:, 3] = transient1_Q
ana.data_file.close()
# Arbitrary choice, does not take the definition into account
time_end = time.time()
nett_wait = self.wait-time_end+self.time_start
print(self.time_start)
if nett_wait > 0:
time.sleep(nett_wait)
if self.raw:
return ana.F_a, ana.theta
else:
return ana.F_a, ana.F_d, ana.SNR
'''
def acquire_data_point(self, *args, **kw):
self.time_start = time.time()
if self.set_integration_weights:
nr_samples = 512
self.CBox.nr_samples.set(nr_samples)
self.MC.set_sweep_function(awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars,
pulse_comb='OffOff',
nr_samples=nr_samples))
self.MC.set_detector_function(det.CBox_input_average_detector(
self.CBox, self.AWG))
self.MC.run('Measure_transients_0')
a0 = ma.MeasurementAnalysis(auto=True, close_fig=self.close_fig)
self.MC.set_sweep_function(awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars,
pulse_comb='OnOn',
nr_samples=nr_samples))
self.MC.set_detector_function(det.CBox_input_average_detector(
self.CBox, self.AWG))
self.MC.run('Measure_transients_1')
a1 = ma.MeasurementAnalysis(auto=True, close_fig=self.close_fig)
transient0 = a0.data[1, :]
transient1 = a1.data[1, :]
optimized_weights = transient1-transient0
optimized_weights = optimized_weights+np.mean(optimized_weights)
self.CBox.sig0_integration_weights.set(optimized_weights)
self.CBox.sig1_integration_weights.set(
np.multiply(optimized_weights, self.use_Q)) # disabling the Q quadrature
self.MC.set_sweep_function(awg_swf.OffOn(
pulse_pars=self.pulse_pars,
RO_pars=self.RO_pars))
self.MC.set_detector_function(
det.CBox_integration_logging_det(self.CBox, self.AWG))
self.i += 1
self.MC.run(name=self.measurement_name+'_'+str(self.i))
if self.analyze:
ana = ma.SSRO_Analysis(label=self.measurement_name,
no_fits=self.raw, close_file=True,
close_fig=self.close_fig)
# Arbitrary choice, does not take the definition into account
time_end=time.time()
nett_wait = self.wait-time_end+self.time_start
print(self.time_start)
if nett_wait>0:
time.sleep(nett_wait)
if self.raw:
return ana.F_raw, ana.theta
else:
return ana.F, ana.F_corrected
'''
class CBox_trace_error_fraction_detector(det.Soft_Detector):
def __init__(self, measurement_name, MC, AWG, CBox,
sequence_swf=None,
threshold=None,
calibrate_threshold='self-consistent',
save_raw_trace=False,
**kw):
super().__init__(**kw)
self.name = measurement_name
self.threshold = threshold
self.value_names = ['no err',
'single err',
'double err']
self.value_units = ['%', '%', '%']
self.AWG = AWG
self.MC = MC
self.CBox = CBox
# after testing equivalence this is to be removed
self.save_raw_trace = save_raw_trace
self.calibrate_threshold = calibrate_threshold
self.sequence_swf = sequence_swf
def calibrate_threshold_conventional(self):
self.CBox.lin_trans_coeffs.set([1, 0, 0, 1])
ssro_d = SSRO_Fidelity_Detector_CBox(
'SSRO_det', self.MC, self.AWG, self.CBox,
RO_pulse_length=self.sequence_swf.RO_pulse_length,
RO_pulse_delay=self.sequence_swf.RO_pulse_delay,
RO_trigger_delay=self.sequence_swf.RO_trigger_delay)
ssro_d.prepare()
ssro_d.acquire_data_point()
a = ma.SSRO_Analysis(auto=True, close_fig=True,
label='SSRO', no_fits=True,
close_file=True)
# SSRO analysis returns the angle to rotate by
theta = a.theta # analysis returns theta in rad
rot_mat = [np.cos(theta), -np.sin(theta),
np.sin(theta), np.cos(theta)]
self.CBox.lin_trans_coeffs.set(rot_mat)
self.threshold = a.V_th_a # allows
self.CBox.sig0_threshold_line.set(int(a.V_th_a))
self.sequence_swf.upload = True
# make sure the sequence gets uploaded
return int(self.threshold)
def calibrate_threshold_self_consistent(self):
self.CBox.lin_trans_coeffs.set([1, 0, 0, 1])
ssro_d = CBox_SSRO_discrimination_detector(
'SSRO-disc-det',
MC=self.MC, AWG=self.AWG, CBox=self.CBox,
sequence_swf=self.sequence_swf)
ssro_d.prepare()
discr_vals = ssro_d.acquire_data_point()
# hardcoded indices correspond to values in CBox SSRO discr det
theta = discr_vals[2] * 2 * np.pi/360
# Discr returns the current angle, rotation is - that angle
rot_mat = [np.cos(-1*theta), -np.sin(-1*theta),
np.sin(-1*theta), np.cos(-1*theta)]
self.CBox.lin_trans_coeffs.set(rot_mat)
# Measure it again to determine the threshold after rotating
discr_vals = ssro_d.acquire_data_point()
# hardcoded indices correspond to values in CBox SSRO discr det
theta = discr_vals[2]
self.threshold = int(discr_vals[3])
self.CBox.sig0_threshold_line.set(int(self.threshold))
return int(self.threshold)
def prepare(self, **kw):
self.i = 0
if self.threshold is None: # calibrate threshold
if self.calibrate_threshold == 'conventional':
self.calibrate_threshold_conventional()
elif self.calibrate_threshold == 'self-consistent':
self.calibrate_threshold_self_consistent()
else:
raise Exception(
'calibrate_threshold "{}"'.format(self.calibrate_threshold)
+ 'not recognized')
else:
self.CBox.sig0_threshold_line.set(int(self.threshold))
self.MC.set_sweep_function(self.sequence_swf)
# if self.counters:
# self.counters_d = det.CBox_state_counters_det(self.CBox, self.AWG)
self.dig_shots_det = det.CBox_digitizing_shots_det(
self.CBox, self.AWG,
threshold=self.CBox.sig0_threshold_line.get())
self.MC.set_detector_function(self.dig_shots_det)
def acquire_data_point(self, **kw):
if self.i > 0:
# overwrites the upload arg if the sequence swf has it to
# prevent reloading
self.sequence_swf.upload = False
self.i += 1
if self.save_raw_trace:
self.MC.run(self.name+'_{}'.format(self.i))
a = ma.MeasurementAnalysis(auto=False)
a.get_naming_and_values()
trace = a.measured_values[0]
a.finish() # close the datafile
return self.count_error_fractions(trace, len(trace))
else:
self.sequence_swf.prepare()
counters = self.counters_d.get_values()
# no err, single and double for weight A
return counters[0:3]/self.CBox.get('log_length')*100
def count_error_fractions(self, trace, trace_length):
no_err_counter = 0
single_err_counter = 0
double_err_counter = 0
for i in range(len(trace)-2):
if trace[i] == trace[i+1]:
# A single error is associated with a qubit error
single_err_counter += 1
if trace[i] == trace[i+2]:
# If there are two errors in a row this is associated with
# a RO error, this counter must be subtracted from the
# single counter
double_err_counter += 1
else:
no_err_counter += 1
return (no_err_counter/len(trace)*100,
single_err_counter/len(trace)*100,
double_err_counter/len(trace)*100)
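# Note (illustrative, not from the original PycQED file): in count_error_fractions
# above, trace[i] == trace[i+1] flags a candidate single (qubit) error at position i,
# trace[i] == trace[i+2] additionally flags a double (readout) error that is meant to
# be subtracted from the single-error count, and all three counters are returned as
# percentages of len(trace).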
class CBox_SSRO_discrimination_detector(det.Soft_Detector):
def __init__(self, measurement_name, MC, AWG, CBox,
sequence_swf,
threshold=None,
calibrate_threshold=False,
save_raw_trace=False,
counters=True,
analyze=True,
**kw):
super().__init__(**kw)
self.name = measurement_name
if threshold is None:
self.threshold = CBox.sig0_threshold_line.get()
else:
self.threshold = threshold
self.value_names = ['F-discr. cur. th.',
'F-discr. optimal',
'theta',
'optimal I-threshold',
'rel. separation',
'rel. separation I'] # projected along I axis
self.value_units = ['%', '%', 'deg', 'a.u', '1/sigma', '1/sigma']
self.AWG = AWG
self.MC = MC
self.CBox = CBox
# Required to set some kind of sequence that does a pulse
self.sequence_swf = sequence_swf
# If analyze is False it cannot be used as a detector anymore
self.analyze = analyze
def prepare(self, **kw):
self.i = 0
self.MC.set_sweep_function(self.sequence_swf)
self.MC.set_detector_function(det.CBox_integration_logging_det(
self.CBox, self.AWG))
def acquire_data_point(self, **kw):
if self.i > 0:
# overwrites the upload arg if the sequence swf has it to
# prevent reloading
self.sequence_swf.upload = False
self.i += 1
self.MC.run(self.name+'_{}'.format(self.i))
if self.analyze:
a = ma.SSRO_discrimination_analysis(
label=self.name+'_{}'.format(self.i),
current_threshold=self.threshold)
return (a.F_discr_curr_t*100, a.F_discr*100,
a.theta, a.opt_I_threshold,
a.relative_separation, a.relative_separation_I)
class CBox_RB_detector(det.Soft_Detector):
def __init__(self, measurement_name, MC, AWG, CBox, LutMan,
nr_cliffords, desired_nr_seeds,
IF,
RO_pulse_length, RO_pulse_delay, RO_trigger_delay,
pulse_delay,
T1=None, **kw):
super().__init__(**kw)
self.name = measurement_name
self.nr_cliffords = nr_cliffords
self.desired_nr_seeds = desired_nr_seeds
self.AWG = AWG
self.MC = MC
self.CBox = CBox
self.LutMan = LutMan
self.IF = IF
self.RO_pulse_length = RO_pulse_length
self.RO_pulse_delay = RO_pulse_delay
self.RO_trigger_delay = RO_trigger_delay
self.pulse_delay = pulse_delay
self.T1 = T1
self.value_names = ['F_cl']
self.value_units = ['']
def calculate_seq_duration_and_max_nr_seeds(self, nr_cliffords,
pulse_delay):
max_nr_cliffords = max(nr_cliffords)
# For few cliffords the number of gates is not the average number of
# gates so pick the max, rounded to ns
max_seq_duration = np.round(max(max_nr_cliffords*pulse_delay *
(1.875+.5), 10e-6), 9)
max_idling_waveforms_per_seed = max_seq_duration/(1200e-9)
max_nr_waveforms = 29184 # hard limit from the CBox
max_nr_seeds = int(max_nr_waveforms/((max_idling_waveforms_per_seed +
np.mean(nr_cliffords)*1.875)*(len(nr_cliffords)+4)))
return max_seq_duration, max_nr_seeds
def prepare(self, **kw):
max_seq_duration, max_nr_seeds = \
self.calculate_seq_duration_and_max_nr_seeds(self.nr_cliffords,
self.pulse_delay)
nr_repetitions = int(np.ceil(self.desired_nr_seeds/max_nr_seeds))
self.total_nr_seeds = nr_repetitions*max_nr_seeds
averages_per_tape = self.desired_nr_seeds//nr_repetitions
self.CBox.nr_averages.set(int(2**np.ceil(np.log2(averages_per_tape))))
rb_swf = awg_swf.CBox_RB_sweep(nr_cliffords=self.nr_cliffords,
nr_seeds=max_nr_seeds,
max_seq_duration=max_seq_duration,
safety_margin=0,
IF=self.IF,
RO_pulse_length=self.RO_pulse_length,
RO_pulse_delay=self.RO_pulse_delay,
RO_trigger_delay=self.RO_trigger_delay,
pulse_delay=self.pulse_delay,
AWG=self.AWG,
CBox=self.CBox,
LutMan=self.LutMan)
self.i = 0
self.MC.set_sweep_function(rb_swf)
self.MC.set_sweep_function_2D(awg_swf.Two_d_CBox_RB_seq(rb_swf))
self.MC.set_sweep_points_2D(np.arange(nr_repetitions))
self.MC.set_detector_function(det.CBox_integrated_average_detector(
self.CBox, self.AWG))
def acquire_data_point(self, **kw):
self.i += 1
self.MC.run(self.name+'_{}_{}seeds'.format(
self.i, self.total_nr_seeds), mode='2D')
a = ma.RandomizedBench_2D_flat_Analysis(
auto=True, close_main_fig=True, T1=self.T1,
pulse_delay=self.pulse_delay)
F_cl = a.fit_res.params['fidelity_per_Clifford'].value
return F_cl
class Chevron_optimization_v1(det.Soft_Detector):
'''
Chevron optimization.
'''
def __init__(self, flux_channel, dist_dict, AWG, MC_nested, qubit,
kernel_obj, cost_function_opt=0, **kw):
super().__init__()
kernel_dir = 'kernels/'
self.name = 'chevron_optimization_v1'
self.value_names = ['Cost function', 'SWAP Time']
self.value_units = ['a.u.', 'ns']
self.kernel_obj = kernel_obj
self.AWG = AWG
self.MC_nested = MC_nested
self.qubit = qubit
self.dist_dict = dist_dict
self.flux_channel = flux_channel
self.cost_function_opt = cost_function_opt
self.dist_dict['ch%d' % self.flux_channel].append('')
self.nr_averages = kw.get('nr_averages', 1024)
self.awg_amp_par = ManualParameter(
name='AWG_amp', unit='Vpp', label='AWG Amplitude')
self.awg_amp_par.get = lambda: self.AWG.get(
'ch{}_amp'.format(self.flux_channel))
self.awg_amp_par.set = lambda val: self.AWG.set(
'ch{}_amp'.format(self.flux_channel), val)
self.awg_value = 2.0
kernel_before_list = self.dist_dict['ch%d' % self.flux_channel]
kernel_before_loaded = []
for k in kernel_before_list:
if k != '':
kernel_before_loaded.append(np.loadtxt(kernel_dir+k))
self.kernel_before = kernel_obj.convolve_kernel(kernel_before_loaded,
30000)
def acquire_data_point(self, **kw):
# # Before writing it
# # Summarize what to do:
# # Update kernel from kernel object
kernel_file = 'optimizing_kernel_%s' % a_tools.current_timestamp()
self.kernel_obj.save_corrections_kernel(
kernel_file, self.kernel_before,)
self.dist_dict['ch%d' % self.flux_channel][-1] = kernel_file+'.txt'
self.qubit.dist_dict = self.dist_dict
self.qubit.RO_acq_averages(self.nr_averages)
self.qubit.measure_chevron(amps=[self.awg_amp_par()],
length=np.arange(0, 81e-9, 1e-9),
MC=self.MC_nested)
# # fit it
ma_obj = ma.chevron_optimization_v2(auto=True, label='Chevron_slice')
cost_val = ma_obj.cost_value[self.cost_function_opt]
# # Return the cost function sum(min)+sum(1-max)
return cost_val, 0.5*ma_obj.period
def prepare(self):
pass
def finish(self):
pass
class SWAPN_optimization(det.Soft_Detector):
'''
SWAPN optimization.
Wrapper around a SWAPN sequence to create a cost function.
The kernel object is used to determine the (pre)distortion kernel.
It is common to do a sweep over one of the kernel parameters as a sweep
function.
'''
def __init__(self, nr_pulses_list, AWG, MC_nested, qubit,
kernel_obj, cache, cost_choice='sum', **kw):
super().__init__()
self.name = 'swapn_optimization'
self.value_names = ['Cost function', 'Single SWAP Fid']
self.value_units = ['a.u.', 'ns']
self.kernel_obj = kernel_obj
self.cache_obj = cache
self.AWG = AWG
self.MC_nested = MC_nested
self.cost_choice = cost_choice
self.nr_pulses_list = nr_pulses_list
self.qubit = qubit
def acquire_data_point(self, **kw):
# # Update kernel from kernel object
# # Measure the swapn
times_vec = self.nr_pulses_list
cal_points = 4
lengths_cal = times_vec[-1] + \
np.arange(1, 1+cal_points)*(times_vec[1]-times_vec[0])
lengths_vec = np.concatenate((times_vec, lengths_cal))
flux_pulse_pars = self.qubit.get_flux_pars()
mw_pulse_pars, RO_pars = self.qubit.get_pulse_pars()
repSWAP = awg_swf.SwapN(mw_pulse_pars,
RO_pars,
flux_pulse_pars, AWG=self.AWG,
dist_dict=self.kernel_obj.kernel(),
upload=True)
# self.AWG.set('ch%d_amp'%self.qubit.fluxing_channel(), 2.)
# seq = repSWAP.pre_upload()
self.MC_nested.set_sweep_function(repSWAP)
self.MC_nested.set_sweep_points(lengths_vec)
self.MC_nested.set_detector_function(self.qubit.int_avg_det_rot)
self.AWG.set('ch%d_amp' % self.qubit.fluxing_channel(),
self.qubit.SWAP_amp())
self.MC_nested.run('SWAPN_%s' % self.qubit.name)
# # fit it
ma_obj = ma.SWAPN_cost(auto=True, cost_func=self.cost_choice)
return ma_obj.cost_val, ma_obj.single_swap_fid
def prepare(self):
pass
def finish(self):
pass
class AllXY_devition_detector_CBox(det.Soft_Detector):
'''
Currently only for CBox.
Todo: remove the predefined values for the sequence
'''
def __init__(self, measurement_name, MC, AWG, CBox,
IF, RO_trigger_delay, RO_pulse_delay, RO_pulse_length,
pulse_delay,
LutMan=None,
reload_pulses=False, **kw):
'''
If reloading of pulses is desired the LutMan is a required instrument
'''
self.detector_control = 'soft'
self.name = 'AllXY_dev_i'
# For an explanation of the difference between the different
# Fidelities look in the analysis script
self.value_names = ['Total_deviation', 'Avg deviation']
# Should only return one instead of two but for now just for
# convenience as I understand the scale of total deviation
self.value_units = ['', '']
self.measurement_name = measurement_name
self.MC = MC
self.CBox = CBox
self.AWG = AWG
self.IF = IF
self.RO_trigger_delay = RO_trigger_delay
self.RO_pulse_delay = RO_pulse_delay
self.pulse_delay = pulse_delay
self.RO_pulse_length = RO_pulse_length
self.LutMan = LutMan
self.reload_pulses = reload_pulses
def prepare(self, **kw):
self.i = 0
self.MC.set_sweep_function(awg_swf.CBox_AllXY(
IF=self.IF,
pulse_delay=self.pulse_delay,
RO_pulse_delay=self.RO_pulse_delay,
RO_trigger_delay=self.RO_trigger_delay,
RO_pulse_length=self.RO_pulse_length,
AWG=self.AWG, CBox=self.CBox))
self.MC.set_detector_function(
det.CBox_integrated_average_detector(self.CBox, self.AWG))
def acquire_data_point(self, *args, **kw):
if self.i > 0:
self.MC.sweep_functions[0].upload = False
self.i += 1
if self.reload_pulses:
self.LutMan.load_pulses_onto_AWG_lookuptable(0)
self.LutMan.load_pulses_onto_AWG_lookuptable(1)
self.LutMan.load_pulses_onto_AWG_lookuptable(2)
self.MC.run(name=self.measurement_name+'_'+str(self.i))
ana = ma.AllXY_Analysis(label=self.measurement_name)
tot_dev = ana.deviation_total
avg_dev = tot_dev/21
return tot_dev, avg_dev
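# Note (illustrative, not from the original PycQED file): the division by 21 above
# corresponds to the 21 gate pairs of the standard AllXY sequence, so avg_dev is the
# mean deviation per AllXY element.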
class Qubit_Spectroscopy(det.Soft_Detector):
'''
Performs a set of measurements that finds f_resonator, f_qubit,
'''
def __init__(self,
qubit,
res_start=None,
res_stop=None,
res_freq_step=None,
res_use_min=False,
res_use_max=False,
use_FWHM=False,
res_t_int=None,
spec_start=None,
spec_stop=None,
spec_freq_step=0.0001,
spec_t_int=None,
use_offset=None,
spec_sweep_range=0.04,
fitting_model='hanger',
pulsed=False,
**kw):
# # import placed here to prevent circular import statement
# # as some cal_tools use composite detectors.
from pycqed.measurement import calibration_toolbox as cal_tools
import imp
imp.reload(cal_tools)
self.cal_tools = cal_tools
self.qubit = qubit
self.nested_MC_name = 'Qubit_Spectroscopy_MC'
self.cal_tools = cal_tools
self.use_FWHM = use_FWHM
# Instruments
self.HM = qt.instruments['HM']
self.Pulsed_Spec = qt.instruments['Pulsed_Spec']
self.RF_power = kw.pop('RF_power', qubit.get_RF_CW_power())
self.qubit_source = qt.instruments[qubit.get_qubit_source()]
self.qubit_source_power = kw.pop('qubit_source_power',
qubit.get_source_power())
self.Plotmon = qt.instruments['Plotmon']
self.res_start = res_start
self.res_stop = res_stop
self.res_freq_step = res_freq_step
self.res_use_min = res_use_min
self.res_use_max = res_use_max
self.spec_start = spec_start
self.spec_stop = spec_stop
self.spec_freq_step = spec_freq_step
self.spec_sweep_range = spec_sweep_range
if (res_t_int is not None) and (spec_t_int is not None):
self.res_t_int = res_t_int
self.spec_t_int = spec_t_int
self.alternate_t_int = True
else:
self.alternate_t_int = False
self.resonator_data = None
self.qubit_data = None
self.fitting_model = fitting_model
self.pulsed = pulsed
self.detector_control = 'soft'
self.name = 'Qubit_Spectroscopy_detector'
self.value_names = ['f_resonator', 'f_resonator_stderr',
'f_qubit', 'f_qubit_stderr']
self.value_units = ['GHz', 'GHz', 'GHz', 'GHz']
self.msmt_kw = {key.split('msmt_')[1]: val
for key, val in list(kw.items()) if 'msmt_' in key}
# Setting the constants
def prepare(self, **kw):
if self.qubit.get_RF_source() is not None:
self.HM.set_RF_source(self.qubit.get_RF_source())
self.HM.set_RF_power(self.RF_power)
self.qubit_source.set_power(self.qubit_source_power)
self.HM.init()
self.HM.set_sources('On')
self.nested_MC = qt.instruments.create(
self.nested_MC_name,
'MeasurementControl')
self.loopcnt = 0
def acquire_data_point(self, *args, **kw):
def plot_resonator_data(data):
if self.resonator_data is None:
self.resonator_data = data[0]
else:
self.resonator_data = np.vstack((self.resonator_data, data[0]))
self.Plotmon.plot3D(1, np.transpose(self.resonator_data*1.))
def plot_qubit_data(data):
if self.qubit_data is None:
self.qubit_data = data[0]
else:
self.qubit_data = np.vstack((self.qubit_data, data[0]))
self.Plotmon.plot3D(2, np.transpose(self.qubit_data*1.))
if self.alternate_t_int:
self.HM.set_t_int(self.res_t_int)
self.HM.init(optimize=True)
self.loopcnt += 1
self.HM.set_sources('On')
if self.res_start is None:
cur_f_RO = self.qubit.get_current_RO_frequency()
self.res_start = cur_f_RO-0.003
self.res_stop = cur_f_RO+0.003
print('Scanning for resonator starting at ' + str(self.res_start))
resonator_scan = \
self.qubit.find_resonator_frequency(
use_FWHM=self.use_FWHM,
MC_name=self.nested_MC_name,
f_start=self.res_start,
f_stop=self.res_stop,
f_step=self.res_freq_step,
suppress_print_statements=False,
fitting_model=self.fitting_model,
use_min=self.res_use_min,
use_max=self.res_use_max)
# plot_resonator_data(resonator_scan['data'])
# print 'BLUUUUURB'
f_resonator = resonator_scan['f_resonator']+0.00001
f_resonator_stderr = resonator_scan['f_resonator_stderr']
self.qubit.set_current_RO_frequency(f_resonator)
print('Finished resonator scan. Readout frequency: ', f_resonator)
if self.pulsed is True:
if self.qubit.get_RF_source() is not None:
self.Pulsed_Spec.set_RF_source(self.qubit.get_RF_source())
self.Pulsed_Spec.set_RF_power(self.qubit.get_RF_TD_power())
self.Pulsed_Spec.set_f_readout(
self.qubit.get_current_RO_frequency()*1e9)
else:
self.HM.set_RF_power(self.qubit.get_RF_CW_power())
self.HM.set_frequency(self.qubit.get_current_RO_frequency()*1e9)
if self.alternate_t_int:
self.HM.set_t_int(self.spec_t_int)
self.HM.init(optimize=True)
print('Scanning for qubit')
qubit_scan = self.qubit.find_frequency_spec(
MC_name=self.nested_MC_name,
f_step=self.spec_freq_step,
f_start=self.spec_start,
f_stop=self.spec_stop,
# update_qubit=False, # We do not want a failed track to update
suppress_print_statements=True,
source_power=self.qubit_source_power,
pulsed=self.pulsed)
# plot_qubit_data(qubit_scan['data'])
f_qubit = qubit_scan['f_qubit']
f_qubit_stderr = qubit_scan['f_qubit_stderr']
print('Estimated qubit frequency: ', f_qubit)
self.qubit.set_current_frequency(f_qubit)
self.HM.set_sources('Off')
return_vals = [f_resonator, f_resonator_stderr,
f_qubit, f_qubit_stderr]
return return_vals
def finish(self, **kw):
self.HM.set_sources('Off')
self.nested_MC.remove()
class Tracked_Qubit_Spectroscopy(det.Soft_Detector):
'''
Performs a set of measurements that finds f_resonator, f_qubit, and
tracks them.
Uses functions on the qubit object.
If the sweep points are handed it uses those in predicting frequencies,
if no sweep points are given it assumes linear spacing between sweep points.
'''
def __init__(self, qubit,
nested_MC,
qubit_initial_frequency=None,
qubit_span=0.08e9,
qubit_init_factor=5,
qubit_stepsize=0.0005e9,
resonator_initial_frequency=None,
resonator_span=0.010e9,
resonator_stepsize=0.0001e9,
resonator_use_min=False,
resonator_use_max=False,
No_of_fitpoints=10,
sweep_points=None,
fitting_model='hanger',
mode='pulsed_marked',
polycoeffs=None,
**kw):
self.nested_MC = nested_MC
self.qubit = qubit
if resonator_initial_frequency is not None:
self.resonator_frequency = resonator_initial_frequency
else:
self.resonator_frequency = self.qubit.f_res()
self.resonator_span = resonator_span
self.resonator_stepsize = resonator_stepsize
if qubit_initial_frequency is not None:
self.qubit_frequency = qubit_initial_frequency
else:
self.qubit_frequency = self.qubit.f_qubit()
self.qubit_span = qubit_span
self.qubit_init_factor = qubit_init_factor
self.qubit_stepsize = qubit_stepsize
self.No_of_fitpoints = No_of_fitpoints
self.mode = mode
self.resonator_use_min = resonator_use_min
self.resonator_use_max = resonator_use_max
self.sweep_points = sweep_points
self.polycoeffs = polycoeffs
# Instruments
self.fitting_model = fitting_model
# self.qubit_source = qubit.cw_source
# self.RF_power = kw.pop('RF_power', self.qubit.RO_power_cw())
# if pulsed:
# self.qubit_source_power = kw.pop('qubit_source_power',
# self.qubit.get_source_power())
# else:
self.detector_control = 'soft'
self.name = 'Qubit_Spectroscopy'
self.value_names = ['f_resonator', # 'f_resonator_stderr',
'f_qubit'] # , 'f_qubit_stderr']
self.value_units = ['Hz', 'Hz'] # , 'Hz', 'Hz']
def prepare(self, **kw):
# if self.pulsed is True:
# if self.qubit.get_RF_source() is not None:
# self.Pulsed_Spec.set_RF_source(self.qubit.get_RF_source())
# self.HM.set_RF_source(self.qubit.get_RF_source())
# self.Pulsed_Spec.set_RF_power(self.RF_power)
# self.HM.set_RF_power(self.RF_power)
# else:
# if self.qubit.get_RF_source() is not None:
# self.HM.set_RF_source(self.qubit.get_RF_source())
# print('Setting RF source of HM to'+self.qubit.get_RF_source())
# self.HM.set_RF_power(self.RF_power)
# self.qubit_source.set_power(self.qubit_source_power)
# self.AWG.start()
# self.HM.init()
# self.AWG.stop()
# self.HM.set_sources('On')
self.resonator_frequencies = np.zeros(len(self.sweep_points))
self.qubit_frequencies = np.zeros(len(self.sweep_points))
self.loopcnt = 0
print('\nStarting Tracked Spectroscopy')
def determine_frequencies(self, loopcnt):
if self.loopcnt == 0:
'''
Uses the initial frequencies to determine where to look.
'''
f_resonator = self.resonator_frequency
if self.polycoeffs is None:
f_qubit = self.qubit_frequency
f_qubit_start = self.qubit_frequency - self.qubit_span/2
f_qubit_end = self.qubit_frequency + self.qubit_span/2
else:
qub_fit = np.poly1d(self.polycoeffs)
f_qubit = qub_fit(self.sweep_points[0])
f_qubit_start = f_qubit - self.qubit_span/2
f_qubit_end = f_qubit + self.qubit_span/2
elif self.loopcnt == 1:
'''
Expects the qubit at self.qubit_frequency.
'''
f_resonator = self.resonator_frequency
if self.polycoeffs is None:
f_qubit = self.qubit_frequency
f_qubit_start = self.qubit_frequency \
- self.qubit_span * self.qubit_init_factor/2
f_qubit_end = self.qubit_frequency\
+ self.qubit_span * self.qubit_init_factor/2
else:
qub_fit = np.poly1d(self.polycoeffs)
f_qubit = qub_fit(self.sweep_points[0])
f_qubit_start = f_qubit - self.qubit_span / 2
f_qubit_end = f_qubit + self.qubit_span / 2
elif self.loopcnt == 2:
'''
Predicts the qubit and resonator frequencies for the third point
by linear extrapolation from the first two measured points.
Uses the np.polyfit and np.poly1d functions.
'''
res_fit_coeff = np.polyfit(
self.sweep_points[:self.loopcnt],
self.resonator_frequencies[:self.loopcnt], 1)
res_fit = np.poly1d(res_fit_coeff)
qub_fit_coeff = np.polyfit(
self.sweep_points[:self.loopcnt],
self.qubit_frequencies[:self.loopcnt], 1)
qub_fit = np.poly1d(qub_fit_coeff)
f_resonator = res_fit(self.sweep_points[loopcnt])
f_qubit = qub_fit(self.sweep_points[loopcnt])
f_qubit_start = f_qubit - self.qubit_span / 2
f_qubit_end = f_qubit + self.qubit_span / 2
else:
'''
After measuring 3 points, quadratic extrapolation based on all
previously measured points is used to predict the frequencies.
Uses the np.polyfit and np.poly1d functions.
'''
res_fit_coeff = np.polyfit(
self.sweep_points[:self.loopcnt],
self.resonator_frequencies[:self.loopcnt], 2)
res_fit = np.poly1d(res_fit_coeff)
qub_fit_coeff = np.polyfit(
self.sweep_points[:self.loopcnt],
self.qubit_frequencies[:self.loopcnt], 2)
qub_fit = np.poly1d(qub_fit_coeff)
f_resonator = res_fit(self.sweep_points[loopcnt])
f_qubit = qub_fit(self.sweep_points[loopcnt])
f_qubit_start = f_qubit - self.qubit_span / 2
f_qubit_end = f_qubit + self.qubit_span / 2
f_resonator_start = f_resonator - self.resonator_span/2
f_resonator_end = f_resonator + self.resonator_span/2
print('\nExpected qubit frequency: %s' % f_qubit)
print('Expected resonator frequency: %s' % f_resonator)
return {'f_resonator_start': f_resonator_start,
'f_resonator_end': f_resonator_end,
'f_resonator': f_resonator,
'f_qubit_start': f_qubit_start,
'f_qubit_end': f_qubit_end,
'f_qubit': f_qubit}
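# Worked sketch of the extrapolation above (the numbers are illustrative only):
# >>> sweep = np.array([0.0, 0.1, 0.2])
# >>> f_res = np.array([7.19e9, 7.20e9, 7.21e9])
# >>> np.poly1d(np.polyfit(sweep[:2], f_res[:2], 1))(sweep[2])   # linear (loopcnt == 2)
# ~7.21e9
# >>> np.poly1d(np.polyfit(sweep, f_res, 2))(0.3)                # quadratic (loopcnt > 2)
# ~7.22e9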
def acquire_data_point(self, *args, **kw):
# self.HM.set_sources('On')
frequencies = self.determine_frequencies(self.loopcnt)
# Resonator
f_res_start, f_res_start_unit = plt_tools.SI_val_to_msg_str(
frequencies['f_resonator_start'], 'Hz', float)
f_res_end, f_res_end_unit = plt_tools.SI_val_to_msg_str(
frequencies['f_resonator_end'], 'Hz', float)
f_res_span, f_res_span_unit = plt_tools.SI_val_to_msg_str(
frequencies['f_resonator_end'] - frequencies['f_resonator_start'],
'Hz', float)
print('\nScanning for resonator. ' +
'Range: {:.3f} {} - {:.3f} {} (span of {:.1f} {})'
.format(f_res_start, f_res_start_unit,
f_res_end, f_res_end_unit,
f_res_span, f_res_span_unit)
)
# if self.alternate_t_int:
# self.HM.set_t_int(self.resonator_t_int)
# self.HM.init(optimize=True)
freqs_res = np.arange(frequencies['f_resonator_start'],
frequencies['f_resonator_end'],
self.resonator_stepsize)
f_resonator = self.qubit.find_resonator_frequency(
MC=self.nested_MC,
freqs=freqs_res, update=False,
use_min=self.resonator_use_min) # to correct for fit
# FIXME: remove the 1e9 after reloading the qubit object
# FIXME not returned in newest version
# self.resonator_frequency = f_resonator # in 2nd loop value is updated
# f_resonator_stderr = resonator_scan['f_resonator_stderr']
# Q_resonator = resonator_scan['quality_factor']
# Q_resonator_stderr = resonator_scan['quality_factor_stderr']
print('Finished resonator scan. Readout frequency: ', f_resonator)
# Qubit
f_qub_start, f_qub_start_unit = plt_tools.SI_val_to_msg_str(
frequencies['f_qubit_start'], 'Hz', float)
f_qub_end, f_qub_end_unit = plt_tools.SI_val_to_msg_str(
frequencies['f_qubit_end'], 'Hz', float)
f_qub_span, f_qub_span_unit = plt_tools.SI_val_to_msg_str(
frequencies['f_qubit_end'] - frequencies['f_qubit_start'],
'Hz', float)
print('\nScanning for qubit. ' +
'Range: {:.3f} {} - {:.3f} {} (span of {:.1f} {})'
.format(f_qub_start, f_qub_start_unit,
f_qub_end, f_qub_end_unit,
f_qub_span, f_qub_span_unit)
)
self.qubit.ro_freq(f_resonator)
freqs_qub = np.arange(frequencies['f_qubit_start'],
frequencies['f_qubit_end'],
self.qubit_stepsize)
self.qubit.measure_spectroscopy(
freqs=freqs_qub,
MC=self.nested_MC,
mode=self.mode)
a = ma.Qubit_Spectroscopy_Analysis(label=self.qubit.msmt_suffix)
# f_qubit, std_err_f_qubit = a.get_frequency_estimate()
f_qubit = a.fitted_freq
# f_qubit = qubit_scan['f_qubit']
# f_qubit_stderr = qubit_scan['f_qubit_stderr']
# qubit_linewidth = qubit_scan['qubit_linewidth']
# self.qubit_frequency = f_qubit
self.qubit_frequency = f_qubit
print('Measured Qubit frequency: ', f_qubit)
self.qubit.freq_qubit(f_qubit)
# self.resonator_linewidth = f_resonator / Q_resonator
# self.qubit_linewidth = qubit_linewidth
self.resonator_linewidth = 0.001
if self.loopcnt == 1:
self.resonator_span = max(min(5*self.resonator_linewidth, 0.005),
self.resonator_span)
# print('Resonator width = {linewidth}, span = {span}'.format(
# linewidth=self.resonator_linewidth, span=self.resonator_span))
# print('Qubit width = {}'.format(self.qubit_linewidth))
self.resonator_frequencies[self.loopcnt] = f_resonator
self.qubit_frequencies[self.loopcnt] = f_qubit
# self.HM.set_sources('Off')
self.loopcnt += 1
return_vals = [f_resonator, f_qubit]
# return_vals = [f_resonator, f_resonator_stderr,
# f_qubit, f_qubit_stderr]
return return_vals
def finish(self, **kw):
# self.HM.set_sources('Off')
pass
class FluxTrack(det.Soft_Detector):
'''
Soft detector that measures the F|1> fraction at a positive and a negative
flux-pulse amplitude using the FluxTrack sequence and returns the mean of
the two values together with the individual + and - results.
'''
def __init__(self, qubit, device, MC, AWG, cal_points=False, **kw):
self.detector_control = 'soft'
self.name = 'FluxTrack'
self.cal_points = cal_points
self.value_names = [r' +/- $F |1\rangle$',
r' + $F |1\rangle$', r' - $F |1\rangle$']
self.value_units = ['', '', '']
self.qubit = qubit
self.AWG = AWG
self.MC = MC
self.operations_dict = device.get_operation_dict()
self.dist_dict = qubit.dist_dict()
self.nested_MC = MC
self.FluxTrack_swf = awg_swf.awg_seq_swf(
fsqs.FluxTrack,
# parameter_name='Amplitude',
unit='V',
AWG=self.AWG,
fluxing_channels=[self.qubit.fluxing_channel()],
awg_seq_func_kwargs={'operation_dict': self.operations_dict,
'q0': self.qubit.name,
'cal_points': self.cal_points,
'distortion_dict': self.dist_dict,
'upload': True})
def prepare(self, **kw):
self.FluxTrack_swf.prepare()
self.FluxTrack_swf.upload = False
def acquire_data_point(self, *args, **kw):
# acquire with MC_nested
self.MC.set_sweep_function(self.FluxTrack_swf)
self.MC.set_sweep_points(np.arange(2+4*self.cal_points))
if self.cal_points:
d = self.qubit.int_avg_det_rot
else:
d = self.qubit.int_avg_det
self.MC.set_detector_function(d)
self.MC.run('FluxTrack_point_%s' % self.qubit.name)
ma_obj = ma.MeasurementAnalysis(auto=True, label='FluxTrack_point')
y_p = ma_obj.measured_values[0, 0]
y_m = ma_obj.measured_values[0, 1]
y_mean = np.mean([y_p, y_m])
return (y_mean, y_p, y_m)
class purity_CZ_detector(det.Soft_Detector):
def __init__(self, measurement_name: str, MC, device, q0, q1,
return_purity_only: bool=True):
self.measurement_name = measurement_name
self.MC = MC
self.name = 'purity_CZ_detector'
self.detector_control = 'soft'
self.device = device
self.q0 = q0
self.q1 = q1
self.return_purity_only = return_purity_only
if self.return_purity_only:
self.value_names = ['Purity sum', 'Purity {}'.format(q0.name),
'Purity {}'.format(q1.name)]
self.value_units = ['']*3
else:
self.value_names = ['Ps', 'P_{}'.format(q0.name),
'p_{}'.format(q1.name), 'IX', 'IY',
'IZ', 'XI', 'YI', 'ZI']
self.value_units = ['']*3 + ['frac']*6
def prepare(self):
self.i = 0
purity_CZ_seq = qwfs.purity_CZ_seq(self.q0.name, self.q1.name)
self.s = swf.QASM_Sweep_v2(qasm_fn=purity_CZ_seq.name,
config=self.device.qasm_config(),
CBox=self.device.central_controller.get_instr(),
verbosity_level=0,
parameter_name='Segment',
unit='#', disable_compile_and_upload=True)
self.d = self.device.get_correlation_detector()
# the sequence only gets compiled and uploaded in the prepare
self.s.compile_and_upload(self.s.qasm_fn, self.s.config)
def acquire_data_point(self, **kw):
self.MC.set_sweep_function(self.s)
self.MC.set_sweep_points(np.arange(3))
self.MC.set_detector_function(self.d)
dat = self.MC.run(name=self.measurement_name+'_'+str(self.i))
dset = dat["dset"]
q0_states = dset[:, 1]
q1_states = dset[:, 2]
# P_q0 = <sigma_x>^2 + <sigma_y>^2 + <sigma_z>^2
purity_q0 = (self.frac_to_pauli_exp(q0_states[0]) +
self.frac_to_pauli_exp(q0_states[1]) +
self.frac_to_pauli_exp(q0_states[2]))
purity_q1 = (self.frac_to_pauli_exp(q1_states[0]) +
self.frac_to_pauli_exp(q1_states[1]) +
self.frac_to_pauli_exp(q1_states[2]))
ps = purity_q0 + purity_q1
self.i += 1
if self.return_purity_only:
return ps, purity_q0, purity_q1
else:
return ps, purity_q0, purity_q1, q0_states, q1_states
def frac_to_pauli_exp(self, frac):
"""
converts a measured fraction to a pauli expectation value
<sigma_i>^2 = (2 * (frac - 0.5))**2
"""
sigma_i2 = (2*(frac - 0.5))**2
return sigma_i2
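# Worked example of the purity arithmetic above (illustrative fractions):
# >>> (2 * (0.5 - 0.5)) ** 2    # fully mixed along this axis
# 0.0
# >>> (2 * (1.0 - 0.5)) ** 2    # definite outcome -> squared expectation value is 1
# 1.0
# For fractions (0.5, 0.5, 1.0) along x, y, z the single-qubit purity sum is 1.0.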
class purityN_CZ_detector(purity_CZ_detector):
def __init__(self, measurement_name: str, N: int,
MC, device, q0, q1,
return_purity_only: bool=True):
super().__init__(measurement_name=measurement_name, MC=MC,
device=device, q0=q0, q1=q1,
return_purity_only=return_purity_only)
self.N = N
def prepare(self):
self.i = 0
purity_CZ_seq = qwfs.purity_N_CZ_seq(self.q0.name, self.q1.name,
N=self.N)
QWG_flux_lutmans = [self.q0.flux_LutMan.get_instr(),
self.q1.flux_LutMan.get_instr()]
self.s = swf.QWG_flux_QASM_Sweep(
qasm_fn=purity_CZ_seq.name,
config=self.device.qasm_config(),
CBox=self.device.central_controller.get_instr(),
QWG_flux_lutmans=QWG_flux_lutmans,
parameter_name='Segment',
unit='#', disable_compile_and_upload=False,
verbosity_level=0)
self.d = self.device.get_correlation_detector()
def acquire_data_point(self, **kw):
self.MC.set_sweep_function(self.s)
self.MC.set_sweep_points(np.arange(3))
self.MC.set_detector_function(self.d)
dat = self.MC.run(name=self.measurement_name+'_'+str(self.i))
dset = dat["dset"]
q0_states = dset[:, 1]
q1_states = dset[:, 2]
# P_q0 = <sigma_x>^2 + <sigma_y>^2 + <sigma_z>^2
purity_q0 = (self.frac_to_pauli_exp(q0_states[0]) +
self.frac_to_pauli_exp(q0_states[1]) +
self.frac_to_pauli_exp(q0_states[2]))
purity_q1 = (self.frac_to_pauli_exp(q1_states[0]) +
self.frac_to_pauli_exp(q1_states[1]) +
self.frac_to_pauli_exp(q1_states[2]))
ps = purity_q0 + purity_q1
self.i += 1
# self.s.disable_compile_and_upload = True
if self.return_purity_only:
return ps, purity_q0, purity_q1
else:
return ps, purity_q0, purity_q1, q0_states, q1_states
def frac_to_pauli_exp(self, frac):
"""
converts a measured fraction to a pauli expectation value
<sigma_i>^2 = (2 * (frac - 0.5))**2
"""
sigma_i2 = (2*(frac - 0.5))**2
return sigma_i2
| 41.556258
| 139
| 0.57245
|
ac8ce46c988a6a9e0ee29122a9648e1094f70505
| 5,805
|
py
|
Python
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/_microsoft_storage_sync.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/_microsoft_storage_sync.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/_microsoft_storage_sync.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import MicrosoftStorageSyncConfiguration
from .operations import Operations
from .operations import StorageSyncServicesOperations
from .operations import PrivateLinkResourcesOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import SyncGroupsOperations
from .operations import CloudEndpointsOperations
from .operations import ServerEndpointsOperations
from .operations import RegisteredServersOperations
from .operations import WorkflowsOperations
from .operations import OperationStatusOperations
from . import models
class MicrosoftStorageSync(object):
"""Microsoft Storage Sync Service API.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.storagesync.operations.Operations
:ivar storage_sync_services: StorageSyncServicesOperations operations
:vartype storage_sync_services: azure.mgmt.storagesync.operations.StorageSyncServicesOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.storagesync.operations.PrivateLinkResourcesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.storagesync.operations.PrivateEndpointConnectionsOperations
:ivar sync_groups: SyncGroupsOperations operations
:vartype sync_groups: azure.mgmt.storagesync.operations.SyncGroupsOperations
:ivar cloud_endpoints: CloudEndpointsOperations operations
:vartype cloud_endpoints: azure.mgmt.storagesync.operations.CloudEndpointsOperations
:ivar server_endpoints: ServerEndpointsOperations operations
:vartype server_endpoints: azure.mgmt.storagesync.operations.ServerEndpointsOperations
:ivar registered_servers: RegisteredServersOperations operations
:vartype registered_servers: azure.mgmt.storagesync.operations.RegisteredServersOperations
:ivar workflows: WorkflowsOperations operations
:vartype workflows: azure.mgmt.storagesync.operations.WorkflowsOperations
:ivar operation_status: OperationStatusOperations operations
:vartype operation_status: azure.mgmt.storagesync.operations.OperationStatusOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = MicrosoftStorageSyncConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.storage_sync_services = StorageSyncServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.sync_groups = SyncGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.cloud_endpoints = CloudEndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.server_endpoints = ServerEndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.registered_servers = RegisteredServersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.workflows = WorkflowsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operation_status = OperationStatusOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> MicrosoftStorageSync
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| 50.043103
| 129
| 0.744358
|
c5516dcfe4b04f359e05eca570c781e598d2d740
| 1,330
|
py
|
Python
|
test/functional/resendwallettransactions.py
|
BlenderSleuth/schleems
|
0dbaa35598e4a8352192615085da54b85192a205
|
[
"MIT"
] | null | null | null |
test/functional/resendwallettransactions.py
|
BlenderSleuth/schleems
|
0dbaa35598e4a8352192615085da54b85192a205
|
[
"MIT"
] | null | null | null |
test/functional/resendwallettransactions.py
|
BlenderSleuth/schleems
|
0dbaa35598e4a8352192615085da54b85192a205
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resendwallettransactions RPC."""
from test_framework.test_framework import SchleemsTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class ResendWalletTransactionsTest(SchleemsTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['--walletbroadcast=false']]
def run_test(self):
# Should raise RPC_WALLET_ERROR (-4) if walletbroadcast is disabled.
assert_raises_rpc_error(-4, "Error: Wallet transaction broadcasting is disabled with -walletbroadcast", self.nodes[0].resendwallettransactions)
# Should return an empty array if there aren't unconfirmed wallet transactions.
self.stop_node(0)
self.start_node(0, extra_args=[])
assert_equal(self.nodes[0].resendwallettransactions(), [])
# Should return an array with the unconfirmed wallet transaction.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
assert_equal(self.nodes[0].resendwallettransactions(), [txid])
if __name__ == '__main__':
ResendWalletTransactionsTest().main()
| 44.333333
| 151
| 0.741353
|
8fa9c8336db9db9eea56d28906cfa2a6adafc30f
| 17,396
|
py
|
Python
|
src/welcome_system.py
|
UnableToCode/Welcome_system
|
1076fce6ab8ee135f34207077937d40b88c84449
|
[
"MIT"
] | null | null | null |
src/welcome_system.py
|
UnableToCode/Welcome_system
|
1076fce6ab8ee135f34207077937d40b88c84449
|
[
"MIT"
] | 6
|
2020-01-28T22:14:36.000Z
|
2022-02-09T23:33:25.000Z
|
src/welcome_system.py
|
UnableToCode/Welcome_system
|
1076fce6ab8ee135f34207077937d40b88c84449
|
[
"MIT"
] | 2
|
2019-09-06T00:57:57.000Z
|
2019-09-06T05:05:23.000Z
|
import datetime
import json
import os
import sys
import threading
import time
from multiprocessing import Process, Pipe
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from src.get_weather import get_weather
from src.video import face_regcon
log_file = "../log.txt"
out_mode = 0
def Log(*msg):
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if out_mode == 0:
print(now, ": ", *msg)
elif out_mode == 1:
with open(log_file, 'a+') as write_log:
logmsg = str(now) + ": "
for i in msg:
logmsg += str(i)
logmsg += '\n'
write_log.write(logmsg)
class Welcome_system(QMainWindow):
def __init__(self):
# noinspection PyArgumentList
super().__init__()
self.people = {}
self.event = {}
self.weather_info = {}
self.speech_info = {}
self.people_file = "../data/people.json"
self.event_file = "../data/events.json"
self.weather_file = "../data/cur_weather.json"
self.speech_file = "../data/speech_info.json"
self.father_weather, self.weather = Pipe()
self.father_face, self.face = Pipe()
self.run_time = datetime.timedelta()
self.now_time = datetime.datetime.now()
self.face_detect = False
# self.setWindowOpacity(0.9)
self.weather_l = Weather_win(self, 720, 540, 360, 360)
self.time_l = QLCDNumber(self)
self.people_l = QLabel(self)
self.event_l = QLabel(self)
self.speech_l = Speech_win(self, 720, 0, 360, 540)
self.pic_l = QLabel(self)
self.run_timer = threading.Timer(0, self.count_runtime)
self.run_timer.setDaemon(True)
self.face_timer = threading.Timer(0, self.face_thread)
self.face_timer.setDaemon(True)
self.speech_timer = threading.Timer(0, self.speech_thread)
self.speech_timer.setDaemon(True)
self.now_timer = threading.Timer(0, self.now_thread)
self.now_timer.setDaemon(True)
self.pic_timer = threading.Timer(300, self.pic_change)
self.pic_timer.setDaemon(True)
self.p_weather = Process(target=get_weather, args=(self.weather,))
self.p_face = Process(target=face_regcon, args=(self.face,))
self.init_config()
self.initUI()
def initUI(self):
# Set the window position and size
self.setGeometry(100, 80, 1440, 900)
self.setFixedSize(1440, 900)
self.setWindowFlags(Qt.FramelessWindowHint)
# Set the window background image using a picture from the resource directory
palette = QPalette()
palette.setBrush(QPalette.Background, QBrush(QPixmap("../res/background.jpg")))
self.setPalette(palette)
# Quit btn
self.quit_btn = QPushButton(self)
self.quit_btn.setGeometry(1400, 0, 40, 40)
self.quit_btn.setStyleSheet("background-color:transparent; border-width:0;")
quit_icon = QIcon("../res/exit.png")
self.quit_btn.setIcon(quit_icon)
self.quit_btn.setIconSize(QSize(40, 40))
self.quit_btn.setText("")
self.quit_btn.clicked.connect(self.on_quit_click)
self.weather_l.set_info(self.weather_info)
self.weather_l.raise_()
self.weather_l.show()
self.pic_l.setGeometry(1110, 450, 300, 300)
self.pic_l.setScaledContents(1)
self.pic_l.setPixmap(QPixmap(QImage("../res/logo.png")))
self.pic_l.setStyleSheet("border-width:0px;")
# Refresh once every hour
next_hour = datetime.datetime.now() + datetime.timedelta(hours=1)
next_hour = next_hour.replace(minute=0, second=0, microsecond=0)
interval = (next_hour - datetime.datetime.now()).total_seconds()
self.hour_timer = threading.Timer(interval, self.renewal_cur_weather)
self.hour_timer.start()
self.people_l.setGeometry(0, 200, 1440, 300)
self.event_l.setGeometry(0, 200, 1440, 300)
self.time_l.setGeometry(1100, 140, 320, 80)
self.time_l.setDigitCount(19)
self.time_l.setMode(QLCDNumber.Dec)
self.time_l.setSegmentStyle(QLCDNumber.Flat)
self.time_l.setStyleSheet("border-style:outset; border-width:0px; color: solid black;")
self.time_l.display(self.now_time.strftime("%Y-%m-%d %H:%M:%S"))
self.speech_timer.start()
self.now_timer.start()
self.pic_timer.start()
# Show the window
self.show()
def init_config(self):
self.read_event_list()
self.read_people_list()
self.read_speech_list()
self.p_weather.start()
self.p_face.start()
self.renewal_today_weather(repeat=False)
# self.father_weather.send(1)
# if self.father_weather.recv() == -1:
# Log("renewal today weather failed!")
# exit(-1)
# else:
# Log("renewal today weather info success")
self.renewal_cur_weather(repeat=False)
# self.father_weather.send(2)
# if self.father_weather.recv() == -1:
# Log("renewal cur weather failed!")
# exit(-1)
# else:
# Log("renewal cur weather info success")
# Get tomorrow's date
next_day = datetime.datetime.now() + datetime.timedelta(days=1)
# Get tomorrow at 00:00
next_day = next_day.replace(hour=0, minute=0, second=0, microsecond=0)
# Get the number of seconds remaining until tomorrow at 00:00
timer_start_time = (next_day - datetime.datetime.now()).total_seconds()
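# Illustrative example of the computation above: if it is now 17:59:00,
# next_day is tomorrow at 00:00:00 and timer_start_time is about 21660 seconds.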
# print(timer_start_time)
# Timer: arguments are (delay before execution in seconds, method to execute)
self.day_timer = threading.Timer(timer_start_time, self.renewal_today_weather)
self.day_timer.start()
self.run_timer.start()
self.face_timer.start()
Log("init config success.")
def read_people_list(self):
with open(self.people_file, 'r') as load_f:
readin = json.load(load_f)
for i in readin["people"]:
# print(i)
self.people[i['id']] = Person(i)
load_f.close()
Log("read people list success.")
def read_event_list(self):
with open(self.event_file, 'r') as load_f:
self.event = json.load(load_f)
# print(self.event)
load_f.close()
Log("read events list success.")
def read_speech_list(self):
with open(self.speech_file, 'r', encoding='UTF-8') as load_f:
self.speech_info = json.load(load_f)
load_f.close()
Log("read speech list success.")
def stages(self):
# Log("stage1")
if self.face_detect is True:
# Log("stage1 to stage2")
face_queue = []
ret = self.father_face.recv()
# print("recv")
# print(ret)
if ret != -1:
face_queue = ret
for id in face_queue:
# print(id)
if id != -1:
Log("stage1 to stage2!")
self.people_l.setText(self.people[id].name)
self.people_l.setFont(QFont("黑体", 16))
self.people_l.setStyleSheet(
"border: 2px solid black; color: black; background: rgb(192, 192, 192, 50);")
self.people_l.raise_()
self.people_l.show()
people_timer = threading.Timer(3, self.people_l.hide)
people_timer.start()
event_id = self.check_event(id)
if event_id == '-1':
Log("No event,back to stage1")
continue
cur_event = self.event[event_id]
Log("stage2 to stage3")
Log(cur_event)
self.event_l.setText(cur_event)
self.event_l.setFont(QFont("黑体", 16))
self.event_l.setStyleSheet(
"border: 2px solid black; color: black; background: rgb(255, 241, 67, 50);")
self.event_l.raise_()
self.event_l.show()
event_timer = threading.Timer(2, self.event_l.hide)
event_timer.start()
Log("stage3 to stage1")
else:
Log("No check!")
self.face_detect = False
def check_event(self, person_id):
return self.people[str(person_id)].event
def renewal_today_weather(self, repeat=True):
self.father_weather.send(1)
if self.father_weather.recv() == -1:
Log("renewal today weather failed!")
exit(-1)
else:
Log("renewal today weather info success")
if repeat is True:
self.day_timer = threading.Timer(86400, self.renewal_today_weather)
self.day_timer.setDaemon(True)
self.day_timer.start()
def renewal_cur_weather(self, repeat=True):
self.father_weather.send(2)
if self.father_weather.recv() == -1:
Log("renewal cur weather failed!")
exit(-1)
else:
Log("renewal cur weather info success")
with open(self.weather_file, 'r') as load_f:
self.weather_info = json.load(load_f)
# print(self.event)
load_f.close()
Log("read weather info success.")
if repeat is True:
self.weather_l.set_info(self.weather_info)
self.weather_l.raise_()
self.weather_l.show()
self.hour_timer = threading.Timer(3600, self.renewal_cur_weather)
self.hour_timer.setDaemon(True)
self.hour_timer.start()
def count_runtime(self):
self.run_time += datetime.timedelta(seconds=1)
print("system has run for ", self.run_time)
self.run_timer = threading.Timer(1, self.count_runtime)
self.run_timer.setDaemon(True)
self.run_timer.start()
def face_thread(self):
if self.face_detect is False:
self.face_detect = True
self.stages()
self.face_timer = threading.Timer(4, self.face_thread)
self.face_timer.setDaemon(True)
self.face_timer.start()
def now_thread(self):
self.now_time = datetime.datetime.now()
# Log("Now time is ", self.now_time)
self.time_l.display(self.now_time.strftime("%Y-%m-%d %H:%M:%S"))
self.now_timer = threading.Timer(1, self.now_thread)
self.now_timer.setDaemon(True)
self.now_timer.start()
def speech_thread(self):
for i in self.speech_info['data']:
self.speech_l.set_info(i)
self.speech_l.raise_()
self.speech_l.show()
time.sleep(10)
self.speech_timer = threading.Timer(10, self.speech_thread)
self.speech_timer.setDaemon(True)
self.speech_timer.start()
def pic_change(self):
# TODO: implement the picture change and remove the debug print below (test only)
print("picture changed")
self.pic_timer = threading.Timer(15, self.pic_change)
self.pic_timer.setDaemon(True)
self.pic_timer.start()
def closeEvent(self, event):
"""
Overrides MainWindow.closeEvent.
Terminates all child processes when the application exits.
:param event:
:return:
"""
reply = QMessageBox.question(self,
'Exit',
"Quit?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
self.p_face.terminate()
self.p_weather.terminate()
os._exit(0)
else:
event.ignore()
def on_quit_click(self):
reply = QMessageBox.question(self,
'Exit',
"Quit?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.Yes:
self.p_face.terminate()
self.p_weather.terminate()
os._exit(0)
class Person:
def __init__(self, person):
self.id = person["id"]
self.name = person["name"]
self.birth = person["birth"]
self.event = person["event"]
self.rank = person["rank"]
class Speech_win(QWidget):
def __init__(self, father, x, y, w, h):
super().__init__(parent=father)
self.setGeometry(x, y, w, h)
self.pic = QLabel(self)
self.pic.setGeometry(0.25 * w, 0.02 * h, 0.50 * w, 0.48 * h)
self.title = QLabel(self)
self.title.setGeometry(0.05 * w, 0.52 * h, 0.9 * w, 0.05 * h)
self.person = QLabel(self)
self.person.setGeometry(0.05 * w, 0.59 * h, 0.9 * w, 0.05 * h)
self.date = QLabel(self)
self.date.setGeometry(0.05 * w, 0.66 * h, 0.9 * w, 0.05 * h)
self.info = QLabel(self)
self.info.setGeometry(0.05 * w, 0.75 * h, 0.9 * w, 0.25 * h)
def set_info(self, info):
self.title.setText(info['title'])
self.title.setFont(QFont("黑体", 15))
self.title.setAlignment(Qt.AlignHCenter)
self.title.raise_()
self.title.show()
self.person.setText(info['person'])
self.person.setFont(QFont("黑体", 15))
self.person.setAlignment(Qt.AlignHCenter)
self.person.raise_()
self.person.show()
self.pic.setStyleSheet("border-width:0px")
self.date.setText(info['date'])
self.date.setFont(QFont("黑体", 15))
self.date.setAlignment(Qt.AlignHCenter)
self.date.raise_()
self.date.show()
self.info.setText(info['info'])
self.info.setFont(QFont("黑体", 14))
self.info.setStyleSheet("border-width:0px; color: black;")
self.info.setAlignment(Qt.AlignHCenter)
self.info.raise_()
self.info.show()
self.pic.setPixmap(QPixmap("../res/speech_image/" + info['image']))
self.pic.setScaledContents(True)
self.pic.raise_()
self.pic.show()
class Weather_win(QWidget):
def __init__(self, father, x, y, w, h):
super(Weather_win, self).__init__(parent=father)
self.setGeometry(x, y, w, h)
self.pic = QLabel(self)
self.pic.setGeometry(5, 0, w - 5, 130)
self.city = QTextBrowser(self)
self.city.setGeometry(5, 135, w - 5, 30)
self.wea = QTextBrowser(self)
self.wea.setGeometry(5, 170, w - 5, 30)
self.tem = QTextBrowser(self)
self.tem.setGeometry(5, 205, w - 5, 30)
self.wind = QTextBrowser(self)
self.wind.setGeometry(5, 240, w - 5, 30)
self.humidity = QTextBrowser(self)
self.humidity.setGeometry(5, 275, w - 5, 30)
self.air = QTextBrowser(self)
self.air.setGeometry(5, 310, w - 5, 30)
def set_info(self, info):
self.pic.setPixmap(QPixmap("../res/weather_image/" + info['data'][0]['hours'][0]['wea'] + ".png"))
self.pic.setAlignment(Qt.AlignTop | Qt.AlignHCenter)
self.pic.raise_()
self.pic.show()
self.city.setText(info['city'])
self.city.setFont(QFont("黑体", 12))
self.city.setStyleSheet("border-style:outset; border-width:0px; color: black; background-color: transparent")
self.city.setAlignment(Qt.AlignHCenter)
self.city.raise_()
self.city.show()
self.wea.setText(info['data'][0]['hours'][0]['wea'])
self.wea.setFont(QFont("黑体", 12))
self.wea.setStyleSheet("border-style:outset; border-width:0px; color: black; background-color: transparent")
self.wea.setAlignment(Qt.AlignHCenter)
self.wea.raise_()
self.wea.show()
self.tem.setText(
"气温:" + info['data'][0]['hours'][0]['tem'] + " " + info['data'][0]['tem1'] + "/" + info['data'][0][
'tem2'])
self.tem.setFont(QFont("黑体", 12))
self.tem.setStyleSheet("border-style:outset; border-width:0px; color: black; background-color: transparent")
self.tem.setAlignment(Qt.AlignHCenter)
self.tem.raise_()
self.tem.show()
self.wind.setText("风力:" + info['data'][0]['hours'][0]['win'] + " " + info['data'][0]['hours'][0]['win_speed'])
self.wind.setFont(QFont("黑体", 12))
self.wind.setStyleSheet("border-style:outset; border-width:0px; color: black; background-color: transparent")
self.wind.setAlignment(Qt.AlignHCenter)
self.wind.raise_()
self.wind.show()
self.humidity.setText("湿度:" + str(info['data'][0]['humidity']))
self.humidity.setFont(QFont("黑体", 12))
self.humidity.setStyleSheet("border-style:outset; border-width:0px; color: black; background-color: transparent")
self.humidity.setAlignment(Qt.AlignHCenter)
self.humidity.raise_()
self.humidity.show()
self.air.setText("空气指数:" + str(info['data'][0]['air']) + " " + info['data'][0]['air_level'])
self.air.setFont(QFont("黑体", 12))
self.air.setAlignment(Qt.AlignHCenter)
self.air.setStyleSheet("border-style:outset; border-width:0px; color: black; background-color: transparent")
self.air.raise_()
self.air.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
win = Welcome_system()
sys.exit(app.exec_())
| 35.574642
| 121
| 0.576167
|
2bfd937fc89f6440570eb57a63ec8609a9386135
| 1,414
|
py
|
Python
|
torchblocks/models/transformer/utils.py
|
lonePatient/TorchBlocks
|
4a65d746cc8a396cb7df73ed4644d97ddf843e29
|
[
"MIT"
] | 82
|
2020-06-23T05:51:08.000Z
|
2022-03-29T08:11:08.000Z
|
torchblocks/models/transformer/utils.py
|
Raiselimit/TorchBlocks
|
a5baecb9a2470ff175087475630f2b7db3f7ef51
|
[
"MIT"
] | null | null | null |
torchblocks/models/transformer/utils.py
|
Raiselimit/TorchBlocks
|
a5baecb9a2470ff175087475630f2b7db3f7ef51
|
[
"MIT"
] | 22
|
2020-06-23T05:51:10.000Z
|
2022-03-18T07:01:43.000Z
|
from torchblocks.models.transformer.modeling_bert_for_theseus import BertEncoder
class ConstantReplacementScheduler:
def __init__(self, bert_encoder: BertEncoder, replacing_rate, replacing_steps=None):
self.bert_encoder = bert_encoder
self.replacing_rate = replacing_rate
self.replacing_steps = replacing_steps
self.step_counter = 0
self.bert_encoder.set_replacing_rate(replacing_rate)
def step(self):
self.step_counter += 1
if self.replacing_steps is None or self.replacing_rate == 1.0:
return self.replacing_rate
else:
if self.step_counter >= self.replacing_steps:
self.bert_encoder.set_replacing_rate(1.0)
self.replacing_rate = 1.0
return self.replacing_rate
class LinearReplacementScheduler:
def __init__(self, bert_encoder: BertEncoder, base_replacing_rate, k):
self.bert_encoder = bert_encoder
self.base_replacing_rate = base_replacing_rate
self.step_counter = 0
self.k = k
self.bert_encoder.set_replacing_rate(base_replacing_rate)
def step(self):
self.step_counter += 1
current_replacing_rate = min(self.k * self.step_counter + self.base_replacing_rate, 1.0)
self.bert_encoder.set_replacing_rate(current_replacing_rate)
return current_replacing_rate
| 41.588235
| 97
| 0.690948
|
353b864aa599d574dd46b1034e931456f0df8389
| 6,708
|
py
|
Python
|
faersPreprocess.py
|
jl223vy/FAERS-data-toolkit
|
7927d28267ddc2733938b0823c4aff6b28018642
|
[
"MIT"
] | 8
|
2021-03-10T12:42:36.000Z
|
2021-07-30T20:46:11.000Z
|
faersPreprocess.py
|
backmind/FAERS-data-toolkit
|
7927d28267ddc2733938b0823c4aff6b28018642
|
[
"MIT"
] | null | null | null |
faersPreprocess.py
|
backmind/FAERS-data-toolkit
|
7927d28267ddc2733938b0823c4aff6b28018642
|
[
"MIT"
] | 8
|
2021-02-01T15:01:11.000Z
|
2022-01-15T00:29:26.000Z
|
# coding: utf-8
# author: Jing Li
# date: 2019/04/01
import os
import warnings
import pandas as pd
import numpy as np
# local directory to save files.
data_dir = "FAERSdata"
directoryPath = os.getcwd() + '/' + data_dir
# ignore warnings
warnings.filterwarnings('ignore')
def processDemo():
for filename in os.listdir(directoryPath):
if "DEMO" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
demo_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep='$', error_bad_lines=False)
# keep primaryid, caseid, age, sex, wt
demo_df.drop(
['caseversion', 'i_f_code', 'lit_ref', 'event_dt', 'auth_num', 'fda_dt', 'age_grp', 'e_sub',
'rept_dt', 'to_mfr', 'reporter_country', 'mfr_dt', 'init_fda_dt', 'rept_cod', 'mfr_num',
'mfr_sndr', 'occp_cod', 'occr_country'], inplace=True, axis=1, errors='ignore')
# process sex
demo_df['sex'] = demo_df['sex'].fillna('UNK')
sex_map = {'M': "0", 'F': "1", 'UNK': "2"}
demo_df['sex'] = demo_df['sex'].map(sex_map)
# process age
demo_df = demo_df[pd.notnull(demo_df['age'])]
# unified age unit
demo_df = demo_df[demo_df.age_cod != 'dec'].reset_index(drop=True)
demo_df['age'] = demo_df['age'].apply(pd.to_numeric, errors='coerce')
demo_df['age'] = np.where(demo_df['age_cod'] == 'MON', demo_df['age'] * 1 / 12, demo_df['age']) # mounth
demo_df['age'] = np.where(demo_df['age_cod'] == 'WK', demo_df['age'] * 1 / 52, demo_df['age']) # week
demo_df['age'] = np.where(demo_df['age_cod'] == 'DY', demo_df['age'] * 1 / 365, demo_df['age']) # day
demo_df['age'] = np.where(demo_df['age_cod'] == 'HR', demo_df['age'] * 1 / 8760, demo_df['age']) # hour
demo_df = demo_df.drop(['age_cod'], axis=1)
# age discretization and label encode
# Newborn, Infant, Child Preschool, Child, Adolescent, Young Adult, Adult,Middle Aged, Aged, Aged+
age_bins = [0, 1, 2, 5, 12, 18, 24, 44, 64, 79, 123]
demo_df['age'] = pd.cut(demo_df.age, age_bins, labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
demo_df = demo_df.dropna(axis=0, subset=["age"]) # drop unreasonable age <0 or >123
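# Illustrative example of the age binning above:
# >>> pd.cut(pd.Series([0.5, 30.0, 70.0]), age_bins, labels=list(range(1, 11))).tolist()
# [1, 7, 9]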
# process weight(wt)
demo_df = demo_df[pd.notnull(demo_df['wt'])]
# unified weight unit
demo_df['wt'] = demo_df['wt'].apply(pd.to_numeric, errors='coerce')
demo_df['wt'] = np.where(demo_df['wt_cod'] == 'LBS', demo_df['wt'] * 0.453592, demo_df['wt']) # pounds
demo_df['wt'] = np.where(demo_df['wt_cod'] == 'GMS', demo_df['wt'] * 0.001, demo_df['wt']) # grams
demo_df = demo_df.drop(['wt_cod'], axis=1)
# weight discretization and label encode
wt_bins = [0, 5, 10, 40, 50, 60, 70, 80, 90, 100, 150, 200, 300]
demo_df['wt'] = pd.cut(demo_df.wt, wt_bins, labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
demo_df = demo_df.dropna(axis=0, subset=["wt"]) # drop unreasonable weight <0 or >300
# save file
demo_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
def processDrug():
for filename in os.listdir(directoryPath):
if "DRUG" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
drug_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep="$", error_bad_lines=False)
# keep primaryid, caseid, role_cod, drugname
drug_df.drop(
['drug_seq', 'val_vbm', 'dose_vbm', 'dose_form', 'dose_amt', 'dose_unit', 'cum_dose_chr', 'prod_ai',
'cum_dose_unit', 'dechal', 'rechal', 'lot_num', 'exp_dt', 'nda_num', 'route', 'dose_freq'],
inplace=True, axis=1, errors='ignore')
# process role_cod label encode
drug_df = drug_df[pd.notnull(drug_df['role_cod'])]
rolecod_map = {'PS': '0', 'SS': '1', 'C': '2', 'I': '3'}
drug_df['role_cod'] = drug_df['role_cod'].map(rolecod_map)
# process drugname
drug_df = drug_df[pd.notnull(drug_df['drugname'])]
drug_df['drugname'] = drug_df['drugname'].str.strip().str.lower() # to lowercase
drug_df = drug_df[~drug_df['drugname'].isin(['unknown'])] # drop unknown
drug_df['drugname'] = drug_df['drugname'].str.replace('\\', '/') # fix slashes
drug_df['drugname'] = drug_df['drugname'].map(
lambda x: x[:-1] if str(x).endswith(".") else x) # fix ending with period
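# Illustrative example of the cleaning chain above:
# '  ASPIRIN\EC.  '  ->  'aspirin/ec'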
# save file
drug_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
def processReac():
for filename in os.listdir(directoryPath):
if "REAC" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
reac_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep="$", error_bad_lines=False)
# keep primaryid, caseid, pt
reac_df.drop(['drug_rec_act'], inplace=True, axis=1, errors='ignore')
# process pt
reac_df = reac_df[pd.notnull(reac_df['pt'])]
reac_df['pt'] = reac_df['pt'].str.strip().str.lower() # to lowercase
reac_df = reac_df[~reac_df['pt'].isin(['unknown'])] # drop unknown
reac_df['pt'] = reac_df['pt'].map(
lambda x: x[:-1] if str(x).endswith(".") else x) # fix ending with period
# save file
reac_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
def processOutc():
for filename in os.listdir(directoryPath):
if "OUTC" in filename.upper() and "TXT" in filename.upper():
print("Process " + filename)
outc_df = pd.read_csv(directoryPath + "/" + filename, low_memory=False, sep="$", error_bad_lines=False)
# process outc_cod
outc_df = outc_df[pd.notnull(outc_df['outc_cod'])]
outc_df = outc_df[outc_df['outc_cod'].isin(['DE', 'LT', 'HO', 'DS', 'CA', 'RI', 'OT'])]
outccod_map = {'DE': '0', 'LT': '1', 'HO': '2', 'DS': '3', 'CA': '4', 'RI': '5', 'OT': '6'}
outc_df['outc_cod'] = outc_df['outc_cod'].map(outccod_map)
# save file
outc_df.to_csv(directoryPath + "/" + filename[:-4] + '.csv', header=True, index=False)
def main():
processDemo()
processDrug()
processReac()
# processOutc()
if __name__ == '__main__':
main()
| 47.914286
| 117
| 0.561717
|
1db8fe9abaf1d64968b9e90f6ad4fe25ed6a0f43
| 3,521
|
py
|
Python
|
blueoil/common.py
|
oatawa1/blueoil
|
6a5f1cc1fb78c86423338f99cb9dbf506a76f3d6
|
[
"Apache-2.0"
] | 2
|
2020-02-06T08:59:50.000Z
|
2020-03-05T10:11:50.000Z
|
blueoil/common.py
|
oatawa1/blueoil
|
6a5f1cc1fb78c86423338f99cb9dbf506a76f3d6
|
[
"Apache-2.0"
] | null | null | null |
blueoil/common.py
|
oatawa1/blueoil
|
6a5f1cc1fb78c86423338f99cb9dbf506a76f3d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import numpy as np
from enum import Enum
from blueoil.turbo_color_map import TURBO_CMAP_DATA
class Tasks(Enum):
CLASSIFICATION = "IMAGE.CLASSIFICATION"
SEMANTIC_SEGMENTATION = "IMAGE.SEMANTIC_SEGMENTATION"
OBJECT_DETECTION = "IMAGE.OBJECT_DETECTION"
KEYPOINT_DETECTION = "IMAGE.KEYPOINT_DETECTION"
def get_color_map(length):
# Color Palette for General Purpose
# Sample image is here
# https://github.com/blue-oil/blueoil/tree/master/docs/_static/color_map.png
color_map_base = [
(192, 0, 128), # COLOR00
(0, 128, 192), # COLOR01
(0, 128, 64), # COLOR02
(128, 0, 0), # COLOR03
(64, 0, 128), # COLOR04
(64, 0, 192), # COLOR05
(192, 128, 64), # COLOR06
(192, 192, 128), # COLOR07
(64, 64, 128), # COLOR08
(128, 0, 192), # COLOR09
(192, 0, 64), # COLOR10
(128, 128, 64), # COLOR11
(192, 0, 192), # COLOR12
(128, 64, 64), # COLOR13
(64, 192, 128), # COLOR14
(64, 64, 0), # COLOR15
(128, 64, 128), # COLOR16
(128, 128, 192), # COLOR17
(0, 0, 192), # COLOR18
(192, 128, 128), # COLOR19
]
# This function generates a color map of arbitrary length by cycling the base palette.
color_map = color_map_base * int(math.ceil(length / len(color_map_base)))
return color_map[:length]
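# Illustrative usage of get_color_map (hedged sketch):
# >>> palette = get_color_map(25)
# >>> len(palette), palette[20] == palette[0]
# (25, True)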
# For replacing the Matplotlib Jet colormap, we use the Turbo color map
# https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html
# The colormap allows for a large number of quantization levels:
# https://tinyurl.com/ybm3kpql
# Referred from the following gist:
# https://gist.github.com/mikhailov-work/ee72ba4191942acecc03fe6da94fc73f
# Copyright 2019 Google LLC.
# SPDX-License-Identifier: Apache-2.0
# Changes:
# 1. Vectorized the implementation using numpy
# 2. Use numpy.modf to get integer and float parts
# 3. Provided an example in comments
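# Hedged usage sketch (assumes TURBO_CMAP_DATA holds one RGB row per 8-bit level):
# >>> heat = np.random.rand(32, 32)   # feature map already scaled to [0, 1]
# >>> apply_color_map(heat).shape
# (32, 32, 3)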
def apply_color_map(image):
turbo_cmap_data = np.asarray(TURBO_CMAP_DATA)
x = np.asarray(image)
x = x.clip(0., 1.)
# Use numpy.modf to get the integer and decimal parts of feature values
# in the input feature map (or heatmap) that has to be colored.
# Example:
# >>> import numpy as np
# >>> x = np.array([1.2, 2.3, 4.5, 20.45, 6.75, 8.88])
# >>> f, i = np.modf(x) # returns a tuple of length 2
# >>> print(i.shape, f.shape)
# (6,) (6,)
# >>> print(i)
# array([ 1. 2. 4. 20. 6. 8.])
# >>> print(f)
# array([0.2 0.3 0.5 0.45 0.75 0.88])
f, a = np.modf(x * 255.0)
a = a.astype(int)
b = (a + 1).clip(max=255)
image_colored = (
turbo_cmap_data[a]
+ (turbo_cmap_data[b] - turbo_cmap_data[a]) * f[..., None]
)
return image_colored
| 34.519608
| 80
| 0.618006
|
5f26470f84de1b3a1832bba8536aa7cfb4b157f1
| 2,085
|
py
|
Python
|
tests/test_geometry.py
|
blrm/vsketch
|
38bd24e121edb6e654e66c51f0a45005c3e5627e
|
[
"MIT"
] | null | null | null |
tests/test_geometry.py
|
blrm/vsketch
|
38bd24e121edb6e654e66c51f0a45005c3e5627e
|
[
"MIT"
] | null | null | null |
tests/test_geometry.py
|
blrm/vsketch
|
38bd24e121edb6e654e66c51f0a45005c3e5627e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from shapely.geometry import (
LinearRing,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
from tests.utils import line_count_equal, line_exists
@pytest.mark.parametrize(
["data", "expected"],
[
[LinearRing([(0, 0), (1, 0), (0, 2)]), [[0, 1, 2j, 0]]],
[LineString([(1, 1), (0, 3), (10, 0), (4, 3)]), [[1 + 1j, 3j, 10, 4 + 3j]]],
[
MultiLineString([[(0, 0), (0, 1), (3, 4)], [(1, 0), (4, 3), (1, 2)]]),
[[0, 1j, 3 + 4j], [1, 4 + 3j, 1 + 2j]],
],
[Polygon([(0, 0), (3, 0), (1, 2)]), [[0, 3, 1 + 2j, 0]]],
[
Polygon(
[(0, 0), (30, 0), (30, 30), (0, 30)],
holes=[[(1, 1), (3, 3), (2, 3)], [(10, 10), (12, 10), (10, 12)]],
),
[
[0, 30, 30 + 30j, 30j, 0],
[1 + 1j, 3 + 3j, 2 + 3j, 1 + 1j],
[10 + 10j, 12 + 10j, 10 + 12j, 10 + 10j],
],
],
[
MultiPolygon(
[
(
[(0, 0), (30, 0), (30, 30), (0, 30)],
[[(1, 1), (3, 3), (2, 3)], [(10, 10), (12, 10), (10, 12)]],
),
([(0, 0), (3, 0), (1, 2)], []),
]
),
[
[0, 30, 30 + 30j, 30j, 0],
[1 + 1j, 3 + 3j, 2 + 3j, 1 + 1j],
[10 + 10j, 12 + 10j, 10 + 12j, 10 + 10j],
[0, 3, 1 + 2j, 0],
],
],
],
)
def test_geometry_single_path(vsk, data, expected):
vsk.geometry(data)
assert line_count_equal(vsk, len(expected))
for line in expected:
assert line_exists(vsk, np.array(line, dtype=complex))
def test_geometry_wrong_arg(vsk):
with pytest.raises(ValueError):
vsk.geometry(np.arange(10))
with pytest.raises(ValueError):
vsk.geometry(Point([10, 12]))
with pytest.raises(ValueError):
vsk.geometry(MultiPoint([(3, 2), (3, 4)]))
| 28.958333
| 84
| 0.393765
|
74ce614d5cfd4cc722721b28a4ebb20e46ed69ae
| 2,175
|
py
|
Python
|
shards/utils/colors.py
|
ReigenAraka/milvus
|
b2f19ace0e1dcd431a512141f42b748581d4b92d
|
[
"Apache-2.0"
] | 4
|
2020-07-29T02:59:53.000Z
|
2021-11-16T11:07:51.000Z
|
shards/utils/colors.py
|
liangwlw/milvus
|
7e7f626b9c7288c1c82f5dafed87d33897f4b64e
|
[
"Apache-2.0"
] | 2
|
2020-08-20T07:17:50.000Z
|
2020-08-21T04:21:34.000Z
|
shards/utils/colors.py
|
liangwlw/milvus
|
7e7f626b9c7288c1c82f5dafed87d33897f4b64e
|
[
"Apache-2.0"
] | 2
|
2020-03-02T05:16:57.000Z
|
2020-03-04T06:05:55.000Z
|
# Reset
Color_Off = '\033[0m' # Text Reset
# Regular Colors
Black = '\033[0;30m' # Black
Red = '\033[0;31m' # Red
Green = '\033[0;32m' # Green
Yellow = '\033[0;33m' # Yellow
Blue = '\033[0;34m' # Blue
Purple = '\033[0;35m' # Purple
Cyan = '\033[0;36m' # Cyan
White = '\033[0;37m' # White
# Bold
BBlack = '\033[1;30m' # Black
BRed = '\033[1;31m' # Red
BGreen = '\033[1;32m' # Green
BYellow = '\033[1;33m' # Yellow
BBlue = '\033[1;34m' # Blue
BPurple = '\033[1;35m' # Purple
BCyan = '\033[1;36m' # Cyan
BWhite = '\033[1;37m' # White
# Underline
UBlack = '\033[4;30m' # Black
URed = '\033[4;31m' # Red
UGreen = '\033[4;32m' # Green
UYellow = '\033[4;33m' # Yellow
UBlue = '\033[4;34m' # Blue
UPurple = '\033[4;35m' # Purple
UCyan = '\033[4;36m' # Cyan
UWhite = '\033[4;37m' # White
# Background
On_Black = '\033[40m' # Black
On_Red = '\033[41m' # Red
On_Green = '\033[42m' # Green
On_Yellow = '\033[43m' # Yellow
On_Blue = '\033[44m' # Blue
On_Purple = '\033[45m' # Purple
On_Cyan = '\033[46m' # Cyan
On_White = '\033[47m' # White
# High Intensity
IBlack = '\033[0;90m' # Black
IRed = '\033[0;91m' # Red
IGreen = '\033[0;92m' # Green
IYellow = '\033[0;93m' # Yellow
IBlue = '\033[0;94m' # Blue
IPurple = '\033[0;95m' # Purple
ICyan = '\033[0;96m' # Cyan
IWhite = '\033[0;97m' # White
# Bold High Intensity
BIBlack = '\033[1;90m' # Black
BIRed = '\033[1;91m' # Red
BIGreen = '\033[1;92m' # Green
BIYellow = '\033[1;93m' # Yellow
BIBlue = '\033[1;94m' # Blue
BIPurple = '\033[1;95m' # Purple
BICyan = '\033[1;96m' # Cyan
BIWhite = '\033[1;97m' # White
# High Intensity backgrounds
On_IBlack = '\033[0;100m' # Black
On_IRed = '\033[0;101m' # Red
On_IGreen = '\033[0;102m' # Green
On_IYellow = '\033[0;103m' # Yellow
On_IBlue = '\033[0;104m' # Blue
On_IPurple = '\033[0;105m' # Purple
On_ICyan = '\033[0;106m' # Cyan
On_IWhite = '\033[0;107m' # White
| 29.794521
| 40
| 0.523218
|
9146da596886640da27bf6b979a7d3a99cde7064
| 3,297
|
py
|
Python
|
accounts/dashboards/migrations/0001_initial.py
|
HaeckelK/bookkeeping
|
6f8b62f1322fe1c409f397222653382d302d9754
|
[
"MIT"
] | null | null | null |
accounts/dashboards/migrations/0001_initial.py
|
HaeckelK/bookkeeping
|
6f8b62f1322fe1c409f397222653382d302d9754
|
[
"MIT"
] | 7
|
2021-06-30T12:05:47.000Z
|
2021-07-14T07:50:27.000Z
|
accounts/dashboards/migrations/0001_initial.py
|
HaeckelK/bookkeeping
|
6f8b62f1322fe1c409f397222653382d302d9754
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-03 17:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="GLTransactionLine",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
],
),
migrations.CreateModel(
name="NominalAccount",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("name", models.CharField(max_length=100)),
("expected_sign", models.CharField(choices=[("dr", "debit"), ("cr", "credit")], max_length=2)),
("is_control_account", models.BooleanField()),
("is_bank_account", models.BooleanField()),
],
),
migrations.CreateModel(
name="PeriodBalance",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("period", models.IntegerField()),
("amount", models.IntegerField()),
("amount_cumulative", models.IntegerField()),
("count_transactions", models.IntegerField()),
(
"nominal",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="dashboards.nominalaccount"),
),
],
),
migrations.CreateModel(
name="NominalTransactions",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("transaction_id", models.IntegerField(unique=True)),
("journal_id", models.IntegerField()),
("date_transaction", models.DateField()),
("period", models.IntegerField()),
("amount", models.IntegerField()),
("description", models.CharField(max_length=500)),
(
"nominal",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="dashboards.nominalaccount"),
),
],
),
migrations.CreateModel(
name="JournalLine",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("amount", models.IntegerField()),
(
"nominal",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="dashboards.nominalaccount"),
),
],
),
migrations.CreateModel(
name="Journal",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("description", models.CharField(max_length=250)),
("period", models.IntegerField()),
("lines", models.ManyToManyField(to="dashboards.JournalLine")),
],
),
]
| 40.703704
| 117
| 0.536548
|
84985c8b56f1ab9e33dc7b9debc7a9f9bc55ccab
| 1,948
|
py
|
Python
|
2020/D11/D11Q1.py
|
buchasia/advent-of-code
|
f568c6330c8934325913705b39ef8c25a1023057
|
[
"MIT"
] | null | null | null |
2020/D11/D11Q1.py
|
buchasia/advent-of-code
|
f568c6330c8934325913705b39ef8c25a1023057
|
[
"MIT"
] | null | null | null |
2020/D11/D11Q1.py
|
buchasia/advent-of-code
|
f568c6330c8934325913705b39ef8c25a1023057
|
[
"MIT"
] | null | null | null |
def solveQuestion(inputPath):
fileP = open(inputPath, 'r')
fileLines = fileP.readlines()
fileP.close()
newPosition = []
numRows = len(fileLines)
numCols = len(fileLines[0].strip('\n'))
for line in fileLines:
currentLine = []
for grid in line.strip('\n'):
currentLine.append(grid)
newPosition.append(currentLine)
lastPosition = []
counter = 0
while lastPosition != newPosition:
lastPosition = list(newPosition)
newPosition =[]
counter += 1
for row in range(numRows):
currentLine = []
for col in range(numCols):
if lastPosition[row][col] == '.':
currentLine.append('.')
continue
numNeighbors = getNeighbors(lastPosition, row, col, numRows, numCols)
if numNeighbors == 0 and lastPosition[row][col] == 'L':
currentLine.append('#')
elif numNeighbors >= 4 and lastPosition[row][col] == '#':
currentLine.append('L')
else:
currentLine.append(lastPosition[row][col])
newPosition.append(currentLine)
totalOccupied = 0
for position in newPosition:
totalOccupied += ''.join(position).count('#')
return totalOccupied
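# Counts occupied ('#') seats among the up-to-8 neighbours of (row, col),
# skipping neighbour indices that fall outside the grid.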
def getNeighbors(lastPosition, row, col, maxRow, maxCol):
indexMove = [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]
counter = 0
for index in indexMove:
rowActual = row + index[0]
colActual = col + index[1]
if rowActual < 0 or colActual < 0:
continue
if rowActual == maxRow:
continue
if colActual == maxCol:
continue
if lastPosition[rowActual][colActual] == '#':
counter += 1
return counter
print(solveQuestion('InputD11Q1.txt'))
| 29.074627
| 86
| 0.533368
|
baf19f92c808ca6ce34fa48cee0308de4c24b75b
| 935
|
py
|
Python
|
6/test.py
|
cwaffles/CMPT225Labs
|
c4c6c2ff90e99ec3a5938a63f48c41dab4a8190b
|
[
"MIT"
] | 1
|
2016-06-04T07:39:21.000Z
|
2016-06-04T07:39:21.000Z
|
6/test.py
|
cwaffles/CMPT225Labs
|
c4c6c2ff90e99ec3a5938a63f48c41dab4a8190b
|
[
"MIT"
] | null | null | null |
6/test.py
|
cwaffles/CMPT225Labs
|
c4c6c2ff90e99ec3a5938a63f48c41dab4a8190b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import subprocess
import sys,os
passed = 0
n_tests = 6
# Unbuffered stdout.
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
# Run a for loop of tests.
sys.stdout.write("Running merge_sort on battery of " + str(n_tests) + " tests.\n")
for i in range(1, n_tests + 1):
    sys.stdout.write(" Running test " + str(i) + "...")
    # Run the merge_sort with this test input, redirect output to i.out.
    subprocess.call("./merge_sort" + " < " + str(i) + ".in" + " > " + str(i) + ".out", shell=True)
    # Compare output to the ground truth.
    rt = subprocess.call("diff -b " + str(i) + ".out " + str(i) + ".er > /dev/null", shell=True)
    subprocess.call("rm " + str(i) + ".out", shell=True)
    if rt == 0:
        passed += 1
        sys.stdout.write("passed\n")
    else:
        sys.stdout.write("failed\n")
sys.stdout.write("Passed " + str(passed) + " of " + str(n_tests) + " tests.\n")
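# --- Illustrative sketch (not part of the original script) ---
# The harness above relies on Python 2 behaviour (os.fdopen(..., 'w', 0) gives an
# unbuffered text stream). A rough Python 3 version of one test iteration, using
# subprocess.run and the imports already present above, might look like this; the
# ./merge_sort binary and the i.in / i.er file naming are the same assumptions.
def run_one_test_py3(i):
    with open(str(i) + '.in') as fin, open(str(i) + '.out', 'w') as fout:
        subprocess.run(['./merge_sort'], stdin=fin, stdout=fout)
    rt = subprocess.run(['diff', '-b', str(i) + '.out', str(i) + '.er'],
                        stdout=subprocess.DEVNULL).returncode
    os.remove(str(i) + '.out')
    sys.stdout.write('passed\n' if rt == 0 else 'failed\n')
    return rt == 0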
| 27.5
| 98
| 0.585027
|
6bc9584418fad424e19f5bea7cc746141561a163
| 22,863
|
py
|
Python
|
openapi_client/api/sources_api.py
|
chargio/using-koku-api-test
|
2f41fd83ab730705352b116b7a6e05ae3d9a8ebd
|
[
"MIT"
] | 1
|
2020-03-18T11:32:09.000Z
|
2020-03-18T11:32:09.000Z
|
openapi_client/api/sources_api.py
|
chargio/using-koku-api-test
|
2f41fd83ab730705352b116b7a6e05ae3d9a8ebd
|
[
"MIT"
] | null | null | null |
openapi_client/api/sources_api.py
|
chargio/using-koku-api-test
|
2f41fd83ab730705352b116b7a6e05ae3d9a8ebd
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Cost Management
The API for Project Koku and OpenShift cost management. You can find out more about Project Koku at [https://github.com/project-koku/](https://github.com/project-koku/). # noqa: E501
The version of the OpenAPI document: 1.0.2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SourcesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_source(self, source_id, **kwargs): # noqa: E501
"""Get a source # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_source(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int source_id: ID of source to get (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SourceOut
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_source_with_http_info(source_id, **kwargs) # noqa: E501
def get_source_with_http_info(self, source_id, **kwargs): # noqa: E501
"""Get a source # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_source_with_http_info(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int source_id: ID of source to get (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SourceOut, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'source_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_source" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'source_id' is set
if self.api_client.client_side_validation and ('source_id' not in local_var_params or # noqa: E501
local_var_params['source_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `source_id` when calling `get_source`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source_id' in local_var_params:
path_params['source_id'] = local_var_params['source_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic_auth'] # noqa: E501
return self.api_client.call_api(
'/sources/{source_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SourceOut', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_source_stats(self, source_id, **kwargs): # noqa: E501
"""Get a source statistics # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_source_stats(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int source_id: ID of source to get (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_source_stats_with_http_info(source_id, **kwargs) # noqa: E501
def get_source_stats_with_http_info(self, source_id, **kwargs): # noqa: E501
"""Get a source statistics # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_source_stats_with_http_info(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int source_id: ID of source to get (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'source_id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_source_stats" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'source_id' is set
if self.api_client.client_side_validation and ('source_id' not in local_var_params or # noqa: E501
local_var_params['source_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `source_id` when calling `get_source_stats`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source_id' in local_var_params:
path_params['source_id'] = local_var_params['source_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic_auth'] # noqa: E501
return self.api_client.call_api(
'/sources/{source_id}/stats/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_sources(self, **kwargs): # noqa: E501
"""List the sources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_sources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str type: The type of source to filter for.
:param str name: The name of the source to filter for.
:param int offset: Parameter for selecting the offset of data.
        :param int limit: Parameter for selecting the amount of data returned.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SourcePagination
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_sources_with_http_info(**kwargs) # noqa: E501
def list_sources_with_http_info(self, **kwargs): # noqa: E501
"""List the sources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_sources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str type: The type of source to filter for.
:param str name: The name of the source to filter for.
:param int offset: Parameter for selecting the offset of data.
        :param int limit: Parameter for selecting the amount of data returned.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SourcePagination, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'type',
'name',
'offset',
'limit'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_sources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and 'offset' in local_var_params and local_var_params['offset'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `offset` when calling `list_sources`, must be a value greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 1000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_sources`, must be a value less than or equal to `1000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_sources`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'type' in local_var_params and local_var_params['type'] is not None: # noqa: E501
query_params.append(('type', local_var_params['type'])) # noqa: E501
if 'name' in local_var_params and local_var_params['name'] is not None: # noqa: E501
query_params.append(('name', local_var_params['name'])) # noqa: E501
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic_auth'] # noqa: E501
return self.api_client.call_api(
'/sources/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SourcePagination', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def update_source(self, source_id, source_in, **kwargs): # noqa: E501
"""Update a source # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_source(source_id, source_in, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int source_id: ID of source to update (required)
:param SourceIn source_in: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SourceOut
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_source_with_http_info(source_id, source_in, **kwargs) # noqa: E501
def update_source_with_http_info(self, source_id, source_in, **kwargs): # noqa: E501
"""Update a source # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_source_with_http_info(source_id, source_in, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int source_id: ID of source to update (required)
:param SourceIn source_in: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SourceOut, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'source_id',
'source_in'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_source" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'source_id' is set
if self.api_client.client_side_validation and ('source_id' not in local_var_params or # noqa: E501
local_var_params['source_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `source_id` when calling `update_source`") # noqa: E501
# verify the required parameter 'source_in' is set
if self.api_client.client_side_validation and ('source_in' not in local_var_params or # noqa: E501
local_var_params['source_in'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `source_in` when calling `update_source`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source_id' in local_var_params:
path_params['source_id'] = local_var_params['source_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'source_in' in local_var_params:
body_params = local_var_params['source_in']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basic_auth'] # noqa: E501
return self.api_client.call_api(
'/sources/{source_id}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SourceOut', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
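# --- Illustrative sketch (not part of the generated module) ---
# Minimal usage of SourcesApi, mirroring the docstring examples above. The host
# and credentials are placeholders, and the Configuration/ApiClient wiring is the
# usual openapi-generator layout for this package, so treat those details as
# assumptions rather than documented API.
def _demo_list_sources():
    import openapi_client  # the generated package this module belongs to
    config = openapi_client.Configuration()
    config.host = 'https://example.invalid/api/cost-management/v1'  # placeholder
    config.username = 'user'  # placeholders for the 'basic_auth' scheme used above
    config.password = 'pass'
    api = SourcesApi(openapi_client.ApiClient(config))
    page = api.list_sources(limit=10)                     # synchronous call
    thread = api.list_sources(limit=10, async_req=True)   # returns a thread
    return page, thread.get()                             # as in the docstrings above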
| 44.30814
| 187
| 0.592529
|
2b4182928cc38346886d8eb7cc25264888bad6e7
| 1,204
|
py
|
Python
|
vote/app.py
|
eluminare/example-voting-app
|
475d79b12b8f8a6d3621ddcdd19ed87eb2159319
|
[
"Apache-2.0"
] | null | null | null |
vote/app.py
|
eluminare/example-voting-app
|
475d79b12b8f8a6d3621ddcdd19ed87eb2159319
|
[
"Apache-2.0"
] | null | null | null |
vote/app.py
|
eluminare/example-voting-app
|
475d79b12b8f8a6d3621ddcdd19ed87eb2159319
|
[
"Apache-2.0"
] | 1
|
2020-03-19T13:53:58.000Z
|
2020-03-19T13:53:58.000Z
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
option_a = os.getenv('OPTION_A', 'Zurück')
option_b = os.getenv('OPTION_B', 'Vor')
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not hasattr(g, 'redis'):
g.redis = Redis(host="redis", db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
voter_id = hex(random.getrandbits(64))[2:-1]
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
))
resp.set_cookie('voter_id', voter_id)
return resp
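# --- Illustrative sketch (not part of the original app) ---
# Each vote is pushed onto the Redis list 'votes' as a JSON blob by hello() above,
# to be consumed by a separate worker. A minimal consumer loop might look like
# this; the queue name and JSON shape come from hello(), the rest is assumption.
def consume_votes(redis_conn):
    while True:
        # BLPOP blocks until an item is available on the 'votes' list.
        _, raw = redis_conn.blpop('votes')
        ballot = json.loads(raw)
        print('%s voted for %s' % (ballot['voter_id'], ballot['vote']))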
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
| 23.607843
| 67
| 0.638704
|
ae2569cb56a76c23f80f25715e70f398e1f4bd8a
| 766
|
py
|
Python
|
datasets/dataset_prepare/prepare_VisDrone.py
|
pohjao/IIM
|
c721c952c5bc99af4bc8019f67afc613113e6ac6
|
[
"MIT"
] | null | null | null |
datasets/dataset_prepare/prepare_VisDrone.py
|
pohjao/IIM
|
c721c952c5bc99af4bc8019f67afc613113e6ac6
|
[
"MIT"
] | null | null | null |
datasets/dataset_prepare/prepare_VisDrone.py
|
pohjao/IIM
|
c721c952c5bc99af4bc8019f67afc613113e6ac6
|
[
"MIT"
] | null | null | null |
from PIL import Image
import os
import cv2 as cv
import matplotlib.pyplot as plt
from pylab import plot
import numpy as np
import json
import math
import torch
from functions import euclidean_dist, generate_cycle_mask, average_del_min
mode = 'train'
import glob
img_path = '/home/ubuntu/src/IIM/datasets/ProcessedData/VisDrone/images/'
json_path = '/home/ubuntu/src/IIM/datasets/ProcessedData/VisDrone/json'
mask_path = '/home/ubuntu/src/IIM/datasets/ProcessedData/VisDrone/mask_50_60'
cycle = False
def calc_mean_std():
    # NOTE: the original loop body was unfinished; the update below is an assumed
    # completion (Chan's combine step for a running mean/variance, matching the
    # count/mean/delta/M2 variables the author set up).
    count = 0
    mean = 0.0
    M2 = 0.0
    for filename in glob.glob(os.path.join(img_path, '*.jpg')):  # assuming jpg images
        img = np.asarray(Image.open(filename), dtype=np.float64).ravel()
        n_b = img.size
        delta = img.mean() - mean
        mean += delta * n_b / (count + n_b)
        M2 += img.var() * n_b + delta ** 2 * count * n_b / (count + n_b)
        count += n_b
    std = math.sqrt(M2 / count) if count else 0.0
    return mean, std
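# --- Illustrative sketch (not part of the original script) ---
# A direct numpy cross-check for the streaming statistics above: pool the pixels
# of a handful of images and let numpy compute mean/std in one pass. Only useful
# while the sample fits in memory; it reuses img_path from above.
def calc_mean_std_numpy(max_images=10):
    files = sorted(glob.glob(os.path.join(img_path, '*.jpg')))[:max_images]
    pixels = np.concatenate([np.asarray(Image.open(f), dtype=np.float64).ravel()
                             for f in files])
    return pixels.mean(), pixels.std()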
if __name__ == '__main__':
calc_mean_std()
| 24.709677
| 77
| 0.736292
|
14958c099525d43f7e81fa9cc0cbaab372a54a7e
| 2,919
|
py
|
Python
|
paasta_tools/cli/cmds/autoscale.py
|
jackchi/paasta
|
0899adcef43cb07c247a36f5af82f09bb6f8db12
|
[
"Apache-2.0"
] | 1
|
2019-05-07T12:01:48.000Z
|
2019-05-07T12:01:48.000Z
|
paasta_tools/cli/cmds/autoscale.py
|
jackchi/paasta
|
0899adcef43cb07c247a36f5af82f09bb6f8db12
|
[
"Apache-2.0"
] | 4
|
2021-02-08T21:00:33.000Z
|
2021-06-02T03:29:31.000Z
|
paasta_tools/cli/cmds/autoscale.py
|
jackchi/paasta
|
0899adcef43cb07c247a36f5af82f09bb6f8db12
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from paasta_tools.api import client
from paasta_tools.cli.utils import figure_out_service_name
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_instances
from paasta_tools.cli.utils import list_services
from paasta_tools.utils import list_clusters
from paasta_tools.utils import paasta_print
log = logging.getLogger(__name__)
def add_subparser(subparsers):
autoscale_parser = subparsers.add_parser(
'autoscale',
        help="Manually scale a service up and down, bypassing the normal autoscaler",
)
autoscale_parser.add_argument(
'-s', '--service',
        help="Service that you want to autoscale. Like 'example_service'.",
).completer = lazy_choices_completer(list_services)
autoscale_parser.add_argument(
'-i', '--instance',
        help="Instance of the service that you want to autoscale. Like 'main' or 'canary'.",
required=True,
).completer = lazy_choices_completer(list_instances)
autoscale_parser.add_argument(
'-c', '--cluster',
        help="The PaaSTA cluster that has the service instance you want to autoscale. Like 'norcal-prod'.",
required=True,
).completer = lazy_choices_completer(list_clusters)
autoscale_parser.add_argument(
'--set',
help="Set the number to scale to. Must be an Int.",
type=int,
)
autoscale_parser.set_defaults(command=paasta_autoscale)
def paasta_autoscale(args):
log.setLevel(logging.DEBUG)
service = figure_out_service_name(args)
api = client.get_paasta_api_client(cluster=args.cluster, http_res=True)
if not api:
paasta_print('Could not connect to paasta api. Maybe you misspelled the cluster?')
return 1
if args.set is None:
log.debug("Getting the current autoscaler count...")
res, http = api.autoscaler.get_autoscaler_count(service=service, instance=args.instance).result()
else:
log.debug(f"Setting desired instances to {args.set}.")
body = {'desired_instances': int(args.set)}
res, http = api.autoscaler.update_autoscaler_count(
service=service, instance=args.instance, json_body=body,
).result()
log.debug(f"Res: {res} Http: {http}")
print(res["desired_instances"])
return 0
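# --- Illustrative sketch (not part of the original module) ---
# paasta_autoscale() only needs an argparse-style namespace, so it can be invoked
# directly without the CLI plumbing. The service/instance/cluster values are
# placeholders taken from the help strings above, a reachable paasta API for that
# cluster is assumed, and the helper itself is hypothetical.
def _demo_autoscale_call():
    from argparse import Namespace
    fake_args = Namespace(service='example_service', instance='main',
                          cluster='norcal-prod', set=5)
    return paasta_autoscale(fake_args)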
| 37.423077
| 105
| 0.715313
|
9287b6d92d8e3c2703b4ef67673dde4413e32cfe
| 17,050
|
py
|
Python
|
mysmb.py
|
0x24bin/MS17-010-Python
|
ff3abc167b862c120931369abacd7e89739b649c
|
[
"MIT"
] | 1
|
2019-11-09T13:16:12.000Z
|
2019-11-09T13:16:12.000Z
|
mysmb.py
|
killvxk/MS17-010_WORAWIT
|
c55361263171827081b91c66db0b1c9335193ec4
|
[
"MIT"
] | null | null | null |
mysmb.py
|
killvxk/MS17-010_WORAWIT
|
c55361263171827081b91c66db0b1c9335193ec4
|
[
"MIT"
] | null | null | null |
# impacket SMB extension for MS17-010 exploit.
# this file contains only valid SMB packet format operation.
from impacket import smb, smbconnection
from impacket.dcerpc.v5 import transport
from struct import pack
import os
import random
def getNTStatus(self):
return (self['ErrorCode'] << 16) | (self['_reserved'] << 8) | self['ErrorClass']
setattr(smb.NewSMBPacket, "getNTStatus", getNTStatus)
############# SMB_COM_TRANSACTION_SECONDARY (0x26)
class SMBTransactionSecondary_Parameters(smb.SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H=0'),
('TotalDataCount','<H'),
('ParameterCount','<H=0'),
('ParameterOffset','<H=0'),
('ParameterDisplacement','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
)
# Note: impacket-0.9.15 struct has no ParameterDisplacement
############# SMB_COM_TRANSACTION2_SECONDARY (0x33)
class SMBTransaction2Secondary_Parameters(smb.SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H=0'),
('TotalDataCount','<H'),
('ParameterCount','<H=0'),
('ParameterOffset','<H=0'),
('ParameterDisplacement','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
('FID','<H=0'),
)
############# SMB_COM_NT_TRANSACTION_SECONDARY (0xA1)
class SMBNTTransactionSecondary_Parameters(smb.SMBCommand_Parameters):
structure = (
('Reserved1','3s=""'),
('TotalParameterCount','<L'),
('TotalDataCount','<L'),
('ParameterCount','<L'),
('ParameterOffset','<L'),
('ParameterDisplacement','<L=0'),
('DataCount','<L'),
('DataOffset','<L'),
('DataDisplacement','<L=0'),
('Reserved2','<B=0'),
)
def _put_trans_data(transCmd, parameters, data, noPad=False):
# have to init offset before calling len()
transCmd['Parameters']['ParameterOffset'] = 0
transCmd['Parameters']['DataOffset'] = 0
# SMB header: 32 bytes
# WordCount: 1 bytes
# ByteCount: 2 bytes
# Note: Setup length is included when len(param) is called
offset = 32 + 1 + len(transCmd['Parameters']) + 2
transData = ''
if len(parameters):
padLen = 0 if noPad else (4 - offset % 4 ) % 4
transCmd['Parameters']['ParameterOffset'] = offset + padLen
transData = ('\x00' * padLen) + parameters
offset += padLen + len(parameters)
if len(data):
padLen = 0 if noPad else (4 - offset % 4 ) % 4
transCmd['Parameters']['DataOffset'] = offset + padLen
transData += ('\x00' * padLen) + data
transCmd['Data'] = transData
origin_NewSMBPacket_addCommand = getattr(smb.NewSMBPacket, "addCommand")
login_MaxBufferSize = 61440
def NewSMBPacket_addCommand_hook_login(self, command):
# restore NewSMBPacket.addCommand
setattr(smb.NewSMBPacket, "addCommand", origin_NewSMBPacket_addCommand)
if isinstance(command['Parameters'], smb.SMBSessionSetupAndX_Extended_Parameters):
command['Parameters']['MaxBufferSize'] = login_MaxBufferSize
elif isinstance(command['Parameters'], smb.SMBSessionSetupAndX_Parameters):
command['Parameters']['MaxBuffer'] = login_MaxBufferSize
# call original one
origin_NewSMBPacket_addCommand(self, command)
def _setup_login_packet_hook(maxBufferSize):
# setup hook for next NewSMBPacket.addCommand if maxBufferSize is not None
if maxBufferSize is not None:
global login_MaxBufferSize
login_MaxBufferSize = maxBufferSize
setattr(smb.NewSMBPacket, "addCommand", NewSMBPacket_addCommand_hook_login)
class MYSMB(smb.SMB):
def __init__(self, remote_host, use_ntlmv2=True, timeout=8):
self.__use_ntlmv2 = use_ntlmv2
self._default_tid = 0
self._pid = os.getpid() & 0xffff
self._last_mid = random.randint(1000, 20000)
if 0x4000 <= self._last_mid <= 0x4110:
self._last_mid += 0x120
self._pkt_flags2 = 0
self._last_tid = 0 # last tid from connect_tree()
self._last_fid = 0 # last fid from nt_create_andx()
self._smbConn = None
smb.SMB.__init__(self, remote_host, remote_host, timeout=timeout)
def set_pid(self, pid):
self._pid = pid
def get_pid(self):
return self._pid
def set_last_mid(self, mid):
self._last_mid = mid
def next_mid(self):
self._last_mid += random.randint(1, 20)
if 0x4000 <= self._last_mid <= 0x4110:
self._last_mid += 0x120
return self._last_mid
def get_smbconnection(self):
if self._smbConn is None:
self.smbConn = smbconnection.SMBConnection(self.get_remote_host(), self.get_remote_host(), existingConnection=self, manualNegotiate=True)
return self.smbConn
def get_dce_rpc(self, named_pipe):
smbConn = self.get_smbconnection()
rpctransport = transport.SMBTransport(self.get_remote_host(), self.get_remote_host(), filename='\\'+named_pipe, smb_connection=smbConn)
return rpctransport.get_dce_rpc()
# override SMB.neg_session() to allow forcing ntlm authentication
def neg_session(self, extended_security=True, negPacket=None):
smb.SMB.neg_session(self, extended_security=self.__use_ntlmv2, negPacket=negPacket)
# to use any login method, SMB must not be used from multiple thread
def login(self, user, password, domain='', lmhash='', nthash='', ntlm_fallback=True, maxBufferSize=None):
_setup_login_packet_hook(maxBufferSize)
smb.SMB.login(self, user, password, domain, lmhash, nthash, ntlm_fallback)
def login_standard(self, user, password, domain='', lmhash='', nthash='', maxBufferSize=None):
_setup_login_packet_hook(maxBufferSize)
smb.SMB.login_standard(self, user, password, domain, lmhash, nthash)
def login_extended(self, user, password, domain='', lmhash='', nthash='', use_ntlmv2=True, maxBufferSize=None):
_setup_login_packet_hook(maxBufferSize)
smb.SMB.login_extended(self, user, password, domain, lmhash, nthash, use_ntlmv2)
def connect_tree(self, path, password=None, service=smb.SERVICE_ANY, smb_packet=None):
self._last_tid = smb.SMB.tree_connect_andx(self, path, password, service, smb_packet)
return self._last_tid
def get_last_tid(self):
return self._last_tid
def nt_create_andx(self, tid, filename, smb_packet=None, cmd=None, shareAccessMode=smb.FILE_SHARE_READ|smb.FILE_SHARE_WRITE, disposition=smb.FILE_OPEN, accessMask=0x2019f):
self._last_fid = smb.SMB.nt_create_andx(self, tid, filename, smb_packet, cmd, shareAccessMode, disposition, accessMask)
return self._last_fid
def get_last_fid(self):
return self._last_fid
def set_default_tid(self, tid):
self._default_tid = tid
def set_pkt_flags2(self, flags):
self._pkt_flags2 = flags
def send_echo(self, data):
pkt = smb.NewSMBPacket()
pkt['Tid'] = self._default_tid
transCommand = smb.SMBCommand(smb.SMB.SMB_COM_ECHO)
transCommand['Parameters'] = smb.SMBEcho_Parameters()
transCommand['Data'] = smb.SMBEcho_Data()
transCommand['Parameters']['EchoCount'] = 1
transCommand['Data']['Data'] = data
pkt.addCommand(transCommand)
self.sendSMB(pkt)
return self.recvSMB()
def do_write_andx_raw_pipe(self, fid, data, mid=None, pid=None, tid=None):
writeAndX = smb.SMBCommand(smb.SMB.SMB_COM_WRITE_ANDX)
writeAndX['Parameters'] = smb.SMBWriteAndX_Parameters_Short()
writeAndX['Parameters']['Fid'] = fid
writeAndX['Parameters']['Offset'] = 0
writeAndX['Parameters']['WriteMode'] = 4 # SMB_WMODE_WRITE_RAW_NAMED_PIPE
writeAndX['Parameters']['Remaining'] = 12345 # can be any. raw named pipe does not use it
writeAndX['Parameters']['DataLength'] = len(data)
writeAndX['Parameters']['DataOffset'] = 32 + len(writeAndX['Parameters']) + 1 + 2 + 1 # WordCount(1), ByteCount(2), Padding(1)
writeAndX['Data'] = '\x00' + data # pad 1 byte
self.send_raw(self.create_smb_packet(writeAndX, mid, pid, tid))
return self.recvSMB()
def create_smb_packet(self, smbReq, mid=None, pid=None, tid=None):
if mid is None:
mid = self.next_mid()
pkt = smb.NewSMBPacket()
pkt.addCommand(smbReq)
pkt['Tid'] = self._default_tid if tid is None else tid
pkt['Uid'] = self._uid
pkt['Pid'] = self._pid if pid is None else pid
pkt['Mid'] = mid
flags1, flags2 = self.get_flags()
pkt['Flags1'] = flags1
pkt['Flags2'] = self._pkt_flags2 if self._pkt_flags2 != 0 else flags2
if self._SignatureEnabled:
pkt['Flags2'] |= smb.SMB.FLAGS2_SMB_SECURITY_SIGNATURE
self.signSMB(pkt, self._SigningSessionKey, self._SigningChallengeResponse)
req = str(pkt)
return '\x00'*2 + pack('>H', len(req)) + req # assume length is <65536
def send_raw(self, data):
self.get_socket().send(data)
def create_trans_packet(self, setup, param='', data='', mid=None, maxSetupCount=None, totalParameterCount=None, totalDataCount=None, maxParameterCount=None, maxDataCount=None, pid=None, tid=None, noPad=False):
if maxSetupCount is None:
maxSetupCount = len(setup)
if totalParameterCount is None:
totalParameterCount = len(param)
if totalDataCount is None:
totalDataCount = len(data)
if maxParameterCount is None:
maxParameterCount = totalParameterCount
if maxDataCount is None:
maxDataCount = totalDataCount
transCmd = smb.SMBCommand(smb.SMB.SMB_COM_TRANSACTION)
transCmd['Parameters'] = smb.SMBTransaction_Parameters()
transCmd['Parameters']['TotalParameterCount'] = totalParameterCount
transCmd['Parameters']['TotalDataCount'] = totalDataCount
transCmd['Parameters']['MaxParameterCount'] = maxParameterCount
transCmd['Parameters']['MaxDataCount'] = maxDataCount
transCmd['Parameters']['MaxSetupCount'] = maxSetupCount
transCmd['Parameters']['Flags'] = 0
transCmd['Parameters']['Timeout'] = 0xffffffff
transCmd['Parameters']['ParameterCount'] = len(param)
transCmd['Parameters']['DataCount'] = len(data)
transCmd['Parameters']['Setup'] = setup
_put_trans_data(transCmd, param, data, noPad)
return self.create_smb_packet(transCmd, mid, pid, tid)
def send_trans(self, setup, param='', data='', mid=None, maxSetupCount=None, totalParameterCount=None, totalDataCount=None, maxParameterCount=None, maxDataCount=None, pid=None, tid=None, noPad=False):
self.send_raw(self.create_trans_packet(setup, param, data, mid, maxSetupCount, totalParameterCount, totalDataCount, maxParameterCount, maxDataCount, pid, tid, noPad))
return self.recvSMB()
def create_trans_secondary_packet(self, mid, param='', paramDisplacement=0, data='', dataDisplacement=0, pid=None, tid=None, noPad=False):
transCmd = smb.SMBCommand(smb.SMB.SMB_COM_TRANSACTION_SECONDARY)
transCmd['Parameters'] = SMBTransactionSecondary_Parameters()
transCmd['Parameters']['TotalParameterCount'] = len(param)
transCmd['Parameters']['TotalDataCount'] = len(data)
transCmd['Parameters']['ParameterCount'] = len(param)
transCmd['Parameters']['ParameterDisplacement'] = paramDisplacement
transCmd['Parameters']['DataCount'] = len(data)
transCmd['Parameters']['DataDisplacement'] = dataDisplacement
_put_trans_data(transCmd, param, data, noPad)
return self.create_smb_packet(transCmd, mid, pid, tid)
def send_trans_secondary(self, mid, param='', paramDisplacement=0, data='', dataDisplacement=0, pid=None, tid=None, noPad=False):
self.send_raw(self.create_trans_secondary_packet(mid, param, paramDisplacement, data, dataDisplacement, pid, tid, noPad))
def create_trans2_packet(self, setup, param='', data='', mid=None, maxSetupCount=None, totalParameterCount=None, totalDataCount=None, maxParameterCount=None, maxDataCount=None, pid=None, tid=None, noPad=False):
if maxSetupCount is None:
maxSetupCount = len(setup)
if totalParameterCount is None:
totalParameterCount = len(param)
if totalDataCount is None:
totalDataCount = len(data)
if maxParameterCount is None:
maxParameterCount = totalParameterCount
if maxDataCount is None:
maxDataCount = totalDataCount
transCmd = smb.SMBCommand(smb.SMB.SMB_COM_TRANSACTION2)
transCmd['Parameters'] = smb.SMBTransaction2_Parameters()
transCmd['Parameters']['TotalParameterCount'] = totalParameterCount
transCmd['Parameters']['TotalDataCount'] = totalDataCount
transCmd['Parameters']['MaxParameterCount'] = maxParameterCount
transCmd['Parameters']['MaxDataCount'] = maxDataCount
transCmd['Parameters']['MaxSetupCount'] = len(setup)
transCmd['Parameters']['Flags'] = 0
transCmd['Parameters']['Timeout'] = 0xffffffff
transCmd['Parameters']['ParameterCount'] = len(param)
transCmd['Parameters']['DataCount'] = len(data)
transCmd['Parameters']['Setup'] = setup
_put_trans_data(transCmd, param, data, noPad)
return self.create_smb_packet(transCmd, mid, pid, tid)
def send_trans2(self, setup, param='', data='', mid=None, maxSetupCount=None, totalParameterCount=None, totalDataCount=None, maxParameterCount=None, maxDataCount=None, pid=None, tid=None, noPad=False):
self.send_raw(self.create_trans2_packet(setup, param, data, mid, maxSetupCount, totalParameterCount, totalDataCount, maxParameterCount, maxDataCount, pid, tid, noPad))
return self.recvSMB()
def create_trans2_secondary_packet(self, mid, param='', paramDisplacement=0, data='', dataDisplacement=0, pid=None, tid=None, noPad=False):
transCmd = smb.SMBCommand(smb.SMB.SMB_COM_TRANSACTION2_SECONDARY)
transCmd['Parameters'] = SMBTransaction2Secondary_Parameters()
transCmd['Parameters']['TotalParameterCount'] = len(param)
transCmd['Parameters']['TotalDataCount'] = len(data)
transCmd['Parameters']['ParameterCount'] = len(param)
transCmd['Parameters']['ParameterDisplacement'] = paramDisplacement
transCmd['Parameters']['DataCount'] = len(data)
transCmd['Parameters']['DataDisplacement'] = dataDisplacement
_put_trans_data(transCmd, param, data, noPad)
return self.create_smb_packet(transCmd, mid, pid, tid)
def send_trans2_secondary(self, mid, param='', paramDisplacement=0, data='', dataDisplacement=0, pid=None, tid=None, noPad=False):
self.send_raw(self.create_trans2_secondary_packet(mid, param, paramDisplacement, data, dataDisplacement, pid, tid, noPad))
def create_nt_trans_packet(self, function, setup='', param='', data='', mid=None, maxSetupCount=None, totalParameterCount=None, totalDataCount=None, maxParameterCount=None, maxDataCount=None, pid=None, tid=None, noPad=False):
if maxSetupCount is None:
maxSetupCount = len(setup)
if totalParameterCount is None:
totalParameterCount = len(param)
if totalDataCount is None:
totalDataCount = len(data)
if maxParameterCount is None:
maxParameterCount = totalParameterCount
if maxDataCount is None:
maxDataCount = totalDataCount
transCmd = smb.SMBCommand(smb.SMB.SMB_COM_NT_TRANSACT)
transCmd['Parameters'] = smb.SMBNTTransaction_Parameters()
transCmd['Parameters']['MaxSetupCount'] = maxSetupCount
transCmd['Parameters']['TotalParameterCount'] = totalParameterCount
transCmd['Parameters']['TotalDataCount'] = totalDataCount
transCmd['Parameters']['MaxParameterCount'] = maxParameterCount
transCmd['Parameters']['MaxDataCount'] = maxDataCount
transCmd['Parameters']['ParameterCount'] = len(param)
transCmd['Parameters']['DataCount'] = len(data)
transCmd['Parameters']['Function'] = function
transCmd['Parameters']['Setup'] = setup
_put_trans_data(transCmd, param, data, noPad)
return self.create_smb_packet(transCmd, mid, pid, tid)
def send_nt_trans(self, function, setup='', param='', data='', mid=None, maxSetupCount=None, totalParameterCount=None, totalDataCount=None, maxParameterCount=None, maxDataCount=None, pid=None, tid=None, noPad=False):
self.send_raw(self.create_nt_trans_packet(function, setup, param, data, mid, maxSetupCount, totalParameterCount, totalDataCount, maxParameterCount, maxDataCount, pid, tid, noPad))
return self.recvSMB()
def create_nt_trans_secondary_packet(self, mid, param='', paramDisplacement=0, data='', dataDisplacement=0, pid=None, tid=None, noPad=False):
transCmd = smb.SMBCommand(smb.SMB.SMB_COM_NT_TRANSACT_SECONDARY)
transCmd['Parameters'] = SMBNTTransactionSecondary_Parameters()
transCmd['Parameters']['TotalParameterCount'] = len(param)
transCmd['Parameters']['TotalDataCount'] = len(data)
transCmd['Parameters']['ParameterCount'] = len(param)
transCmd['Parameters']['ParameterDisplacement'] = paramDisplacement
transCmd['Parameters']['DataCount'] = len(data)
transCmd['Parameters']['DataDisplacement'] = dataDisplacement
_put_trans_data(transCmd, param, data, noPad)
return self.create_smb_packet(transCmd, mid, pid, tid)
def send_nt_trans_secondary(self, mid, param='', paramDisplacement=0, data='', dataDisplacement=0, pid=None, tid=None, noPad=False):
self.send_raw(self.create_nt_trans_secondary_packet(mid, param, paramDisplacement, data, dataDisplacement, pid, tid, noPad))
def recv_transaction_data(self, mid, minLen):
data = ''
while len(data) < minLen:
recvPkt = self.recvSMB()
if recvPkt['Mid'] != mid:
continue
resp = smb.SMBCommand(recvPkt['Data'][0])
data += resp['Data'][1:] # skip padding
#print(len(data))
return data
| 44.633508
| 227
| 0.726862
|
6939f9f0103110d15d45cea1dfce4153a3eef1b1
| 1,774
|
py
|
Python
|
fibonacci_generators.py
|
johnnydevriese/python_fun
|
01fc5fcc82c7c27e25eabff85a2e88f3554129fe
|
[
"MIT"
] | null | null | null |
fibonacci_generators.py
|
johnnydevriese/python_fun
|
01fc5fcc82c7c27e25eabff85a2e88f3554129fe
|
[
"MIT"
] | null | null | null |
fibonacci_generators.py
|
johnnydevriese/python_fun
|
01fc5fcc82c7c27e25eabff85a2e88f3554129fe
|
[
"MIT"
] | null | null | null |
# See: http://stackoverflow.com/questions/102535/what-can-you-use-python-generator-functions-for
# Generators give you lazy evaluation.
# You use them by iterating over them, either explicitly with 'for' or implicitly by
# passing it to any function or construct that iterates. You can think of generators as
# returning multiple items, as if they return a list,
# but instead of returning them all at once they return them one-by-one, and
# the generator function is paused until the next item is requested.
# Generators are good for calculating large sets of results
# (in particular calculations involving loops themselves) where you
# don't know if you are going to need all results, or where you don't want to allocate the memory
# for all results at the same time. Or for situations where the generator uses another generator,
# or consumes some other resource, and it's more convenient if that happened as late as possible.
# function version
def fibon(n):
a = b = 1
result = []
for i in range(n):
result.append(a)
a, b = b, a + b
return result
# generator version
def fibon_generator(n):
a = b = 1
for i in range(n):
yield a
a, b = b, a + b
# a generator that yields items instead of returning a list
def firstn(n):
num = 0
while num < n:
yield num
num += 1
print(fibon(10))
# not sure if there would be a better way to calculate the final result.
for i in fibon_generator(10):
result_fib = i
# noinspection PyUnboundLocalVariable
print(result_fib)
# but if for some reason you wanted a list you could do:
print(list(fibon_generator(10)))
sum_of_first_n = sum(firstn(1000000))
# hey we could also do something like this!
dingetje = list(2 * n for n in range(50))
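# --- Illustrative sketch (not part of the original file) ---
# The lazy-evaluation point above also means a generator need not be bounded at
# all: callers decide how much to pull, e.g. with itertools.islice.
import itertools

def fibon_infinite():
    a = b = 1
    while True:
        yield a
        a, b = b, a + b

first_ten = list(itertools.islice(fibon_infinite(), 10))
print(first_ten)  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]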
| 28.15873
| 97
| 0.711387
|
ab599dd89f2031e1e26c93734dec1cd484ad511a
| 664
|
py
|
Python
|
simuvex/procedures/libc___so___6/socket_.py
|
praetorian-inc/simuvex
|
7984bc4432a1c2126e6f2eb963c935e9f6a98da5
|
[
"BSD-2-Clause"
] | 8
|
2016-01-19T03:13:32.000Z
|
2020-11-03T09:30:05.000Z
|
simuvex/procedures/libc___so___6/socket_.py
|
praetorian-inc/simuvex
|
7984bc4432a1c2126e6f2eb963c935e9f6a98da5
|
[
"BSD-2-Clause"
] | null | null | null |
simuvex/procedures/libc___so___6/socket_.py
|
praetorian-inc/simuvex
|
7984bc4432a1c2126e6f2eb963c935e9f6a98da5
|
[
"BSD-2-Clause"
] | 3
|
2017-04-24T00:22:30.000Z
|
2020-11-03T09:30:06.000Z
|
import simuvex
######################################
# socket
######################################
class socket(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, sim_sock_type):
# TODO: Handling parameters
sock_type = self.state.se.any_int(sim_sock_type)
# TODO handle errors and symbolic path
fd = self.state.posix.open("socket_socket", "rw")
        # If sock_type is 0 it's UDP, so create a socket for it now; otherwise it's 1
        # and the socket is created later in accept().
        if sock_type == 0:
self.state.posix.back_with_pcap(fd)
self.state.posix.add_socket(fd)
return fd
| 30.181818
| 118
| 0.579819
|
5fce1942cf5f3d2b6d047a203cfdbc4b2d775d99
| 48,960
|
py
|
Python
|
pyscf/dft/libxc.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | 1
|
2018-05-02T19:55:30.000Z
|
2018-05-02T19:55:30.000Z
|
pyscf/dft/libxc.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | null | null | null |
pyscf/dft/libxc.py
|
nmardirossian/pyscf
|
57c8912dcfcc1157a822feede63df54ed1067115
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Authors: Qiming Sun <osirpt.sun@gmail.com>
# Susi Lehtola <susi.lehtola@gmail.com>
'''
XC functional, the interface to libxc
(http://www.tddft.org/programs/octopus/wiki/index.php/Libxc)
'''
import sys
import copy
import ctypes
import math
import numpy
from pyscf import lib
_itrf = lib.load_library('libxc_itrf')
_itrf.LIBXC_is_lda.restype = ctypes.c_int
_itrf.LIBXC_is_gga.restype = ctypes.c_int
_itrf.LIBXC_is_meta_gga.restype = ctypes.c_int
_itrf.LIBXC_is_hybrid.restype = ctypes.c_int
_itrf.LIBXC_max_deriv_order.restype = ctypes.c_int
_itrf.LIBXC_hybrid_coeff.restype = ctypes.c_double
# xc_code from libxc
#cat lib/deps/include/xc_funcs.h | awk '{printf("'\''%s'\'' %3i",$2,$3); for(i=4;i<NF;i++) {printf(" %s",$i)}; printf("\n")}' | sed "s|/\*|# |g" | awk '{printf("%-30s : %4i\,",$1,$2); for(i=4;i<NF;i++) {printf(" %s",$i)}; printf("\n")}'
XC = XC_CODES = {
'XC_LDA_X' : 1, # Exchange
'XC_LDA_C_WIGNER' : 2, # Wigner parametrization
'XC_LDA_C_RPA' : 3, # Random Phase Approximation
'XC_LDA_C_HL' : 4, # Hedin & Lundqvist
'XC_LDA_C_GL' : 5, # Gunnarson & Lundqvist
'XC_LDA_C_XALPHA' : 6, # Slater Xalpha
'XC_LDA_C_VWN' : 7, # Vosko, Wilk, & Nusair (5)
'XC_LDA_C_VWN_RPA' : 8, # Vosko, Wilk, & Nusair (RPA)
'XC_LDA_C_PZ' : 9, # Perdew & Zunger
'XC_LDA_C_PZ_MOD' : 10, # Perdew & Zunger (Modified)
'XC_LDA_C_OB_PZ' : 11, # Ortiz & Ballone (PZ)
'XC_LDA_C_PW' : 12, # Perdew & Wang
'XC_LDA_C_PW_MOD' : 13, # Perdew & Wang (Modified)
'XC_LDA_C_OB_PW' : 14, # Ortiz & Ballone (PW)
'XC_LDA_C_2D_AMGB' : 15, # Attaccalite et al
'XC_LDA_C_2D_PRM' : 16, # Pittalis, Rasanen & Marques correlation in 2D
'XC_LDA_C_VBH' : 17, # von Barth & Hedin
'XC_LDA_C_1D_CSC' : 18, # Casula, Sorella, and Senatore 1D correlation
'XC_LDA_X_2D' : 19, # Exchange in 2D
'XC_LDA_XC_TETER93' : 20, # Teter 93 parametrization
'XC_LDA_X_1D' : 21, # Exchange in 1D
'XC_LDA_C_ML1' : 22, # Modified LSD (version 1) of Proynov and Salahub
'XC_LDA_C_ML2' : 23, # Modified LSD (version 2) of Proynov and Salahub
'XC_LDA_C_GOMBAS' : 24, # Gombas parametrization
'XC_LDA_C_PW_RPA' : 25, # Perdew & Wang fit of the RPA
'XC_LDA_C_1D_LOOS' : 26, # P-F Loos correlation LDA
'XC_LDA_C_RC04' : 27, # Ragot-Cortona
'XC_LDA_C_VWN_1' : 28, # Vosko, Wilk, & Nusair (1)
'XC_LDA_C_VWN_2' : 29, # Vosko, Wilk, & Nusair (2)
'XC_LDA_C_VWN_3' : 30, # Vosko, Wilk, & Nusair (3)
'XC_LDA_C_VWN_4' : 31, # Vosko, Wilk, & Nusair (4)
'XC_LDA_XC_ZLP' : 43, # Zhao, Levy & Parr, Eq. (20)
'XC_LDA_K_TF' : 50, # Thomas-Fermi kinetic energy functional
'XC_LDA_K_LP' : 51, # Lee and Parr Gaussian ansatz
'XC_LDA_XC_KSDT' : 259, # Karasiev et al. parametrization
'XC_GGA_X_GAM' : 32, # GAM functional from Minnesota
'XC_GGA_C_GAM' : 33, # GAM functional from Minnesota
'XC_GGA_X_HCTH_A' : 34, # HCTH-A
'XC_GGA_X_EV93' : 35, # Engel and Vosko
'XC_GGA_X_BGCP' : 38, # Burke, Cancio, Gould, and Pittalis
'XC_GGA_C_BGCP' : 39, # Burke, Cancio, Gould, and Pittalis
'XC_GGA_X_LAMBDA_OC2_N' : 40, # lambda_OC2(N) version of PBE
'XC_GGA_X_B86_R' : 41, # Revised Becke 86 Xalpha,beta,gamma (with mod. grad. correction)
'XC_GGA_X_LAMBDA_CH_N' : 44, # lambda_CH(N) version of PBE
'XC_GGA_X_LAMBDA_LO_N' : 45, # lambda_LO(N) version of PBE
'XC_GGA_X_HJS_B88_V2' : 46, # HJS screened exchange corrected B88 version
'XC_GGA_C_Q2D' : 47, # Chiodo et al
'XC_GGA_X_Q2D' : 48, # Chiodo et al
'XC_GGA_X_PBE_MOL' : 49, # Del Campo, Gazquez, Trickey and Vela (PBE-like)
'XC_GGA_K_TFVW' : 52, # Thomas-Fermi plus von Weiszaecker correction
'XC_GGA_K_REVAPBEINT' : 53, # interpolated version of REVAPBE
'XC_GGA_K_APBEINT' : 54, # interpolated version of APBE
'XC_GGA_K_REVAPBE' : 55, # revised APBE
'XC_GGA_X_AK13' : 56, # Armiento & Kuemmel 2013
'XC_GGA_K_MEYER' : 57, # Meyer, Wang, and Young
'XC_GGA_X_LV_RPW86' : 58, # Berland and Hyldgaard
'XC_GGA_X_PBE_TCA' : 59, # PBE revised by Tognetti et al
'XC_GGA_X_PBEINT' : 60, # PBE for hybrid interfaces
'XC_GGA_C_ZPBEINT' : 61, # spin-dependent gradient correction to PBEint
'XC_GGA_C_PBEINT' : 62, # PBE for hybrid interfaces
'XC_GGA_C_ZPBESOL' : 63, # spin-dependent gradient correction to PBEsol
'XC_GGA_XC_OPBE_D' : 65, # oPBE_D functional of Goerigk and Grimme
'XC_GGA_XC_OPWLYP_D' : 66, # oPWLYP-D functional of Goerigk and Grimme
'XC_GGA_XC_OBLYP_D' : 67, # oBLYP-D functional of Goerigk and Grimme
'XC_GGA_X_VMT84_GE' : 68, # VMT{8,4} with constraint satisfaction with mu = mu_GE
'XC_GGA_X_VMT84_PBE' : 69, # VMT{8,4} with constraint satisfaction with mu = mu_PBE
'XC_GGA_X_VMT_GE' : 70, # Vela, Medel, and Trickey with mu = mu_GE
'XC_GGA_X_VMT_PBE' : 71, # Vela, Medel, and Trickey with mu = mu_PBE
'XC_GGA_C_N12_SX' : 79, # N12-SX functional from Minnesota
'XC_GGA_C_N12' : 80, # N12 functional from Minnesota
'XC_GGA_X_N12' : 82, # N12 functional from Minnesota
'XC_GGA_C_REGTPSS' : 83, # Regularized TPSS correlation (ex-VPBE)
'XC_GGA_C_OP_XALPHA' : 84, # one-parameter progressive functional (XALPHA version)
'XC_GGA_C_OP_G96' : 85, # one-parameter progressive functional (G96 version)
'XC_GGA_C_OP_PBE' : 86, # one-parameter progressive functional (PBE version)
'XC_GGA_C_OP_B88' : 87, # one-parameter progressive functional (B88 version)
'XC_GGA_C_FT97' : 88, # Filatov & Thiel correlation
'XC_GGA_C_SPBE' : 89, # PBE correlation to be used with the SSB exchange
'XC_GGA_X_SSB_SW' : 90, # Swarta, Sola and Bickelhaupt correction to PBE
'XC_GGA_X_SSB' : 91, # Swarta, Sola and Bickelhaupt
'XC_GGA_X_SSB_D' : 92, # Swarta, Sola and Bickelhaupt dispersion
'XC_GGA_XC_HCTH_407P' : 93, # HCTH/407+
'XC_GGA_XC_HCTH_P76' : 94, # HCTH p=7/6
'XC_GGA_XC_HCTH_P14' : 95, # HCTH p=1/4
'XC_GGA_XC_B97_GGA1' : 96, # Becke 97 GGA-1
'XC_GGA_C_HCTH_A' : 97, # HCTH-A
'XC_GGA_X_BPCCAC' : 98, # BPCCAC (GRAC for the energy)
'XC_GGA_C_REVTCA' : 99, # Tognetti, Cortona, Adamo (revised)
'XC_GGA_C_TCA' : 100, # Tognetti, Cortona, Adamo
'XC_GGA_X_PBE' : 101, # Perdew, Burke & Ernzerhof exchange
'XC_GGA_X_PBE_R' : 102, # Perdew, Burke & Ernzerhof exchange (revised)
'XC_GGA_X_B86' : 103, # Becke 86 Xalpha,beta,gamma
'XC_GGA_X_HERMAN' : 104, # Herman et al original GGA
'XC_GGA_X_B86_MGC' : 105, # Becke 86 Xalpha,beta,gamma (with mod. grad. correction)
'XC_GGA_X_B88' : 106, # Becke 88
'XC_GGA_X_G96' : 107, # Gill 96
'XC_GGA_X_PW86' : 108, # Perdew & Wang 86
'XC_GGA_X_PW91' : 109, # Perdew & Wang 91
'XC_GGA_X_OPTX' : 110, # Handy & Cohen OPTX 01
'XC_GGA_X_DK87_R1' : 111, # dePristo & Kress 87 (version R1)
'XC_GGA_X_DK87_R2' : 112, # dePristo & Kress 87 (version R2)
'XC_GGA_X_LG93' : 113, # Lacks & Gordon 93
'XC_GGA_X_FT97_A' : 114, # Filatov & Thiel 97 (version A)
'XC_GGA_X_FT97_B' : 115, # Filatov & Thiel 97 (version B)
'XC_GGA_X_PBE_SOL' : 116, # Perdew, Burke & Ernzerhof exchange (solids)
'XC_GGA_X_RPBE' : 117, # Hammer, Hansen & Norskov (PBE-like)
'XC_GGA_X_WC' : 118, # Wu & Cohen
'XC_GGA_X_MPW91' : 119, # Modified form of PW91 by Adamo & Barone
'XC_GGA_X_AM05' : 120, # Armiento & Mattsson 05 exchange
'XC_GGA_X_PBEA' : 121, # Madsen (PBE-like)
'XC_GGA_X_MPBE' : 122, # Adamo & Barone modification to PBE
'XC_GGA_X_XPBE' : 123, # xPBE reparametrization by Xu & Goddard
'XC_GGA_X_2D_B86_MGC' : 124, # Becke 86 MGC for 2D systems
'XC_GGA_X_BAYESIAN' : 125, # Bayesian best fit for the enhancement factor
'XC_GGA_X_PBE_JSJR' : 126, # JSJR reparametrization by Pedroza, Silva & Capelle
'XC_GGA_X_2D_B88' : 127, # Becke 88 in 2D
'XC_GGA_X_2D_B86' : 128, # Becke 86 Xalpha,beta,gamma
'XC_GGA_X_2D_PBE' : 129, # Perdew, Burke & Ernzerhof exchange in 2D
'XC_GGA_C_PBE' : 130, # Perdew, Burke & Ernzerhof correlation
'XC_GGA_C_LYP' : 131, # Lee, Yang & Parr
'XC_GGA_C_P86' : 132, # Perdew 86
'XC_GGA_C_PBE_SOL' : 133, # Perdew, Burke & Ernzerhof correlation SOL
'XC_GGA_C_PW91' : 134, # Perdew & Wang 91
'XC_GGA_C_AM05' : 135, # Armiento & Mattsson 05 correlation
'XC_GGA_C_XPBE' : 136, # xPBE reparametrization by Xu & Goddard
'XC_GGA_C_LM' : 137, # Langreth and Mehl correlation
'XC_GGA_C_PBE_JRGX' : 138, # JRGX reparametrization by Pedroza, Silva & Capelle
'XC_GGA_X_OPTB88_VDW' : 139, # Becke 88 reoptimized to be used with vdW functional of Dion et al
'XC_GGA_X_PBEK1_VDW' : 140, # PBE reparametrization for vdW
'XC_GGA_X_OPTPBE_VDW' : 141, # PBE reparametrization for vdW
'XC_GGA_X_RGE2' : 142, # Regularized PBE
'XC_GGA_C_RGE2' : 143, # Regularized PBE
'XC_GGA_X_RPW86' : 144, # refitted Perdew & Wang 86
'XC_GGA_X_KT1' : 145, # Keal and Tozer version 1
'XC_GGA_XC_KT2' : 146, # Keal and Tozer version 2
'XC_GGA_C_WL' : 147, # Wilson & Levy
'XC_GGA_C_WI' : 148, # Wilson & Ivanov
'XC_GGA_X_MB88' : 149, # Modified Becke 88 for proton transfer
'XC_GGA_X_SOGGA' : 150, # Second-order generalized gradient approximation
'XC_GGA_X_SOGGA11' : 151, # Second-order generalized gradient approximation 2011
'XC_GGA_C_SOGGA11' : 152, # Second-order generalized gradient approximation 2011
'XC_GGA_C_WI0' : 153, # Wilson & Ivanov initial version
'XC_GGA_XC_TH1' : 154, # Tozer and Handy v. 1
'XC_GGA_XC_TH2' : 155, # Tozer and Handy v. 2
'XC_GGA_XC_TH3' : 156, # Tozer and Handy v. 3
'XC_GGA_XC_TH4' : 157, # Tozer and Handy v. 4
'XC_GGA_X_C09X' : 158, # C09x to be used with the VdW of Rutgers-Chalmers
'XC_GGA_C_SOGGA11_X' : 159, # To be used with HYB_GGA_X_SOGGA11_X
'XC_GGA_X_LB' : 160, # van Leeuwen & Baerends
'XC_GGA_XC_HCTH_93' : 161, # HCTH functional fitted to 93 molecules
'XC_GGA_XC_HCTH_120' : 162, # HCTH functional fitted to 120 molecules
'XC_GGA_XC_HCTH_147' : 163, # HCTH functional fitted to 147 molecules
'XC_GGA_XC_HCTH_407' : 164, # HCTH functional fitted to 407 molecules
'XC_GGA_XC_EDF1' : 165, # Empirical functionals from Adamson, Gill, and Pople
'XC_GGA_XC_XLYP' : 166, # XLYP functional
'XC_GGA_XC_B97_D' : 170, # Grimme functional to be used with C6 vdW term
'XC_GGA_XC_PBE1W' : 173, # Functionals fitted for water
'XC_GGA_XC_MPWLYP1W' : 174, # Functionals fitted for water
'XC_GGA_XC_PBELYP1W' : 175, # Functionals fitted for water
'XC_GGA_X_LBM' : 182, # van Leeuwen & Baerends modified
'XC_GGA_X_OL2' : 183, # Exchange form based on Ou-Yang and Levy v.2
'XC_GGA_X_APBE' : 184, # mu fixed from the semiclassical neutral atom
'XC_GGA_K_APBE' : 185, # mu fixed from the semiclassical neutral atom
'XC_GGA_C_APBE' : 186, # mu fixed from the semiclassical neutral atom
'XC_GGA_K_TW1' : 187, # Tran and Wesolowski set 1 (Table II)
'XC_GGA_K_TW2' : 188, # Tran and Wesolowski set 2 (Table II)
'XC_GGA_K_TW3' : 189, # Tran and Wesolowski set 3 (Table II)
'XC_GGA_K_TW4' : 190, # Tran and Wesolowski set 4 (Table II)
'XC_GGA_X_HTBS' : 191, # Haas, Tran, Blaha, and Schwarz
'XC_GGA_X_AIRY' : 192, # Constantin et al based on the Airy gas
'XC_GGA_X_LAG' : 193, # Local Airy Gas
'XC_GGA_XC_MOHLYP' : 194, # Functional for organometallic chemistry
'XC_GGA_XC_MOHLYP2' : 195, # Functional for barrier heights
'XC_GGA_XC_TH_FL' : 196, # Tozer and Handy v. FL
'XC_GGA_XC_TH_FC' : 197, # Tozer and Handy v. FC
'XC_GGA_XC_TH_FCFO' : 198, # Tozer and Handy v. FCFO
'XC_GGA_XC_TH_FCO' : 199, # Tozer and Handy v. FCO
'XC_GGA_C_OPTC' : 200, # Optimized correlation functional of Cohen and Handy
'XC_GGA_C_PBELOC' : 246, # Semilocal dynamical correlation
'XC_GGA_XC_VV10' : 255, # Vydrov and Van Voorhis
'XC_GGA_C_PBEFE' : 258, # PBE for formation energies
'XC_GGA_C_OP_PW91' : 262, # one-parameter progressive functional (PW91 version)
'XC_GGA_X_PBEFE' : 265, # PBE for formation energies
'XC_GGA_X_CAP' : 270, # Correct Asymptotic Potential
'XC_GGA_K_VW' : 500, # von Weiszaecker functional
'XC_GGA_K_GE2' : 501, # Second-order gradient expansion (l = 1/9)
'XC_GGA_K_GOLDEN' : 502, # TF-lambda-vW form by Golden (l = 13/45)
'XC_GGA_K_YT65' : 503, # TF-lambda-vW form by Yonei and Tomishima (l = 1/5)
'XC_GGA_K_BALTIN' : 504, # TF-lambda-vW form by Baltin (l = 5/9)
'XC_GGA_K_LIEB' : 505, # TF-lambda-vW form by Lieb (l = 0.185909191)
'XC_GGA_K_ABSP1' : 506, # gamma-TFvW form by Acharya et al [g = 1 - 1.412/N^(1/3)]
'XC_GGA_K_ABSP2' : 507, # gamma-TFvW form by Acharya et al [g = 1 - 1.332/N^(1/3)]
'XC_GGA_K_GR' : 508, # gamma-TFvW form by Gazquez and Robles
'XC_GGA_K_LUDENA' : 509, # gamma-TFvW form by Ludena
'XC_GGA_K_GP85' : 510, # gamma-TFvW form by Ghosh and Parr
'XC_GGA_K_PEARSON' : 511, # Pearson
'XC_GGA_K_OL1' : 512, # Ou-Yang and Levy v.1
'XC_GGA_K_OL2' : 513, # Ou-Yang and Levy v.2
'XC_GGA_K_FR_B88' : 514, # Fuentealba & Reyes (B88 version)
'XC_GGA_K_FR_PW86' : 515, # Fuentealba & Reyes (PW86 version)
'XC_GGA_K_DK' : 516, # DePristo and Kress
'XC_GGA_K_PERDEW' : 517, # Perdew
'XC_GGA_K_VSK' : 518, # Vitos, Skriver, and Kollar
'XC_GGA_K_VJKS' : 519, # Vitos, Johansson, Kollar, and Skriver
'XC_GGA_K_ERNZERHOF' : 520, # Ernzerhof
'XC_GGA_K_LC94' : 521, # Lembarki & Chermette
'XC_GGA_K_LLP' : 522, # Lee, Lee & Parr
'XC_GGA_K_THAKKAR' : 523, # Thakkar 1992
'XC_GGA_X_WPBEH' : 524, # short-range version of the PBE
'XC_GGA_X_HJS_PBE' : 525, # HJS screened exchange PBE version
'XC_GGA_X_HJS_PBE_SOL' : 526, # HJS screened exchange PBE_SOL version
'XC_GGA_X_HJS_B88' : 527, # HJS screened exchange B88 version
'XC_GGA_X_HJS_B97X' : 528, # HJS screened exchange B97x version
'XC_GGA_X_ITYH' : 529, # short-range recipe for exchange GGA functionals
'XC_GGA_X_SFAT' : 530, # short-range recipe for exchange GGA functionals
'XC_HYB_GGA_X_N12_SX' : 81, # N12-SX functional from Minnesota
'XC_HYB_GGA_XC_B97_1P' : 266, # version of B97 by Cohen and Handy
'XC_HYB_GGA_XC_B3PW91' : 401, # The original (ACM) hybrid of Becke
'XC_HYB_GGA_XC_B3LYP' : 402, # The (in)famous B3LYP
'XC_HYB_GGA_XC_B3P86' : 403, # Perdew 86 hybrid similar to B3PW91
'XC_HYB_GGA_XC_O3LYP' : 404, # hybrid using the optx functional
'XC_HYB_GGA_XC_MPW1K' : 405, # mixture of mPW91 and PW91 optimized for kinetics
'XC_HYB_GGA_XC_PBEH' : 406, # aka PBE0 or PBE1PBE
'XC_HYB_GGA_XC_B97' : 407, # Becke 97
'XC_HYB_GGA_XC_B97_1' : 408, # Becke 97-1
'XC_HYB_GGA_XC_B97_2' : 410, # Becke 97-2
'XC_HYB_GGA_XC_X3LYP' : 411, # hybrid by Xu and Goddard
'XC_HYB_GGA_XC_B1WC' : 412, # Becke 1-parameter mixture of WC and PBE
'XC_HYB_GGA_XC_B97_K' : 413, # Boese-Martin for Kinetics
'XC_HYB_GGA_XC_B97_3' : 414, # Becke 97-3
'XC_HYB_GGA_XC_MPW3PW' : 415, # mixture with the mPW functional
'XC_HYB_GGA_XC_B1LYP' : 416, # Becke 1-parameter mixture of B88 and LYP
'XC_HYB_GGA_XC_B1PW91' : 417, # Becke 1-parameter mixture of B88 and PW91
'XC_HYB_GGA_XC_MPW1PW' : 418, # Becke 1-parameter mixture of mPW91 and PW91
'XC_HYB_GGA_XC_MPW3LYP' : 419, # mixture of mPW and LYP
'XC_HYB_GGA_XC_SB98_1A' : 420, # Schmider-Becke 98 parameterization 1a
'XC_HYB_GGA_XC_SB98_1B' : 421, # Schmider-Becke 98 parameterization 1b
'XC_HYB_GGA_XC_SB98_1C' : 422, # Schmider-Becke 98 parameterization 1c
'XC_HYB_GGA_XC_SB98_2A' : 423, # Schmider-Becke 98 parameterization 2a
'XC_HYB_GGA_XC_SB98_2B' : 424, # Schmider-Becke 98 parameterization 2b
'XC_HYB_GGA_XC_SB98_2C' : 425, # Schmider-Becke 98 parameterization 2c
'XC_HYB_GGA_X_SOGGA11_X' : 426, # Hybrid based on SOGGA11 form
'XC_HYB_GGA_XC_HSE03' : 427, # the 2003 version of the screened hybrid HSE
'XC_HYB_GGA_XC_HSE06' : 428, # the 2006 version of the screened hybrid HSE
'XC_HYB_GGA_XC_HJS_PBE' : 429, # HJS hybrid screened exchange PBE version
'XC_HYB_GGA_XC_HJS_PBE_SOL' : 430, # HJS hybrid screened exchange PBE_SOL version
'XC_HYB_GGA_XC_HJS_B88' : 431, # HJS hybrid screened exchange B88 version
'XC_HYB_GGA_XC_HJS_B97X' : 432, # HJS hybrid screened exchange B97x version
'XC_HYB_GGA_XC_CAM_B3LYP' : 433, # CAM version of B3LYP
'XC_HYB_GGA_XC_TUNED_CAM_B3LYP' : 434, # CAM version of B3LYP tuned for excitations
'XC_HYB_GGA_XC_BHANDH' : 435, # Becke half-and-half
'XC_HYB_GGA_XC_BHANDHLYP' : 436, # Becke half-and-half with B88 exchange
'XC_HYB_GGA_XC_MB3LYP_RC04' : 437, # B3LYP with RC04 LDA
'XC_HYB_GGA_XC_MPWLYP1M' : 453, # MPW with 1 par. for metals/LYP
'XC_HYB_GGA_XC_REVB3LYP' : 454, # Revised B3LYP
'XC_HYB_GGA_XC_CAMY_BLYP' : 455, # BLYP with yukawa screening
'XC_HYB_GGA_XC_PBE0_13' : 456, # PBE0-1/3
'XC_HYB_GGA_XC_B3LYPS' : 459, # B3LYP* functional
'XC_HYB_GGA_XC_WB97' : 463, # Chai and Head-Gordon
'XC_HYB_GGA_XC_WB97X' : 464, # Chai and Head-Gordon
'XC_HYB_GGA_XC_LRC_WPBEH' : 465, # Long-range corrected functional by Rorhdanz et al
'XC_HYB_GGA_XC_WB97X_V' : 466, # Mardirossian and Head-Gordon
'XC_HYB_GGA_XC_LCY_PBE' : 467, # PBE with yukawa screening
'XC_HYB_GGA_XC_LCY_BLYP' : 468, # BLYP with yukawa screening
'XC_HYB_GGA_XC_LC_VV10' : 469, # Vydrov and Van Voorhis
'XC_HYB_GGA_XC_CAMY_B3LYP' : 470, # B3LYP with Yukawa screening
'XC_HYB_GGA_XC_WB97X_D' : 471, # Chai and Head-Gordon
'XC_HYB_GGA_XC_HPBEINT' : 472, # hPBEint
'XC_HYB_GGA_XC_LRC_WPBE' : 473, # Long-range corrected functional by Rorhdanz et al
'XC_HYB_GGA_XC_B3LYP5' : 475, # B3LYP with VWN functional 5 instead of RPA
'XC_HYB_GGA_XC_EDF2' : 476, # Empirical functional from Lin, George and Gill
'XC_HYB_GGA_XC_CAP0' : 477, # Correct Asymptotic Potential hybrid
'XC_MGGA_C_DLDF' : 37, # Dispersionless Density Functional
'XC_MGGA_XC_ZLP' : 42, # Zhao, Levy & Parr, Eq. (21)
'XC_MGGA_XC_OTPSS_D' : 64, # oTPSS_D functional of Goerigk and Grimme
'XC_MGGA_C_CS' : 72, # Colle and Salvetti
'XC_MGGA_C_MN12_SX' : 73, # Worker for MN12-SX functional
'XC_MGGA_C_MN12_L' : 74, # MN12-L functional from Minnesota
'XC_MGGA_C_M11_L' : 75, # M11-L functional from Minnesota
'XC_MGGA_C_M11' : 76, # Worker for M11 functional
'XC_MGGA_C_M08_SO' : 77, # Worker for M08-SO functional
'XC_MGGA_C_M08_HX' : 78, # Worker for M08-HX functional
'XC_MGGA_X_LTA' : 201, # Local tau approximation of Ernzerhof & Scuseria
'XC_MGGA_X_TPSS' : 202, # Perdew, Tao, Staroverov & Scuseria exchange
'XC_MGGA_X_M06_L' : 203, # M06-Local functional of Minnesota
'XC_MGGA_X_GVT4' : 204, # GVT4 from Van Voorhis and Scuseria
'XC_MGGA_X_TAU_HCTH' : 205, # tau-HCTH from Boese and Handy
'XC_MGGA_X_BR89' : 206, # Becke-Roussel 89
'XC_MGGA_X_BJ06' : 207, # Becke & Johnson correction to Becke-Roussel 89
'XC_MGGA_X_TB09' : 208, # Tran & Blaha correction to Becke & Johnson
'XC_MGGA_X_RPP09' : 209, # Rasanen, Pittalis, and Proetto correction to Becke & Johnson
'XC_MGGA_X_2D_PRHG07' : 210, # Pittalis, Rasanen, Helbig, Gross Exchange Functional
'XC_MGGA_X_2D_PRHG07_PRP10' : 211, # PRGH07 with PRP10 correction
'XC_MGGA_X_REVTPSS' : 212, # revised Perdew, Tao, Staroverov & Scuseria exchange
'XC_MGGA_X_PKZB' : 213, # Perdew, Kurth, Zupan, and Blaha
'XC_MGGA_X_M05' : 214, # Worker for M05 functional
'XC_MGGA_X_M05_2X' : 215, # Worker for M05-2X functional
'XC_MGGA_X_M06_HF' : 216, # Worker for M06-HF functional
'XC_MGGA_X_M06' : 217, # Worker for M06 functional
'XC_MGGA_X_M06_2X' : 218, # Worker for M06-2X functional
'XC_MGGA_X_M08_HX' : 219, # Worker for M08-HX functional
'XC_MGGA_X_M08_SO' : 220, # Worker for M08-SO functional
'XC_MGGA_X_MS0' : 221, # MS exchange of Sun, Xiao, and Ruzsinszky
'XC_MGGA_X_MS1' : 222, # MS1 exchange of Sun, et al
'XC_MGGA_X_MS2' : 223, # MS2 exchange of Sun, et al
'XC_MGGA_X_M11' : 225, # Worker for M11 functional
'XC_MGGA_X_M11_L' : 226, # M11-L functional from Minnesota
'XC_MGGA_X_MN12_L' : 227, # MN12-L functional from Minnesota
'XC_MGGA_C_CC06' : 229, # Cancio and Chou 2006
'XC_MGGA_X_MK00' : 230, # Exchange for accurate virtual orbital energies
'XC_MGGA_C_TPSS' : 231, # Perdew, Tao, Staroverov & Scuseria correlation
'XC_MGGA_C_VSXC' : 232, # VSxc from Van Voorhis and Scuseria (correlation part)
'XC_MGGA_C_M06_L' : 233, # M06-Local functional from Minnesota
'XC_MGGA_C_M06_HF' : 234, # Worker for M06-HF functional
'XC_MGGA_C_M06' : 235, # Worker for M06 functional
'XC_MGGA_C_M06_2X' : 236, # Worker for M06-2X functional
'XC_MGGA_C_M05' : 237, # Worker for M05 functional
'XC_MGGA_C_M05_2X' : 238, # Worker for M05-2X functional
'XC_MGGA_C_PKZB' : 239, # Perdew, Kurth, Zupan, and Blaha
'XC_MGGA_C_BC95' : 240, # Becke correlation 95
'XC_MGGA_C_REVTPSS' : 241, # revised TPSS correlation
'XC_MGGA_XC_TPSSLYP1W' : 242, # Functionals fitted for water
'XC_MGGA_X_MK00B' : 243, # Exchange for accurate virtual orbital energies (v. B)
'XC_MGGA_X_BLOC' : 244, # functional with balanced localization
'XC_MGGA_X_MODTPSS' : 245, # Modified Perdew, Tao, Staroverov & Scuseria exchange
'XC_MGGA_C_TPSSLOC' : 247, # Semilocal dynamical correlation
'XC_MGGA_X_MBEEF' : 249, # mBEEF exchange
'XC_MGGA_X_MBEEFVDW' : 250, # mBEEF-vdW exchange
'XC_MGGA_XC_B97M_V' : 254, # Mardirossian and Head-Gordon
'XC_MGGA_X_MVS' : 257, # MVS exchange of Sun, Perdew, and Ruzsinszky
'XC_MGGA_X_MN15_L' : 260, # MN15-L functional from Minnesota
'XC_MGGA_C_MN15_L' : 261, # MN15-L functional from Minnesota
'XC_MGGA_X_SCAN' : 263, # SCAN exchange of Sun, Ruzsinszky, and Perdew
'XC_MGGA_C_SCAN' : 267, # SCAN correlation
'XC_MGGA_C_MN15' : 269, # MN15 functional from Minnesota
'XC_HYB_MGGA_X_DLDF' : 36, # Dispersionless Density Functional
'XC_HYB_MGGA_X_MS2H' : 224, # MS2 hybrid exchange of Sun, et al
'XC_HYB_MGGA_X_MN12_SX' : 248, # MN12-SX hybrid functional from Minnesota
'XC_HYB_MGGA_X_SCAN0' : 264, # SCAN hybrid
'XC_HYB_MGGA_X_MN15' : 268, # MN15 functional from Minnesota
'XC_HYB_MGGA_XC_M05' : 438, # M05 functional from Minnesota
'XC_HYB_MGGA_XC_M05_2X' : 439, # M05-2X functional from Minnesota
'XC_HYB_MGGA_XC_B88B95' : 440, # Mixture of B88 with BC95 (B1B95)
'XC_HYB_MGGA_XC_B86B95' : 441, # Mixture of B86 with BC95
'XC_HYB_MGGA_XC_PW86B95' : 442, # Mixture of PW86 with BC95
'XC_HYB_MGGA_XC_BB1K' : 443, # Mixture of B88 with BC95 from Zhao and Truhlar
'XC_HYB_MGGA_XC_M06_HF' : 444, # M06-HF functional from Minnesota
'XC_HYB_MGGA_XC_MPW1B95' : 445, # Mixture of mPW91 with BC95 from Zhao and Truhlar
'XC_HYB_MGGA_XC_MPWB1K' : 446, # Mixture of mPW91 with BC95 for kinetics
'XC_HYB_MGGA_XC_X1B95' : 447, # Mixture of X with BC95
'XC_HYB_MGGA_XC_XB1K' : 448, # Mixture of X with BC95 for kinetics
'XC_HYB_MGGA_XC_M06' : 449, # M06 functional from Minnesota
'XC_HYB_MGGA_XC_M06_2X' : 450, # M06-2X functional from Minnesota
'XC_HYB_MGGA_XC_PW6B95' : 451, # Mixture of PW91 with BC95 from Zhao and Truhlar
'XC_HYB_MGGA_XC_PWB6K' : 452, # Mixture of PW91 with BC95 from Zhao and Truhlar for kinetics
'XC_HYB_MGGA_XC_TPSSH' : 457, # TPSS hybrid
'XC_HYB_MGGA_XC_REVTPSSH' : 458, # revTPSS hybrid
'XC_HYB_MGGA_XC_M08_HX' : 460, # M08-HX functional from Minnesota
'XC_HYB_MGGA_XC_M08_SO' : 461, # M08-SO functional from Minnesota
'XC_HYB_MGGA_XC_M11' : 462, # M11 functional from Minnesota
'XC_HYB_MGGA_X_MVSH' : 474, # MVS hybrid
'XC_HYB_MGGA_XC_WB97M_V' : 531, # Mardirossian and Head-Gordon
#
# alias
#
'LDA' : 1 ,
'SLATER' : 1 ,
'VWN3' : 8,
'VWNRPA' : 8,
'VWN5' : 7,
'BLYP' : 'B88,LYP',
'BP86' : 'B88,P86',
'PBE0' : 406,
'PBE1PBE' : 406,
'B3LYP' : 'B3LYP5', # VWN5 version
'B3LYP5' : '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN',
'B3LYPG' : 402, # VWN3, used by Gaussian
'B3P86' : 'B3P865', # VWN5 version
'B3P865' : '.2*HF + .08*LDA + .72*B88, .81*P86 + .19*VWN',
'B3P86G' : 403, # VWN3, used by Gaussian
'MPW3PW' : 'MPW3PW5', # VWN5 version
'MPW3PW5' : '.2*HF + .08*LDA + .72*MPW91, .81*PW91 + .19*VWN',
'MPW3PWG' : 415, # VWN3, used by Gaussian
'MPW3LYP' : 'MPW3LYP5', # VWN5 version
'MPW3LYP5' : '.218*HF + .073*LDA + .709*MPW91, .871*LYP + .129*VWN',
'MPW3LYPG' : 419, # VWN3, used by Gaussian
'REVB3LYP' : 'REVB3LYP5', # VWN5 version
'REVB3LYP5' : '.2*HF + .13*LDA + .67*B88, .84*LYP + .16*VWN',
'REVB3LYPG' : 454, # VWN3, used by Gaussian
'X3LYP' : 'X3LYP5', # VWN5 version
'X3LYP5' : '.218*HF + .073*LDA + .478575*B88 + .166615*PW91, .871*LYP + .129*VWN',
'X3LYPG' : 411, # VWN3, used by Gaussian
'XC_MGGA_X_M06L': 203,
'XC_MGGA_C_M06L': 233,
}
XC_KEYS = set(XC_CODES.keys())
def xc_type(xc_code):
if isinstance(xc_code, str):
hyb, fn_facs = parse_xc(xc_code)
else:
fn_facs = [(xc_code, 1)] # mimic fn_facs
if not fn_facs:
return 'HF'
elif all(_itrf.LIBXC_is_lda(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'LDA'
elif any(_itrf.LIBXC_is_meta_gga(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'MGGA'
else:
# any(_itrf.LIBXC_is_gga(ctypes.c_int(xid)) for xid, fac in fn_facs)
# include hybrid_xc
return 'GGA'
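# Illustrative usage (a sketch, not executed here; it assumes the compiled libxc
# interface is importable and the standard XC_CODES table defined in this module):
#   xc_type('LDA,VWN')  -> 'LDA'
#   xc_type('B3LYP')    -> 'GGA'   (hybrid GGAs are reported as 'GGA')
#   xc_type('M06,M06')  -> 'MGGA'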
def is_lda(xc_code):
return xc_type(xc_code) == 'LDA'
def is_hybrid_xc(xc_code):
if isinstance(xc_code, str):
if xc_code.isdigit():
            return _itrf.LIBXC_is_hybrid(ctypes.c_int(int(xc_code)))
else:
return ('HF' in xc_code or hybrid_coeff(xc_code) != 0)
elif isinstance(xc_code, int):
return _itrf.LIBXC_is_hybrid(ctypes.c_int(xc_code))
else:
return any((is_hybrid_xc(x) for x in xc_code))
def is_meta_gga(xc_code):
return xc_type(xc_code) == 'MGGA'
def is_gga(xc_code):
return xc_type(xc_code) == 'GGA'
def max_deriv_order(xc_code):
hyb, fn_facs = parse_xc(xc_code)
if fn_facs:
return min(_itrf.LIBXC_max_deriv_order(ctypes.c_int(xid)) for xid, fac in fn_facs)
else:
return 4
def test_deriv_order(xc_code, deriv, raise_error=False):
support = deriv <= max_deriv_order(xc_code)
if not support and raise_error:
from pyscf.dft import xcfun
msg = ('libxc library does not support derivative order %d for %s' %
(deriv, xc_code))
try:
if xcfun.test_deriv_order(xc_code, deriv, raise_error=False):
msg += ('''
This functional derivative is supported in the xcfun library.
The following code can be used to change the libxc library to xcfun library:
from pyscf.dft import xcfun
mf._numint.libxc = xcfun
''')
raise NotImplementedError(msg)
except (NotImplementedError, KeyError):
raise NotImplementedError(msg)
return support
def hybrid_coeff(xc_code, spin=0):
'''Support recursively defining hybrid functional
'''
hyb, fn_facs = parse_xc(xc_code)
for xid, fac in fn_facs:
hyb += _itrf.LIBXC_hybrid_coeff(ctypes.c_int(xid))
return hyb
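# Illustrative sketch (assuming the compiled libxc interface is available):
#   hybrid_coeff('PBE0')      -> 0.25  (exact-exchange fraction reported by libxc)
#   hybrid_coeff('0.5*B3LYP') -> 0.1   (the parsed HF fraction; the remaining pure
#                                       LDA/GGA components contribute 0)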
def parse_xc_name(xc_name='LDA,VWN'):
'''Convert the XC functional name to libxc library internal ID.
'''
fn_facs = parse_xc(xc_name)[1]
return fn_facs[0][0], fn_facs[1][0]
def parse_xc(description):
    r'''Rules to input functional description:
* The given functional description must be a one-line string.
* The functional description is case-insensitive.
* The functional description string has two parts, separated by ",". The
first part describes the exchange functional, the second is the correlation
functional.
- If "," not presented in string, the entire string is treated as
X functional.
- To neglect X functional (just apply C functional), leave blank in the
first part, eg description=',vwn' for pure VWN functional
- If compound XC functional (including both X and C functionals, such as
b3lyp) is specified, no matter whehter it is in the X part (the string
in front of comma) or the C part (the string behind comma), both X and C
functionals of the compound XC functional will be used.
* The functional name can be placed in arbitrary order. Two names needs to
be separated by operators + or -. Blank spaces are ignored.
NOTE the parser only reads operators + - *. / is not supported.
* A functional name is associated with one factor. If the factor is not
given, it is assumed equaling 1. Compound functional can be scaled as a
unit. For example '0.5*b3lyp' is equivalent to
'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
* String "HF" stands for exact exchange (HF K matrix). It is allowed to
put "HF" in C (correlation) functional part.
* Be careful with the libxc convention on GGA functional, in which the LDA
contribution is included.
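    Examples:
        Illustrative only; the numeric IDs assume the standard libxc numbering
        used by the XC_CODES table in this module.
        >>> parse_xc('LDA,VWN')
        (0, [(1, 1), (7, 1)])
        >>> parse_xc('0.5*B3LYP')  # a compound functional scaled as a unit
        (0.1, [(1, 0.04), (106, 0.36), (131, 0.405), (7, 0.095)])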
Args:
xc_code : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
If "HF" was appeared in the string, it stands for the exact exchange.
rho : ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
Kwargs:
spin : int
spin polarized if spin > 0
relativity : int
No effects.
verbose : int or object of :class:`Logger`
No effects.
Returns:
ex, vxc, fxc, kxc
where
* vxc = (vrho, vsigma, vlapl, vtau) for restricted case
* vxc for unrestricted case
| vrho[:,2] = (u, d)
| vsigma[:,3] = (uu, ud, dd)
| vlapl[:,2] = (u, d)
| vtau[:,2] = (u, d)
* fxc for restricted case:
(v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)
* fxc for unrestricted case:
| v2rho2[:,3] = (u_u, u_d, d_d)
| v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
| v2sigma2[:,6] = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
| v2lapl2[:,3]
| vtau2[:,3]
| v2rholapl[:,4]
| v2rhotau[:,4]
| v2lapltau[:,4]
| v2sigmalapl[:,6]
| v2sigmatau[:,6]
* kxc for restricted case:
v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3,
v3rho2tau, v3rhosigmatau, v3rhotau2, v3sigma2tau, v3sigmatau2, v3tau3
* kxc for unrestricted case:
| v3rho3[:,4] = (u_u_u, u_u_d, u_d_d, d_d_d)
| v3rho2sigma[:,9] = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
| v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
| v3sigma3[:,10] = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)
| v3rho2tau
| v3rhosigmatau
| v3rhotau2
| v3sigma2tau
| v3sigmatau2
| v3tau3
see also libxc_itrf.c
'''
if isinstance(description, int):
return 0, [(description, 1.)]
elif not isinstance(description, str): #isinstance(description, (tuple,list)):
return parse_xc('%s,%s' % tuple(description))
hyb = [0]
fn_facs = []
def parse_token(token, possible_xc_for):
if token:
if '*' in token:
fac, key = token.split('*')
if fac[0].isalpha():
fac, key = key, fac
fac = float(fac)
else:
fac, key = 1, token
if key == 'HF':
hyb[0] += fac
else:
if key in XC_CODES:
x_id = XC_CODES[key]
else:
possible_xc = XC_KEYS.intersection(possible_xc_for(key))
if possible_xc:
if len(possible_xc) > 1:
sys.stderr.write('Possible xc_code %s matches %s. '
% (possible_xc, key))
x_id = possible_xc.pop()
sys.stderr.write('Take %s\n' % x_id)
else:
x_id = possible_xc.pop()
x_id = XC_CODES[x_id]
else:
raise KeyError('Unknown key %s' % key)
if isinstance(x_id, str):
hyb1, fn_facs1 = parse_xc(x_id)
# Recursively scale the composed functional, to support '0.5*b3lyp'
hyb[0] += hyb1 * fac
fn_facs.extend([(xid, c*fac) for xid, c in fn_facs1])
elif x_id is None:
raise NotImplementedError(key)
else:
fn_facs.append((x_id, fac))
def possible_x_for(key):
return set((key, 'XC_'+key,
'XC_LDA_X_'+key, 'XC_GGA_X_'+key, 'XC_MGGA_X_'+key,
'XC_HYB_GGA_X_'+key, 'XC_HYB_MGGA_X_'+key))
def possible_xc_for(key):
return set((key, 'XC_LDA_XC_'+key, 'XC_GGA_XC_'+key, 'XC_MGGA_XC_'+key,
'XC_HYB_GGA_XC_'+key, 'XC_HYB_MGGA_XC_'+key))
def possible_k_for(key):
return set((key, 'XC_'+key,
'XC_LDA_K_'+key, 'XC_GGA_K_'+key,))
def possible_c_for(key):
return set((key, 'XC_'+key,
'XC_LDA_C_'+key, 'XC_GGA_C_'+key, 'XC_MGGA_C_'+key))
def remove_dup(fn_facs):
fn_ids = []
facs = []
n = 0
for key, val in fn_facs:
if key in fn_ids:
facs[fn_ids.index(key)] += val
else:
fn_ids.append(key)
facs.append(val)
n += 1
return list(zip(fn_ids, facs))
if ',' in description:
x_code, c_code = description.replace(' ','').upper().split(',')
for token in x_code.replace('-', '+-').split('+'):
parse_token(token, possible_x_for)
for token in c_code.replace('-', '+-').split('+'):
parse_token(token, possible_c_for)
else:
x_code = description.replace(' ','').upper()
try:
for token in x_code.replace('-', '+-').split('+'):
parse_token(token, possible_xc_for)
except KeyError:
for token in x_code.replace('-', '+-').split('+'):
parse_token(token, possible_x_for)
return hyb[0], remove_dup(fn_facs)
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
r'''Interface to call libxc library to evaluate XC functional, potential
and functional derivatives.
* The given functional xc_code must be a one-line string.
* The functional xc_code is case-insensitive.
* The functional xc_code string has two parts, separated by ",". The
first part describes the exchange functional, the second is the correlation
functional.
- If "," not appeared in string, the entire string is considered as X functional.
- To neglect X functional (just apply C functional), leave blank in the
first part, eg description=',vwn' for pure VWN functional
* The functional name can be placed in arbitrary order. Two name needs to
be separated by operators "+" or "-". Blank spaces are ignored.
NOTE the parser only reads operators "+" "-" "*". / is not in support.
* A functional name is associated with one factor. If the factor is not
given, it is assumed equaling 1.
* String "HF" stands for exact exchange (HF K matrix). It is allowed to
put in C functional part.
* Be careful with the libxc convention on GGA functional, in which the LDA
contribution is included.
Args:
xc_code : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
If "HF" was appeared in the string, it stands for the exact exchange.
rho : ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
Kwargs:
spin : int
spin polarized if spin > 0
relativity : int
No effects.
verbose : int or object of :class:`Logger`
No effects.
Returns:
ex, vxc, fxc, kxc
where
* vxc = (vrho, vsigma, vlapl, vtau) for restricted case
* vxc for unrestricted case
| vrho[:,2] = (u, d)
| vsigma[:,3] = (uu, ud, dd)
| vlapl[:,2] = (u, d)
| vtau[:,2] = (u, d)
* fxc for restricted case:
(v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)
* fxc for unrestricted case:
| v2rho2[:,3] = (u_u, u_d, d_d)
| v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
| v2sigma2[:,6] = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
| v2lapl2[:,3]
| vtau2[:,3]
| v2rholapl[:,4]
| v2rhotau[:,4]
| v2lapltau[:,4]
| v2sigmalapl[:,6]
| v2sigmatau[:,6]
* kxc for restricted case:
(v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3)
* kxc for unrestricted case:
| v3rho3[:,4] = (u_u_u, u_u_d, u_d_d, d_d_d)
| v3rho2sigma[:,9] = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
| v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
| v3sigma3[:,10] = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)
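    Examples:
        A minimal restricted-LDA sketch; it assumes the compiled libxc extension
        is available, and the density values are made up for illustration.
        >>> import numpy
        >>> rho = numpy.array([0.1, 0.2, 0.3])   # density on three grid points
        >>> exc, vxc, fxc, kxc = eval_xc('LDA,VWN', rho, spin=0, deriv=1)
        >>> exc.shape, vxc[0].shape
        ((3,), (3,))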
see also libxc_itrf.c
'''
hyb, fn_facs = parse_xc(xc_code)
return _eval_xc(fn_facs, rho, spin, relativity, deriv, verbose)
SINGULAR_IDS = set((131, # LYP functions
402, 404, 411, 416, 419, # hybrid LYP functions
74 , 75 , 226, 227)) # M11L and MN12L functional
def _eval_xc(fn_facs, rho, spin=0, relativity=0, deriv=1, verbose=None):
assert(deriv <= 3)
if spin == 0:
nspin = 1
rho_u = rho_d = numpy.asarray(rho, order='C')
else:
nspin = 2
rho_u = numpy.asarray(rho[0], order='C')
rho_d = numpy.asarray(rho[1], order='C')
if rho_u.ndim == 1:
rho_u = rho_u.reshape(1,-1)
rho_d = rho_d.reshape(1,-1)
ngrids = rho_u.shape[1]
fn_ids = [x[0] for x in fn_facs]
facs = [x[1] for x in fn_facs]
if all((is_lda(x) for x in fn_ids)):
if spin == 0:
nvar = 1
else:
nvar = 2
elif any((is_meta_gga(x) for x in fn_ids)):
if spin == 0:
nvar = 4
else:
nvar = 9
else: # GGA
if spin == 0:
nvar = 2
else:
nvar = 5
outlen = (math.factorial(nvar+deriv) //
(math.factorial(nvar) * math.factorial(deriv)))
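    # The functionals listed in SINGULAR_IDS (LYP-containing and a few Minnesota
    # functionals) have numerically singular higher derivatives where the density
    # vanishes, so for deriv > 1 they are evaluated only on grid points with both
    # spin densities above a small threshold; the skipped points are zero-filled
    # after the libxc call.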
if SINGULAR_IDS.intersection(fn_ids) and deriv > 1:
non0idx = (rho_u[0] > 1e-10) & (rho_d[0] > 1e-10)
rho_u = numpy.asarray(rho_u[:,non0idx], order='C')
rho_d = numpy.asarray(rho_d[:,non0idx], order='C')
outbuf = numpy.empty((outlen,non0idx.sum()))
else:
outbuf = numpy.empty((outlen,ngrids))
n = len(fn_ids)
_itrf.LIBXC_eval_xc(ctypes.c_int(n),
(ctypes.c_int*n)(*fn_ids), (ctypes.c_double*n)(*facs),
ctypes.c_int(nspin),
ctypes.c_int(deriv), ctypes.c_int(rho_u.shape[1]),
rho_u.ctypes.data_as(ctypes.c_void_p),
rho_d.ctypes.data_as(ctypes.c_void_p),
outbuf.ctypes.data_as(ctypes.c_void_p))
if outbuf.shape[1] != ngrids:
out = numpy.zeros((outlen,ngrids))
out[:,non0idx] = outbuf
outbuf = out
exc = outbuf[0]
vxc = fxc = kxc = None
if nvar == 1: # LDA
if deriv > 0:
vxc = (outbuf[1], None, None, None)
if deriv > 1:
fxc = (outbuf[2],) + (None,)*9
if deriv > 2:
kxc = (outbuf[3], None, None, None)
elif nvar == 2:
if spin == 0: # GGA
if deriv > 0:
vxc = (outbuf[1], outbuf[2], None, None)
if deriv > 1:
fxc = (outbuf[3], outbuf[4], outbuf[5],) + (None,)*7
if deriv > 2:
kxc = outbuf[6:10]
else: # LDA
if deriv > 0:
vxc = (outbuf[1:3].T, None, None, None)
if deriv > 1:
fxc = (outbuf[3:6].T,) + (None,)*9
if deriv > 2:
kxc = (outbuf[6:10].T, None, None, None)
elif nvar == 5: # GGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, None, None)
if deriv > 1:
fxc = (outbuf[6:9].T, outbuf[9:15].T, outbuf[15:21].T) + (None,)*7
if deriv > 2:
kxc = (outbuf[21:25].T, outbuf[25:34].T, outbuf[34:46].T, outbuf[46:56].T)
elif nvar == 4: # MGGA
if deriv > 0:
vxc = outbuf[1:5]
if deriv > 1:
fxc = outbuf[5:15]
elif nvar == 9: # MGGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, outbuf[6:8].T, outbuf[8:10].T)
if deriv > 1:
fxc = (outbuf[10:13].T, outbuf[13:19].T, outbuf[19:25].T,
outbuf[25:28].T, outbuf[28:31].T, outbuf[31:35].T,
outbuf[35:39].T, outbuf[39:43].T, outbuf[43:49].T,
outbuf[49:55].T)
return exc, vxc, fxc, kxc
def define_xc_(ni, description, xctype='LDA', hyb=0):
'''Define XC functional. See also :func:`eval_xc` for the rules of input description.
Args:
ni : an instance of :class:`_NumInt`
description : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
If "HF" was appeared in the string, it stands for the exact exchange.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')
>>> mf = dft.RKS(mol)
>>> define_xc_(mf._numint, '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> define_xc_(mf._numint, 'LDA*.08 + .72*B88 + .2*HF, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> def eval_xc(xc_code, rho, *args, **kwargs):
... exc = 0.01 * rho**2
... vrho = 0.01 * 2 * rho
... vxc = (vrho, None, None, None)
... fxc = None # 2nd order functional derivative
... kxc = None # 3rd order functional derivative
... return exc, vxc, fxc, kxc
>>> define_xc_(mf._numint, eval_xc, xctype='LDA')
>>> mf.kernel()
48.8525211046668
'''
if isinstance(description, str):
ni.eval_xc = lambda xc_code, rho, *args, **kwargs: \
eval_xc(description, rho, *args, **kwargs)
ni.hybrid_coeff = lambda *args, **kwargs: hybrid_coeff(description)
ni._xc_type = lambda *args: xc_type(description)
elif callable(description):
ni.eval_xc = description
ni.hybrid_coeff = lambda *args, **kwargs: hyb
ni._xc_type = lambda *args: xctype
else:
raise RuntimeError('Unknown description %s' % description)
return ni
def define_xc(ni, description):
return define_xc_(copy.copy(ni), description)
define_xc.__doc__ = define_xc_.__doc__
if __name__ == '__main__':
from pyscf import gto, dft
mol = gto.M(
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = '6311g*',)
mf = dft.RKS(mol)
mf.xc = 'b88,lyp'
eref = mf.kernel()
mf = dft.RKS(mol)
mf._numint = define_xc(mf._numint, 'BLYP')
e1 = mf.kernel()
print(e1 - eref)
mf = dft.RKS(mol)
mf._numint = define_xc(mf._numint, 'B3LYP5')
e1 = mf.kernel()
print(e1 - -76.4102840115744)
| 51.106472
| 238
| 0.586275
|
8d594466deaee947cb56bbf6fe330b07b1eff217
| 3,531
|
py
|
Python
|
tests/sources/basic/73-pscosRedis_python/src/modules/testRedis.py
|
mF2C/COMPSsOLD
|
ba5727e818735fd45fba4e9793cefe40456b2e1e
|
[
"Apache-2.0"
] | 1
|
2020-11-25T13:01:27.000Z
|
2020-11-25T13:01:27.000Z
|
tests/sources/basic/73-pscosRedis_python/src/modules/testRedis.py
|
mF2C/COMPSsOLD
|
ba5727e818735fd45fba4e9793cefe40456b2e1e
|
[
"Apache-2.0"
] | 1
|
2019-11-13T14:30:21.000Z
|
2019-11-13T14:30:21.000Z
|
tests/sources/basic/73-pscosRedis_python/src/modules/testRedis.py
|
mF2C/COMPSsOLD
|
ba5727e818735fd45fba4e9793cefe40456b2e1e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench Tasks
========================
"""
# Imports
import unittest
from .psco import PSCO
from .psco_with_tasks import PSCOWithTasks
from pycompss.api.task import task
@task(returns=int)
def compute_sum(psco):
return sum(psco.get_content())
@task(returns=PSCO)
def modifier_task(psco):
psco.set_content('Goodbye world')
return psco
@task(returns=PSCO)
def creator_task(obj):
myPSCO = PSCO(obj)
return myPSCO
@task(returns=list)
def selfConcat(a, b):
a.set_content(a.get_content() * 2)
b.set_content(b.get_content() * 2)
return [a, b]
@task(returns=PSCO)
def inc(x):
x.content += 1
return x
class TestRedis(unittest.TestCase):
def testMakePersistent(self):
myPSCO = PSCO('Hello world')
myPSCO.make_persistent()
self.assertTrue(myPSCO.getID() is not None)
def testDeletePersistent(self):
myPSCO = PSCO('Hello world')
myPSCO.make_persistent()
self.assertFalse(myPSCO.getID() is None)
myPSCO.delete_persistent()
self.assertTrue(myPSCO.getID() is None)
def testPSCOisCorrectlyRead(self):
from pycompss.api.api import compss_wait_on as sync
myPSCO = PSCO([1, 2, 3, 4, 5])
myPSCO.make_persistent()
res = compute_sum(myPSCO)
res = sync(res)
self.assertEqual(res, 15)
def testPSCOisCorrectlyModifiedInsideTask(self):
from pycompss.api.api import compss_wait_on as sync
myPSCO = PSCO('Hello world')
myPSCO = modifier_task(myPSCO)
myPSCO = sync(myPSCO)
self.assertEqual('Goodbye world', myPSCO.get_content())
@unittest.skip("TEMPORARY")
def testPSCOisCorrectlyCreatedInsideTask(self):
from pycompss.api.api import compss_wait_on as sync
        obj = list(range(100))
        myPSCO = creator_task(obj)
myPSCO = sync(myPSCO)
self.assertEqual(list(range(100)), myPSCO.get_content())
def testPipeline(self):
a = PSCO('a')
b = PSCO('b')
c = PSCO('c')
a.make_persistent()
b.make_persistent()
c.make_persistent()
from storage.api import getByID
an, bn, cn = getByID(a.getID(), b.getID(), c.getID())
self.assertEqual(a.get_content(), an.get_content())
self.assertEqual(b.get_content(), bn.get_content())
self.assertEqual(c.get_content(), cn.get_content())
def testMultiParam(self):
from pycompss.api.api import compss_wait_on as sync
a = PSCO('a')
b = PSCO('b')
a.make_persistent()
b.make_persistent()
l = selfConcat(a, b)
l = sync(l)
a, b = l
self.assertEqual('aa', a.get_content())
self.assertEqual('bb', b.get_content())
@unittest.skip("UNSUPPORTED IN REDIS")
def testPSCOwithTasks(self):
from pycompss.api.api import compss_wait_on as sync
obj = PSCOWithTasks("Hello world")
obj.make_persistent()
initialContent = obj.get_content()
obj.set_content("Goodbye world")
modifiedContent = obj.get_content()
iC = sync(initialContent)
mC = sync(modifiedContent)
self.assertEqual('Hello world', iC)
self.assertEqual('Goodbye world', mC)
def testAutoModification(self):
from pycompss.api.api import compss_wait_on as sync
p = creator_task(0)
p = inc(p)
p = inc(p)
p = sync(p)
self.assertEqual(2, p.get_content())
| 26.954198
| 64
| 0.624752
|
e4f4d7fee01ce7ebe200aa10478d40daefc44f6a
| 4,419
|
py
|
Python
|
figure 2b pk fitting.py
|
grantiain/Dendrimer-Drug-Delivery-Model
|
a03d234d4786e1acb5f0f211e4bbe3ec5de1b9a8
|
[
"MIT"
] | 2
|
2020-10-07T22:13:51.000Z
|
2020-10-07T22:14:41.000Z
|
figure 2b pk fitting.py
|
grantiain/Dendrimer-Drug-Delivery-Model
|
a03d234d4786e1acb5f0f211e4bbe3ec5de1b9a8
|
[
"MIT"
] | null | null | null |
figure 2b pk fitting.py
|
grantiain/Dendrimer-Drug-Delivery-Model
|
a03d234d4786e1acb5f0f211e4bbe3ec5de1b9a8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# =============================================================================
# Python script to optimize the PK parameters of the Naked API
#
# iain.grant@astrazeneca.com
# =============================================================================
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import minimize
def c_total(t, k_res, k_h, dose, V_c):
#c_max = dose / V_c
c_max = 170.0
c_tot = c_max * np.exp(-(k_res+k_h) * t)
return c_tot
# Definition of Model
def dxdt(X, t, CL, Vc, k12, k21, k13, k31):
# Iain Grant, AstraZeneca, Macclesfield
# iain.grant@astrazeneca.com
dx0 = -((CL / Vc) + k12 + k13) * X[0] + k21 * X[1] + k31 * X[2]
dx1 = k12 * X[0] - k21 * X[1]
dx2 = k13 * X[0] - k31 * X[2]
return dx0, dx1, dx2
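# Written out, dxdt integrates a three-compartment disposition model (X[0] is the
# amount in the central compartment, X[1] and X[2] the peripheral amounts, with
# elimination only from the central compartment):
#   dX0/dt = -(CL/Vc + k12 + k13)*X0 + k21*X1 + k31*X2
#   dX1/dt =  k12*X0 - k21*X1
#   dX2/dt =  k13*X0 - k31*X2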
# Objective function returning the sum of the residuals squared
def objective_function(parms, t_data, c_data, dose):
# Iain Grant, AstraZeneca, Macclesfield
# iain.grant@astrazeneca.com
# Parameters to be fitted
CL = parms[0]
Vc = parms[1]
k12 = parms[2]
k21 = parms[3]
k13 = parms[4]
k31 = parms[5]
# initial conditions
X_init = [dose, 0.0, 0.0]
soln = odeint(dxdt, X_init, t_data, args=(CL, Vc, k12, k21, k13, k31))
# Convert to concentration
c_model = soln[:, 0] / Vc
    # calculate sum of the residuals squared (weighted by 1 / observed concentration squared)
res_sq = 0.0
for i in range(0,len(c_data)):
res_sq += ((c_data[i] - c_model[i]) ** 2 / c_data[i] ** 2)
return res_sq
# Plot model and experimental data
def plot_data(soln, t_soln, t, c_plasma, Vc, gcol):
# Iain Grant, AstraZeneca, Macclesfield
# iain.grant@astrazeneca.com
plt.plot(t_soln, soln[:,0] / Vc, c = gcol)
plt.plot(t, c_plasma, ls = 'none', marker = 'o', c = gcol,
markeredgecolor = 'k', markersize = '8')
plt.xlabel('Time [hr]', fontsize = 16)
    plt.ylabel(r'Concentration [$\mu$g/mL]', fontsize = 16)
plt.grid(b=True)
plt.show()
def fit_model(init_vals, dose, t_data, c_data, bnds):
# Iain Grant, AstraZeneca, Macclesfield
# iain.grant@astrazeneca.com
res = minimize(objective_function, init_vals, args=(t_data, c_data, dose),
bounds = bnds)
return res.x
# Mouse PK at 5 mg/kg
t_data = [0.0, 0.05, 0.25, 0.5, 1, 2.0, 4.0, 6.0, 8.0]
c_data = [30.0, 15.3, 2.753, 0.977, 0.436, 0.0753, 0.0863, 0.0601, 0.0413]
t_data_all = [0.05, 0.25, 0.5, 1, 2, 4, 6, 8, 0.05, 0.25, 0.5, 1, 2, 4, 6,
8, 0.05, 0.25, 0.5, 1, 2, 4, 6, 8]
c_data_all = [13.9, 2.89, 1.08, 0.579, 0.0797, 0.131, 0.0921, 0.0529, 16,
2.74, 1.03, 0.402, 0.0678, 0.0544, 0.0395, 0.0534, 16, 2.63,
0.822, 0.328, 0.0784, 0.0736, 0.0488, 0.0176]
parms_init = [0.03, 0.004, 2.0, 0.02, 1.0, 1.0]
pmin = [0.01, 0.001, 0.001, 0.001, 0.001, 0.001]
pmax = [0.1, 0.02, 5.0, 5.0, 5.0, 5.0]
bnds = np.c_[pmin, pmax]
ts = np.linspace(0, 12, 300)
# 5 mg/kg in 25 g mouse
dose = 0.125
# Fit model find optimised values for PK parameters
CL, Vc, k12, k21, k13, k31 = fit_model(parms_init, dose, t_data, c_data, bnds)
# Solve model with optimised parameters
X_init = [dose, 0.0, 0.0]
soln = odeint(dxdt, X_init, ts, args=(CL, Vc, k12, k21, k13, k31))
# Plot the experimental data and the fitted model
plt.plot(ts, soln[:, 0] / Vc, color = 'k')
plt.plot(t_data_all, c_data_all, marker = '^', color = 'k', ls = 'None',
markersize = 8, label = 'AZD4320')
plt.ylabel(r'$C_{pl}$ AZD4320 ($\mu$g/ml)', fontsize=16, labelpad=12)
plt.xlabel(r'Time (h)', fontsize=16, labelpad=12)
plt.grid(linestyle='--', linewidth=0.5)
plt.xscale('linear')
plt.yscale('log')
plt.legend(loc = 0 , numpoints=1, prop={'size': 13.5})
plt.xlim(-0.4,12.4)
plt.ylim(0.008, 1500)
plt.tick_params(labelsize=15)
plt.xticks(np.arange(0.0, 14.0, step=2.0))
plt.tight_layout()
plt.savefig('figure2b_chart.svg', format='svg')
plt.show()
# Optimised Parameters (5 mg/kg in mouse)
print('CL = ', CL, ' L/hr')
print('Vc = ', Vc, ' L')
print('k12 = ', k12, ' per hour')
print('k21 = ', k21, ' per hour')
print('k13 = ', k13, ' per hour')
print('k31 = ', k31, ' per hour')
| 30.267123
| 80
| 0.560987
|
060842c1fb8a624b342ce31824abc79e9e687eeb
| 8,344
|
py
|
Python
|
pdf4me/Pdf4mePythonClientApi/pdf4me/model/mail_merge_doc_action.py
|
pdf4me/pdf4me-clientapi-python
|
17a1c1baae861369084d658475be56be42ca92d0
|
[
"MIT"
] | 1
|
2020-06-30T22:18:17.000Z
|
2020-06-30T22:18:17.000Z
|
pdf4me/Pdf4mePythonClientApi/pdf4me/model/mail_merge_doc_action.py
|
pdf4me/pdf4me-clientapi-python
|
17a1c1baae861369084d658475be56be42ca92d0
|
[
"MIT"
] | null | null | null |
pdf4me/Pdf4mePythonClientApi/pdf4me/model/mail_merge_doc_action.py
|
pdf4me/pdf4me-clientapi-python
|
17a1c1baae861369084d658475be56be42ca92d0
|
[
"MIT"
] | 1
|
2018-07-10T17:40:37.000Z
|
2018-07-10T17:40:37.000Z
|
# coding: utf-8
"""
Pdf4me
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MailMergeDocAction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'_print': 'bool',
'print_package': 'str',
'envelope_delivery': 'str',
'envelope_type': 'str',
'print_pool': 'str',
'country': 'str',
'receiver_id': 'str',
'action_id': 'str'
}
attribute_map = {
'_print': 'print',
'print_package': 'printPackage',
'envelope_delivery': 'envelopeDelivery',
'envelope_type': 'envelopeType',
'print_pool': 'printPool',
'country': 'country',
'receiver_id': 'receiverId',
'action_id': 'actionId'
}
def __init__(self, _print=None, print_package=None, envelope_delivery=None, envelope_type=None, print_pool=None, country=None, receiver_id=None, action_id=None): # noqa: E501
"""MailMergeDocAction - a model defined in Swagger""" # noqa: E501
self.__print = None
self._print_package = None
self._envelope_delivery = None
self._envelope_type = None
self._print_pool = None
self._country = None
self._receiver_id = None
self._action_id = None
self.discriminator = None
if _print is not None:
self._print = _print
if print_package is not None:
self.print_package = print_package
if envelope_delivery is not None:
self.envelope_delivery = envelope_delivery
if envelope_type is not None:
self.envelope_type = envelope_type
if print_pool is not None:
self.print_pool = print_pool
if country is not None:
self.country = country
if receiver_id is not None:
self.receiver_id = receiver_id
if action_id is not None:
self.action_id = action_id
@property
def _print(self):
"""Gets the _print of this MailMergeDocAction. # noqa: E501
:return: The _print of this MailMergeDocAction. # noqa: E501
:rtype: bool
"""
return self.__print
@_print.setter
def _print(self, _print):
"""Sets the _print of this MailMergeDocAction.
:param _print: The _print of this MailMergeDocAction. # noqa: E501
:type: bool
"""
self.__print = _print
@property
def print_package(self):
"""Gets the print_package of this MailMergeDocAction. # noqa: E501
:return: The print_package of this MailMergeDocAction. # noqa: E501
:rtype: str
"""
return self._print_package
@print_package.setter
def print_package(self, print_package):
"""Sets the print_package of this MailMergeDocAction.
:param print_package: The print_package of this MailMergeDocAction. # noqa: E501
:type: str
"""
self._print_package = print_package
@property
def envelope_delivery(self):
"""Gets the envelope_delivery of this MailMergeDocAction. # noqa: E501
:return: The envelope_delivery of this MailMergeDocAction. # noqa: E501
:rtype: str
"""
return self._envelope_delivery
@envelope_delivery.setter
def envelope_delivery(self, envelope_delivery):
"""Sets the envelope_delivery of this MailMergeDocAction.
:param envelope_delivery: The envelope_delivery of this MailMergeDocAction. # noqa: E501
:type: str
"""
self._envelope_delivery = envelope_delivery
@property
def envelope_type(self):
"""Gets the envelope_type of this MailMergeDocAction. # noqa: E501
:return: The envelope_type of this MailMergeDocAction. # noqa: E501
:rtype: str
"""
return self._envelope_type
@envelope_type.setter
def envelope_type(self, envelope_type):
"""Sets the envelope_type of this MailMergeDocAction.
:param envelope_type: The envelope_type of this MailMergeDocAction. # noqa: E501
:type: str
"""
self._envelope_type = envelope_type
@property
def print_pool(self):
"""Gets the print_pool of this MailMergeDocAction. # noqa: E501
:return: The print_pool of this MailMergeDocAction. # noqa: E501
:rtype: str
"""
return self._print_pool
@print_pool.setter
def print_pool(self, print_pool):
"""Sets the print_pool of this MailMergeDocAction.
:param print_pool: The print_pool of this MailMergeDocAction. # noqa: E501
:type: str
"""
self._print_pool = print_pool
@property
def country(self):
"""Gets the country of this MailMergeDocAction. # noqa: E501
:return: The country of this MailMergeDocAction. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this MailMergeDocAction.
:param country: The country of this MailMergeDocAction. # noqa: E501
:type: str
"""
self._country = country
@property
def receiver_id(self):
"""Gets the receiver_id of this MailMergeDocAction. # noqa: E501
:return: The receiver_id of this MailMergeDocAction. # noqa: E501
:rtype: str
"""
return self._receiver_id
@receiver_id.setter
def receiver_id(self, receiver_id):
"""Sets the receiver_id of this MailMergeDocAction.
:param receiver_id: The receiver_id of this MailMergeDocAction. # noqa: E501
:type: str
"""
self._receiver_id = receiver_id
@property
def action_id(self):
"""Gets the action_id of this MailMergeDocAction. # noqa: E501
:return: The action_id of this MailMergeDocAction. # noqa: E501
:rtype: str
"""
return self._action_id
@action_id.setter
def action_id(self, action_id):
"""Sets the action_id of this MailMergeDocAction.
:param action_id: The action_id of this MailMergeDocAction. # noqa: E501
:type: str
"""
self._action_id = action_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MailMergeDocAction, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MailMergeDocAction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28
| 179
| 0.600671
|
2aeb64e467e0c3ac5bb19e1f9e802dfcbdc51bf9
| 935
|
py
|
Python
|
core/migrations/0004_auto_20180423_1619.py
|
kaedroho/dit-directory-cms
|
67c15eeed19e7b3583f1fce1969230ddf83b6813
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
core/migrations/0004_auto_20180423_1619.py
|
kaedroho/dit-directory-cms
|
67c15eeed19e7b3583f1fce1969230ddf83b6813
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
core/migrations/0004_auto_20180423_1619.py
|
kaedroho/dit-directory-cms
|
67c15eeed19e7b3583f1fce1969230ddf83b6813
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-04-23 16:19
from __future__ import unicode_literals
from django.db import migrations
def set_historic_slugs(apps, schema_manager):
Page = apps.get_model('wagtailcore', 'Page')
HistoricSlug = apps.get_model('core', 'HistoricSlug')
for model_class in Page.__subclasses__():
historic_model_class = apps.get_model(
model_class._meta.app_label,
model_class._meta.model_name
)
for page in historic_model_class.objects.all():
HistoricSlug.objects.create(
page=page,
slug=page.slug
)
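# The migration below registers RunPython.noop as the reverse operation, so rolling
# it back leaves any HistoricSlug rows created here in place.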
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20180423_1122'),
]
operations = [
migrations.RunPython(
set_historic_slugs,
migrations.RunPython.noop,
elidable=True
)
]
| 24.605263
| 57
| 0.617112
|
a8ed9e2ca244d64b50626ac22456be575f3451e7
| 2,331
|
py
|
Python
|
Copycat/Framework/copycat/test.py
|
um-dsp/Morphence
|
0a109548bba87ca33fd98a6da7197967ea2d1e1b
|
[
"MIT"
] | 7
|
2021-09-01T05:12:37.000Z
|
2022-03-11T08:39:50.000Z
|
Copycat/Framework/copycat/test.py
|
Zhuosd/Morphence
|
781d84ebc884ee3053a0355adfbd20312c627308
|
[
"MIT"
] | null | null | null |
Copycat/Framework/copycat/test.py
|
Zhuosd/Morphence
|
781d84ebc884ee3053a0355adfbd20312c627308
|
[
"MIT"
] | 1
|
2021-10-18T08:07:50.000Z
|
2021-10-18T08:07:50.000Z
|
"""
@author: Abderrahmen Amich
@email: aamich@umich.edu
"""
#torch
import torch
#local files
from model import CNN
from cifar_data import get_datasets
from train_mnist import PyNet, ld_mnist
#general
from tqdm import tqdm
import numpy as np
from sklearn.metrics import f1_score
#system
from sys import argv, exit, stderr
if __name__ == '__main__':
if len(argv) != 4:
print('Use: {} model_file.pth target_model.pth data_name'.format(argv[0]), file=stderr)
exit(1)
data_name = argv[3]
if data_name == 'CIFAR10':
model = CNN()
target = CNN()
model.load_state_dict(torch.load(argv[1]))
target.load_state_dict(torch.load(argv[2]))
batch_size = 128
dataset = get_datasets(train=False, batch=batch_size)
elif data_name == 'MNIST':
model = PyNet()
target = PyNet()
model.load_state_dict(torch.load(argv[1]))
target.load_state_dict(torch.load(argv[2]))
batch_size = 128
dataset = ld_mnist(batch_size=batch_size)
print('Testing the model...')
correct = 0
matching = 0
total = 0
#results = np.zeros([dataset['n_test'], 2], dtype=np.int)
res_pos = 0
with torch.no_grad():
model.eval()
target.eval()
for data in tqdm(dataset['test']):
images, labels = data
outputs = model(images)
            outputs_target = target(images)
            _, predicted = torch.max(outputs.data, 1)
            _, predicted_target = torch.max(outputs_target.data, 1)
#results[res_pos:res_pos+batch_size, :] = np.array([labels.tolist(), predicted.tolist()]).T
res_pos += batch_size
total += labels.size(0)
correct += (predicted == labels).sum().item()
matching += (predicted_target == predicted).sum().item()
#micro_avg = f1_score(results[:,0], results[:,1], average='micro')
#macro_avg = f1_score(results[:,0], results[:,1], average='macro')
#print('\nAverage: {:.2f}% ({:d} images)'.format(100. * (correct/total), total))
#print('Micro Average: {:.6f}'.format(micro_avg))
#print('Macro Average: {:.6f}'.format(macro_avg))
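    # Accuracy compares the copy's predictions with the ground-truth labels, while
    # fidelity measures how often the copy agrees with the target model's predictions.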
print('Accuracy: {:.2f}%'.format(100. * correct / total))
print('Fidelity: {:.2f}%'.format(100. * matching / total))
| 31.5
| 103
| 0.607036
|
e0649b9b5760fe1bb91cf34f254ae9c11debb326
| 2,186
|
py
|
Python
|
projects/python_plugin_code/pandas_1.1_code/tests/test_highlight_max.py
|
rendner/py-plugin-dataframe-viewer
|
188585bd31a6c14413949865b3467dcbf6f5e2d1
|
[
"Apache-2.0"
] | 1
|
2021-11-07T03:47:51.000Z
|
2021-11-07T03:47:51.000Z
|
projects/python_plugin_code/pandas_1.2_code/tests/test_highlight_max.py
|
rendner/py-plugin-dataframe-viewer
|
188585bd31a6c14413949865b3467dcbf6f5e2d1
|
[
"Apache-2.0"
] | null | null | null |
projects/python_plugin_code/pandas_1.2_code/tests/test_highlight_max.py
|
rendner/py-plugin-dataframe-viewer
|
188585bd31a6c14413949865b3467dcbf6f5e2d1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 cms.rendner (Daniel Schmidt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytest
from tests.helpers.asserts.assert_styler import create_and_assert_patched_styler
df = pd.DataFrame.from_dict({
"col_0": [0, 1, 2, 3, 4],
"col_1": [5, 6, 7, 8, 9],
"col_2": [10, 11, 12, 13, 14],
"col_3": [15, 16, 17, 18, 19],
"col_4": [20, 21, 22, 23, 24],
})
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("subset", [None, pd.IndexSlice[2:3, ["col_2", "col_3"]]])
@pytest.mark.parametrize("color", [None, "pink"])
@pytest.mark.parametrize(
"rows_per_chunk, cols_per_chunk", [
(1, 2),
(len(df.index), len(df.columns)) # single chunk
])
def test_chunked(axis, subset, color, rows_per_chunk, cols_per_chunk):
create_and_assert_patched_styler(
df,
lambda styler: styler.highlight_max(axis=axis, subset=subset, color=color),
rows_per_chunk,
cols_per_chunk
)
@pytest.mark.parametrize("axis", [None, 0, 1, "index", "columns"])
@pytest.mark.parametrize("subset", [None, pd.IndexSlice[2:3, ["col_2", "col_3"]]])
def test_can_handle_axis_values(axis, subset):
create_and_assert_patched_styler(
df,
lambda styler: styler.highlight_max(axis=axis, subset=subset),
2,
2
)
@pytest.mark.parametrize("subset", [
2, # reduce to row
"col_2", # reduce to column
(2, "col_2"), # reduce to scalar
])
def test_frame_can_handle_reducing_subset(subset):
create_and_assert_patched_styler(
df,
lambda styler: styler.highlight_max(axis=None, subset=subset),
2,
2
)
| 32.147059
| 83
| 0.668801
|
5f541da3d222b91958f0dafae28e1b7c91caad34
| 4,911
|
py
|
Python
|
d7a/dll/test/access_profile.py
|
L-I-Am/pyd7a
|
7e3dd6ff71c92df72570d6b852ca74cc5af50707
|
[
"Apache-2.0"
] | 9
|
2016-05-12T20:11:30.000Z
|
2020-08-18T05:46:15.000Z
|
d7a/dll/test/access_profile.py
|
L-I-Am/pyd7a
|
7e3dd6ff71c92df72570d6b852ca74cc5af50707
|
[
"Apache-2.0"
] | 2
|
2018-01-14T12:39:06.000Z
|
2019-11-25T09:11:08.000Z
|
d7a/dll/test/access_profile.py
|
L-I-Am/pyd7a
|
7e3dd6ff71c92df72570d6b852ca74cc5af50707
|
[
"Apache-2.0"
] | 7
|
2016-09-06T11:08:15.000Z
|
2020-10-27T10:29:24.000Z
|
#
# Copyright (c) 2015-2021 University of Antwerp, Aloxy NV.
#
# This file is part of pyd7a.
# See https://github.com/Sub-IoT/pyd7a for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from bitstring import ConstBitStream
from d7a.dll.access_profile import AccessProfile, CsmaCaMode, SubBand
from d7a.dll.sub_profile import SubProfile
from d7a.phy.channel_header import ChannelHeader, ChannelBand, ChannelCoding, ChannelClass
from d7a.types.ct import CT
class TestAccessProfile(unittest.TestCase):
valid_channel_header = ChannelHeader(
channel_class=ChannelClass.NORMAL_RATE,
channel_coding=ChannelCoding.PN9,
channel_band=ChannelBand.BAND_433
)
valid_sub_bands = [
SubBand(),
SubBand(),
SubBand(),
SubBand(),
SubBand(),
SubBand(),
SubBand(),
SubBand()
]
valid_sub_profiles = [
SubProfile(),
SubProfile(),
SubProfile(),
SubProfile()
]
def test_validation_ok(self):
ap = AccessProfile(channel_header=self.valid_channel_header,
sub_profiles=self.valid_sub_profiles,
sub_bands=self.valid_sub_bands)
def test_validation_sub_profiles(self):
def bad():
ap = AccessProfile(channel_header=self.valid_channel_header,
sub_profiles=[],
sub_bands=self.valid_sub_bands)
self.assertRaises(ValueError, bad)
def test_validation_sub_profiles_count(self):
def bad():
sub_profiles = [SubProfile() for _ in range(10)] # too many ...
ap = AccessProfile(channel_header=self.valid_channel_header,
sub_profiles=sub_profiles,
sub_bands=self.valid_sub_bands)
self.assertRaises(ValueError, bad)
def test_validation_sub_bands_type(self):
def bad():
ap = AccessProfile(channel_header=self.valid_channel_header,
sub_profiles=self.valid_sub_profiles,
sub_bands=[None])
self.assertRaises(ValueError, bad)
def test_validation_sub_bands_count(self):
def bad():
sub_bands = [SubBand() for _ in range(10)] # too many ...
ap = AccessProfile(channel_header=self.valid_channel_header,
sub_profiles=self.valid_sub_profiles,
sub_bands=sub_bands)
self.assertRaises(ValueError, bad)
def test_byte_generation(self):
expected = [
0b00101000, # channel header
]
for _ in xrange(AccessProfile.NUMBER_OF_SUB_PROFILES):
expected.extend(list(bytearray(SubProfile())))
expected.extend(list(bytearray(SubBand()))) # only one sub_band
ap = AccessProfile(channel_header=self.valid_channel_header,
sub_bands=[SubBand()],
sub_profiles=self.valid_sub_profiles)
bytes = bytearray(ap)
for i in xrange(len(bytes)):
self.assertEqual(expected[i], bytes[i])
self.assertEqual(len(expected), len(bytes))
def test_parse(self):
bytes = list(bytearray(self.valid_channel_header))
for _ in xrange(AccessProfile.NUMBER_OF_SUB_PROFILES):
bytes.extend(list(bytearray(SubProfile())))
for _ in range(AccessProfile.MAX_NUMBER_OF_SUB_BANDS):
bytes.extend(list(bytearray(SubBand())))
ap = AccessProfile.parse(ConstBitStream(bytes=bytes))
self.assertEqual(ap.channel_header.channel_band, self.valid_channel_header.channel_band)
self.assertEqual(ap.channel_header.channel_coding, self.valid_channel_header.channel_coding)
self.assertEqual(ap.channel_header.channel_class, self.valid_channel_header.channel_class)
self.assertEqual(len(ap.sub_bands), AccessProfile.MAX_NUMBER_OF_SUB_BANDS)
for sb in ap.sub_bands:
self.assertEqual(sb.channel_index_start, SubBand().channel_index_start)
self.assertEqual(sb.channel_index_end, SubBand().channel_index_end)
self.assertEqual(sb.cca, SubBand().cca)
self.assertEqual(sb.duty, SubBand().duty)
self.assertEqual(sb.eirp, SubBand().eirp)
for sp in ap.sub_profiles:
self.assertEqual(sp.subband_bitmap, SubProfile().subband_bitmap)
self.assertEqual(sp.scan_automation_period.exp, SubProfile().scan_automation_period.exp)
self.assertEqual(sp.scan_automation_period.mant, SubProfile().scan_automation_period.mant)
self.assertEqual(len(ap.sub_profiles), AccessProfile.NUMBER_OF_SUB_PROFILES)
| 34.104167
| 96
| 0.706984
|
7cfcb3627623439dac4fc3af3da94dda0cdd839b
| 473
|
py
|
Python
|
aoc_wim/aoc2018/q25.py
|
wimglenn/advent-of-code-wim
|
6308c3fa5d29b318680419f877fd5b8ac1359b5d
|
[
"WTFPL"
] | 20
|
2019-10-15T07:33:13.000Z
|
2022-01-19T13:40:36.000Z
|
aoc_wim/aoc2018/q25.py
|
wimglenn/advent-of-code-wim
|
6308c3fa5d29b318680419f877fd5b8ac1359b5d
|
[
"WTFPL"
] | 5
|
2019-02-01T23:31:27.000Z
|
2021-12-03T06:55:58.000Z
|
aoc_wim/aoc2018/q25.py
|
wimglenn/advent-of-code-wim
|
6308c3fa5d29b318680419f877fd5b8ac1359b5d
|
[
"WTFPL"
] | 8
|
2019-12-03T15:41:23.000Z
|
2021-12-06T17:13:57.000Z
|
"""
--- Day 25: Four-Dimensional Adventure ---
https://adventofcode.com/2018/day/25
"""
from itertools import combinations
import networkx as nx
from aocd import data
nodes = [tuple(int(n) for n in s.split(",")) for s in data.splitlines()]
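# Points within Manhattan distance 3 of each other belong to the same constellation,
# so the answer is the number of connected components of this proximity graph.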
graph = nx.Graph()
graph.add_nodes_from(nodes)
for node1, node2 in combinations(nodes, 2):
if sum(abs(x - y) for x, y in zip(node1, node2)) <= 3:
graph.add_edge(node1, node2)
print(nx.number_connected_components(graph))
| 29.5625
| 72
| 0.704017
|
93f02bc5dd8280249dd537a719197f79f717be91
| 20,108
|
py
|
Python
|
adb_shell/transport/usb_transport.py
|
maybe-sybr/adb_shell
|
41c52ed9a315735ecb7187d905fec65eaeea73a5
|
[
"Apache-2.0"
] | 268
|
2019-09-25T16:38:51.000Z
|
2022-03-31T07:08:17.000Z
|
adb_shell/transport/usb_transport.py
|
maybe-sybr/adb_shell
|
41c52ed9a315735ecb7187d905fec65eaeea73a5
|
[
"Apache-2.0"
] | 73
|
2019-09-30T14:25:38.000Z
|
2022-01-23T23:04:29.000Z
|
adb_shell/transport/usb_transport.py
|
maybe-sybr/adb_shell
|
41c52ed9a315735ecb7187d905fec65eaeea73a5
|
[
"Apache-2.0"
] | 48
|
2019-11-05T20:37:59.000Z
|
2022-03-09T08:12:06.000Z
|
# Copyright (c) 2021 Jeff Irion and contributors
#
# This file is part of the adb-shell package. It incorporates work
# covered by the following license notice:
#
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for creating a USB connection with the device and sending and receiving data.
.. warning::
USB support is an experimental feature.
* :func:`get_interface`
* :func:`interface_matcher`
* :class:`UsbTransport`
* :meth:`UsbTransport._find`
* :meth:`UsbTransport._find_and_open`
* :meth:`UsbTransport._find_devices`
* :meth:`UsbTransport._find_first`
* :meth:`UsbTransport._flush_buffers`
* :meth:`UsbTransport._open`
* :meth:`UsbTransport._port_path_matcher`
* :meth:`UsbTransport._serial_matcher`
* :meth:`UsbTransport._timeout`
* :meth:`UsbTransport.bulk_read`
* :meth:`UsbTransport.bulk_write`
* :meth:`UsbTransport.close`
* :meth:`UsbTransport.connect`
* :attr:`UsbTransport.port_path`
* :attr:`UsbTransport.serial_number`
* :attr:`UsbTransport.usb_info`
"""
import logging
import platform
import re
import threading
import warnings
import weakref
import usb1
from .base_transport import BaseTransport
from .. import exceptions
#: Default timeout
DEFAULT_TIMEOUT_S = 10
SYSFS_PORT_SPLIT_RE = re.compile("[,/:.-]")
_LOGGER = logging.getLogger(__name__)
CLASS = usb1.CLASS_VENDOR_SPEC # pylint: disable=no-member
SUBCLASS = 0x42
PROTOCOL = 0x01
def get_interface(setting): # pragma: no cover
"""Get the class, subclass, and protocol for the given USB setting.
Parameters
----------
setting : TODO
TODO
Returns
-------
TODO
TODO
TODO
TODO
TODO
TODO
"""
return (setting.getClass(), setting.getSubClass(), setting.getProtocol())
def interface_matcher(clazz, subclass, protocol): # pragma: no cover
"""Returns a matcher that returns the setting with the given interface.
Parameters
----------
clazz : TODO
TODO
subclass : TODO
TODO
protocol : TODO
TODO
Returns
-------
matcher : function
TODO
"""
interface = (clazz, subclass, protocol)
def matcher(device):
"""TODO
Parameters
----------
device : TODO
TODO
Returns
-------
TODO, None
TODO
"""
for setting in device.iterSettings():
if get_interface(setting) == interface:
return setting
return None
return matcher
class UsbTransport(BaseTransport): # pragma: no cover
"""USB communication object. Not thread-safe.
Handles reading and writing over USB with the proper endpoints, exceptions,
and interface claiming.
Parameters
----------
device : usb1.USBDevice
libusb_device to connect to.
setting : usb1.USBInterfaceSetting
libusb setting with the correct endpoints to communicate with.
usb_info : TODO, None
String describing the usb path/serial/device, for debugging.
default_transport_timeout_s : TODO, None
Timeout in seconds for all I/O.
Attributes
----------
_default_transport_timeout_s : TODO, None
Timeout in seconds for all I/O.
_device : TODO
libusb_device to connect to.
_transport : TODO
TODO
_interface_number : TODO
TODO
_max_read_packet_len : TODO
TODO
_read_endpoint : TODO
TODO
_setting : TODO
libusb setting with the correct endpoints to communicate with.
_usb_info : TODO
String describing the usb path/serial/device, for debugging.
_write_endpoint : TODO, None
TODO
"""
_HANDLE_CACHE = weakref.WeakValueDictionary()
_HANDLE_CACHE_LOCK = threading.Lock()
def __init__(self, device, setting, usb_info=None, default_transport_timeout_s=None):
self._setting = setting
self._device = device
self._transport = None
self._interface_number = None
self._read_endpoint = None
self._write_endpoint = None
self._usb_info = usb_info or ''
self._default_transport_timeout_s = default_transport_timeout_s if default_transport_timeout_s is not None else DEFAULT_TIMEOUT_S
self._max_read_packet_len = 0
def close(self):
"""Close the USB connection.
"""
if self._transport is None:
return
try:
self._transport.releaseInterface(self._interface_number)
self._transport.close()
except usb1.USBError:
_LOGGER.info('USBError while closing transport %s: ', self.usb_info, exc_info=True)
finally:
self._transport = None
def connect(self, transport_timeout_s=None):
"""Create a USB connection to the device.
Parameters
----------
transport_timeout_s : float, None
Set the timeout on the USB instance
"""
read_endpoint = None
write_endpoint = None
for endpoint in self._setting.iterEndpoints():
address = endpoint.getAddress()
if address & usb1.ENDPOINT_DIR_MASK: # pylint: disable=no-member
read_endpoint = address
# max_read_packet_len = endpoint.getMaxPacketSize()
else:
write_endpoint = address
assert read_endpoint is not None
assert write_endpoint is not None
transport = self._device.open()
iface_number = self._setting.getNumber()
try:
if (platform.system() != 'Windows' and transport.kernelDriverActive(iface_number)):
transport.detachKernelDriver(iface_number)
except usb1.USBErrorNotFound: # pylint: disable=no-member
            warnings.warn('Kernel driver not found for interface: %s.' % iface_number)
# # When this object is deleted, make sure it's closed.
# weakref.ref(self, self.close)
self._transport = transport
self._read_endpoint = read_endpoint
self._write_endpoint = write_endpoint
self._interface_number = iface_number
self._transport.claimInterface(self._interface_number)
def bulk_read(self, numbytes, transport_timeout_s=None):
"""Receive data from the USB device.
Parameters
----------
numbytes : int
The maximum amount of data to be received
transport_timeout_s : float, None
            Timeout for the bulk read, in seconds; if ``None``, the transport's default timeout is used.
Returns
-------
bytes
The received data
Raises
------
adb_shell.exceptions.UsbReadFailedError
Could not receive data
"""
if self._transport is None:
raise exceptions.UsbReadFailedError('This transport has been closed, probably due to another being opened.', None)
try:
# python-libusb1 > 1.6 exposes bytearray()s now instead of bytes/str.
# To support older and newer versions, we ensure everything's bytearray()
# from here on out.
return bytes(self._transport.bulkRead(self._read_endpoint, numbytes, timeout=self._timeout_ms(transport_timeout_s)))
except usb1.USBError as e:
raise exceptions.UsbReadFailedError('Could not receive data from %s (timeout %sms)' % (self.usb_info, self._timeout_ms(transport_timeout_s)), e)
def bulk_write(self, data, transport_timeout_s=None):
"""Send data to the USB device.
Parameters
----------
data : bytes
The data to be sent
transport_timeout_s : float, None
            Timeout for the bulk write, in seconds; if ``None``, the transport's default timeout is used.
Returns
-------
int
The number of bytes sent
Raises
------
adb_shell.exceptions.UsbWriteFailedError
This transport has been closed, probably due to another being opened
adb_shell.exceptions.UsbWriteFailedError
Could not send data
"""
if self._transport is None:
raise exceptions.UsbWriteFailedError('This transport has been closed, probably due to another being opened.', None)
try:
return self._transport.bulkWrite(self._write_endpoint, data, timeout=self._timeout_ms(transport_timeout_s))
except usb1.USBError as e:
raise exceptions.UsbWriteFailedError('Could not send data to %s (timeout %sms)' % (self.usb_info, self._timeout_ms(transport_timeout_s)), e)
def _open(self):
"""Opens the USB device for this setting, and claims the interface.
"""
# Make sure we close any previous transport open to this usb device.
port_path = tuple(self.port_path)
with self._HANDLE_CACHE_LOCK:
old_transport = self._HANDLE_CACHE.get(port_path)
if old_transport is not None:
                old_transport.close()
self._read_endpoint = None
self._write_endpoint = None
for endpoint in self._setting.iterEndpoints():
address = endpoint.getAddress()
if address & usb1.USB_ENDPOINT_DIR_MASK: # pylint: disable=no-member
self._read_endpoint = address
self._max_read_packet_len = endpoint.getMaxPacketSize()
else:
self._write_endpoint = address
assert self._read_endpoint is not None
assert self._write_endpoint is not None
transport = self._device.open()
iface_number = self._setting.getNumber()
try:
if (platform.system() != 'Windows' and transport.kernelDriverActive(iface_number)):
transport.detachKernelDriver(iface_number)
except usb1.USBErrorNotFound: # pylint: disable=no-member
            warnings.warn('Kernel driver not found for interface: %s.' % iface_number)
transport.claimInterface(iface_number)
self._transport = transport
self._interface_number = iface_number
with self._HANDLE_CACHE_LOCK:
self._HANDLE_CACHE[port_path] = self
# When this object is deleted, make sure it's closed.
weakref.ref(self, self.close)
def _timeout_ms(self, transport_timeout_s):
"""TODO
Returns
-------
TODO
TODO
"""
return int(transport_timeout_s * 1000 if transport_timeout_s is not None else self._default_transport_timeout_s * 1000)
def _flush_buffers(self):
"""TODO
Raises
------
adb_shell.exceptions.UsbReadFailedError
TODO
"""
while True:
try:
self.bulk_read(self._max_read_packet_len, transport_timeout_s=10)
except exceptions.UsbReadFailedError as e:
if isinstance(e.usb_error, usb1.USBErrorTimeout): # pylint: disable=no-member
break
raise
# ======================================================================= #
# #
# Properties #
# #
# ======================================================================= #
@property
def port_path(self):
"""TODO
Returns
-------
TODO
TODO
"""
return [self._device.getBusNumber()] + self._device.getPortNumberList()
@property
def serial_number(self):
"""TODO
Returns
-------
TODO
TODO
"""
return self._device.getSerialNumber()
@property
def usb_info(self):
"""TODO
Returns
-------
TODO
TODO
"""
try:
sn = self.serial_number
except usb1.USBError:
sn = ''
if sn and sn != self._usb_info:
return '%s %s' % (self._usb_info, sn)
return self._usb_info
# ======================================================================= #
# #
# Matchers #
# #
# ======================================================================= #
@classmethod
def _port_path_matcher(cls, port_path):
"""Returns a device matcher for the given port path.
Parameters
----------
port_path : TODO
TODO
Returns
-------
function
TODO
"""
if isinstance(port_path, str):
# Convert from sysfs path to port_path.
port_path = [int(part) for part in SYSFS_PORT_SPLIT_RE.split(port_path)]
return lambda device: device.port_path == port_path
@classmethod
def _serial_matcher(cls, serial):
"""Returns a device matcher for the given serial.
Parameters
----------
serial : TODO
TODO
Returns
-------
function
TODO
"""
return lambda device: device.serial_number == serial
# ======================================================================= #
# #
# Finders #
# #
# ======================================================================= #
@classmethod
def _find(cls, setting_matcher, port_path=None, serial=None, default_transport_timeout_s=None):
"""Gets the first device that matches according to the keyword args.
Parameters
----------
setting_matcher : TODO
TODO
port_path : TODO, None
TODO
serial : TODO, None
TODO
default_transport_timeout_s : TODO, None
TODO
Returns
-------
TODO
TODO
"""
if port_path:
device_matcher = cls._port_path_matcher(port_path)
usb_info = port_path
elif serial:
device_matcher = cls._serial_matcher(serial)
usb_info = serial
else:
device_matcher = None
usb_info = 'first'
return cls._find_first(setting_matcher, device_matcher, usb_info=usb_info, default_transport_timeout_s=default_transport_timeout_s)
@classmethod
def _find_and_open(cls, setting_matcher, port_path=None, serial=None, default_transport_timeout_s=None):
"""TODO
Parameters
----------
setting_matcher : TODO
TODO
port_path : TODO, None
TODO
serial : TODO, None
TODO
default_transport_timeout_s : TODO, None
TODO
Returns
-------
dev : TODO
TODO
"""
dev = cls._find(setting_matcher, port_path=port_path, serial=serial, default_transport_timeout_s=default_transport_timeout_s)
dev._open() # pylint: disable=protected-access
dev._flush_buffers() # pylint: disable=protected-access
return dev
@classmethod
def _find_devices(cls, setting_matcher, device_matcher=None, usb_info='', default_transport_timeout_s=None):
"""_find and yield the devices that match.
Parameters
----------
setting_matcher : TODO
Function that returns the setting to use given a ``usb1.USBDevice``, or ``None``
if the device doesn't have a valid setting.
device_matcher : TODO, None
Function that returns ``True`` if the given ``UsbTransport`` is
valid. ``None`` to match any device.
usb_info : str
Info string describing device(s).
default_transport_timeout_s : TODO, None
Default timeout of commands in seconds.
Yields
------
TODO
UsbTransport instances
"""
with usb1.USBContext() as ctx:
for device in ctx.getDeviceList(skip_on_error=True):
setting = setting_matcher(device)
if setting is None:
continue
transport = cls(device, setting, usb_info=usb_info, default_transport_timeout_s=default_transport_timeout_s)
if device_matcher is None or device_matcher(transport):
yield transport
@classmethod
def _find_first(cls, setting_matcher, device_matcher=None, usb_info='', default_transport_timeout_s=None):
"""Find and return the first matching device.
Parameters
----------
setting_matcher : TODO
Function that returns the setting to use given a ``usb1.USBDevice``, or ``None``
if the device doesn't have a valid setting.
device_matcher : TODO
Function that returns ``True`` if the given ``UsbTransport`` is
valid. ``None`` to match any device.
usb_info : str
Info string describing device(s).
default_transport_timeout_s : TODO, None
Default timeout of commands in seconds.
Returns
-------
TODO
An instance of `UsbTransport`
Raises
------
adb_shell.exceptions.DeviceNotFoundError
Raised if the device is not available.
"""
try:
return next(cls._find_devices(setting_matcher, device_matcher=device_matcher, usb_info=usb_info, default_transport_timeout_s=default_transport_timeout_s))
except StopIteration:
raise exceptions.UsbDeviceNotFoundError('No device available, or it is in the wrong configuration.')
@classmethod
def find_adb(cls, serial=None, port_path=None, default_transport_timeout_s=None):
"""TODO
Parameters
----------
serial : TODO
TODO
port_path : TODO
TODO
default_transport_timeout_s : TODO, None
Default timeout of commands in seconds.
Returns
-------
UsbTransport
TODO
"""
return cls._find(
interface_matcher(CLASS, SUBCLASS, PROTOCOL),
serial=serial,
port_path=port_path,
default_transport_timeout_s=default_transport_timeout_s
)
@classmethod
def find_all_adb_devices(cls, default_transport_timeout_s=None):
"""Find all ADB devices attached via USB.
Parameters
----------
default_transport_timeout_s : TODO, None
Default timeout of commands in seconds.
Returns
-------
generator
A generator which yields each ADB device attached via USB.
"""
for dev in cls._find_devices(interface_matcher(CLASS, SUBCLASS, PROTOCOL), default_transport_timeout_s=default_transport_timeout_s):
yield dev
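# --- Editorial usage sketch (not part of the original module) ---
# A minimal round-trip with the class above, assuming exactly one ADB-enabled
# device is attached. The 24-byte payload is a placeholder, not a valid ADB
# message; real callers should also handle exceptions.UsbDeviceNotFoundError.
def _example_usb_roundtrip():  # pragma: no cover
    transport = UsbTransport.find_adb(default_transport_timeout_s=DEFAULT_TIMEOUT_S)
    transport.connect(transport_timeout_s=10)
    try:
        transport.bulk_write(b"\x00" * 24)  # placeholder payload
        header = transport.bulk_read(24)    # read a 24-byte response
    finally:
        transport.close()
    return header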
| 31.223602
| 179
| 0.578078
|
2c783ac67b64221254e2baca7cd33b05e83f0714
| 493
|
py
|
Python
|
setup.py
|
AidanTweedy/PhoTex
|
3e7f24785d6f0f500db53cc56c706ab77840043e
|
[
"MIT"
] | 1
|
2021-03-03T02:51:12.000Z
|
2021-03-03T02:51:12.000Z
|
setup.py
|
AidanTweedy/PhoTex
|
3e7f24785d6f0f500db53cc56c706ab77840043e
|
[
"MIT"
] | null | null | null |
setup.py
|
AidanTweedy/PhoTex
|
3e7f24785d6f0f500db53cc56c706ab77840043e
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open("requirements.txt") as req_file:
    REQUIREMENTS = [i.strip() for i in req_file.readlines()]
with open("README", 'r') as f:
long_description = f.read()
setup(
name='wordRecog',
version='1.0',
description='A small module to transcribe photos of handwritten text',
license="MIT",
long_description=long_description,
author='Aidan Tweedy',
author_email='atweedy1@binghamton.edu',
packages=['wordRecog'],
install_requires=REQUIREMENTS
)
| 25.947368
| 74
| 0.685598
|
381b92414345a320aaf3c9456c45f32a67e820ed
| 15,843
|
py
|
Python
|
package/PartSeg/plugins/napari_widgets/roi_extraction_algorithms.py
|
neuromusic/PartSeg
|
a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf
|
[
"BSD-3-Clause"
] | 15
|
2020-03-21T03:27:56.000Z
|
2022-03-21T07:46:39.000Z
|
package/PartSeg/plugins/napari_widgets/roi_extraction_algorithms.py
|
neuromusic/PartSeg
|
a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf
|
[
"BSD-3-Clause"
] | 479
|
2019-10-27T22:57:22.000Z
|
2022-03-30T12:48:14.000Z
|
package/PartSeg/plugins/napari_widgets/roi_extraction_algorithms.py
|
neuromusic/PartSeg
|
a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf
|
[
"BSD-3-Clause"
] | 5
|
2020-02-05T14:25:02.000Z
|
2021-12-21T03:44:52.000Z
|
import typing
from contextlib import suppress
import numpy as np
import pandas as pd
from napari import Viewer
from napari.layers import Image as NapariImage
from napari.layers import Labels, Layer
from napari.utils.notifications import show_info
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QDialog,
QGridLayout,
QHBoxLayout,
QInputDialog,
QLabel,
QLineEdit,
QMessageBox,
QPlainTextEdit,
QPushButton,
QVBoxLayout,
QWidget,
)
from PartSeg import plugins
from PartSegCore import UNIT_SCALE, Units
from PartSegCore.algorithm_describe_base import Register, ROIExtractionProfile
from PartSegCore.analysis.algorithm_description import analysis_algorithm_dict
from PartSegCore.analysis.load_functions import LoadProfileFromJSON
from PartSegCore.analysis.save_functions import SaveProfilesToJSON
from PartSegCore.mask.algorithm_description import mask_algorithm_dict
from PartSegCore.segmentation import ROIExtractionResult
from ..._roi_analysis.profile_export import ExportDialog, ImportDialog, ProfileDictViewer
from ...common_backend.base_settings import IO_SAVE_DIRECTORY, BaseSettings
from ...common_backend.except_hook import show_warning
from ...common_gui.algorithms_description import AlgorithmChooseBase, FormWidget, InteractiveAlgorithmSettingsWidget
from ...common_gui.custom_load_dialog import PLoadDialog
from ...common_gui.custom_save_dialog import PSaveDialog
from ...common_gui.searchable_combo_box import SearchComboBox
from ...common_gui.searchable_list_widget import SearchableListWidget
from ...common_gui.universal_gui_part import TextShow
from ._settings import get_settings
from .utils import NapariFormWidgetWithMask, generate_image
if typing.TYPE_CHECKING:
from qtpy.QtGui import QHideEvent, QShowEvent # pragma: no cover
SELECT_TEXT = "<select>"
class NapariInteractiveAlgorithmSettingsWidget(InteractiveAlgorithmSettingsWidget):
@staticmethod
def _form_widget(algorithm, start_values) -> FormWidget:
return NapariFormWidgetWithMask(algorithm.get_fields(), start_values=start_values)
def reset_choices(self, event=None):
self.form_widget.reset_choices(event)
def get_layer_list(self) -> typing.List[str]:
return [x.name for x in self.get_layers().values()]
def get_values(self):
return {
k: v.name if isinstance(v, NapariImage) else v
for k, v in self.form_widget.get_values().items()
if not isinstance(v, Labels) and k != "mask"
}
def get_layers(self) -> typing.Dict[str, Layer]:
return {k: v for k, v in self.form_widget.get_values().items() if isinstance(v, Layer)}
class NapariAlgorithmChoose(AlgorithmChooseBase):
@staticmethod
def _algorithm_widget(settings, name, val) -> InteractiveAlgorithmSettingsWidget:
return NapariInteractiveAlgorithmSettingsWidget(settings, name, val, [])
def reset_choices(self, event=None):
for widget in self.algorithm_dict.values():
widget.reset_choices(event)
class ROIExtractionAlgorithms(QWidget):
@staticmethod
def get_method_dict(): # pragma: no cover
raise NotImplementedError
@staticmethod
def prefix() -> str: # pragma: no cover
raise NotImplementedError
def __init__(self, napari_viewer: Viewer):
plugins.register_if_need()
super().__init__()
self._scale = np.array((1, 1, 1))
self.channel_names = []
self.mask_name = ""
self.viewer = napari_viewer
self.settings = get_settings()
self.algorithm_chose = NapariAlgorithmChoose(self.settings, self.get_method_dict())
self.calculate_btn = QPushButton("Run")
self.calculate_btn.clicked.connect(self._run_calculation)
self.info_text = TextShow()
self.profile_combo_box = SearchComboBox()
self.profile_combo_box.addItem(SELECT_TEXT)
self.profile_combo_box.addItems(self.profile_dict.keys())
self.save_btn = QPushButton("Save parameters")
self.manage_btn = QPushButton("Manage parameters")
self.target_layer_name = QLineEdit()
self.target_layer_name.setText(self.settings.get(f"{self.prefix()}.target_layer_name", "ROI"))
layout = QVBoxLayout()
btn_layout = QHBoxLayout()
btn_layout.addWidget(self.save_btn)
btn_layout.addWidget(self.manage_btn)
layout.addLayout(btn_layout)
target_layer_layout = QHBoxLayout()
target_layer_layout.addWidget(QLabel("Target layer name:"))
target_layer_layout.addWidget(self.target_layer_name)
layout.addLayout(target_layer_layout)
layout.addWidget(self.profile_combo_box)
layout.addWidget(self.calculate_btn)
layout.addWidget(self.algorithm_chose, 1)
layout.addWidget(self.info_text)
self.setLayout(layout)
self.algorithm_chose.result.connect(self.set_result)
self.algorithm_chose.finished.connect(self._enable_calculation_btn)
self.algorithm_chose.algorithm_changed.connect(self.algorithm_changed)
self.save_btn.clicked.connect(self.save_action)
self.manage_btn.clicked.connect(self.manage_action)
self.profile_combo_box.textActivated.connect(self.select_profile)
self.update_tooltips()
def _enable_calculation_btn(self):
self.calculate_btn.setEnabled(True)
def manage_action(self):
dialog = ProfilePreviewDialog(self.profile_dict, self.get_method_dict(), self.settings, parent=self)
dialog.exec_()
self.refresh_profiles()
def select_profile(self, text):
if text in [SELECT_TEXT, ""]:
return # pragma: no cover
profile = self.profile_dict[text]
self.algorithm_chose.change_algorithm(profile.algorithm, profile.values)
self.profile_combo_box.setCurrentIndex(0)
@property
def profile_dict(self) -> typing.Dict[str, ROIExtractionProfile]:
return self.settings.get_from_profile(f"{self.prefix()}.profiles", {})
def save_action(self):
widget: NapariInteractiveAlgorithmSettingsWidget = self.algorithm_chose.current_widget()
profiles = self.profile_dict
while True:
text, ok = QInputDialog.getText(self, "Profile Name", "Input profile name here")
if not ok:
return # pragma: no cover
if text not in profiles or QMessageBox.Yes == QMessageBox.warning(
self,
"Already exists",
"Profile with this name already exist. Overwrite?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No,
):
break # pragma: no cover
resp = ROIExtractionProfile(text, widget.name, widget.get_values())
profiles[text] = resp
self.settings.dump()
self.profile_combo_box.addItem(text)
self.update_tooltips()
def update_tooltips(self):
for i in range(1, self.profile_combo_box.count()):
if self.profile_combo_box.itemData(i, Qt.ToolTipRole) is not None:
continue
text = self.profile_combo_box.itemText(i)
profile: ROIExtractionProfile = self.profile_dict[text]
tool_tip_text = str(profile)
self.profile_combo_box.setItemData(i, tool_tip_text, Qt.ToolTipRole)
def algorithm_changed(self):
self._scale = np.array((1, 1, 1))
self.channel_names = []
self.mask_name = ""
def update_mask(self):
widget: NapariInteractiveAlgorithmSettingsWidget = self.algorithm_chose.current_widget()
mask = widget.get_layers().get("mask", None)
if getattr(mask, "name", "") != self.mask_name or (widget.mask() is None and mask is not None):
widget.set_mask(getattr(mask, "data", None))
self.mask_name = getattr(mask, "name", "")
def update_image(self):
widget: NapariInteractiveAlgorithmSettingsWidget = self.algorithm_chose.current_widget()
self.settings.last_executed_algorithm = widget.name
layer_names: typing.List[str] = widget.get_layer_list()
if layer_names == self.channel_names:
return
image = generate_image(self.viewer, *layer_names)
self._scale = np.array(image.spacing)
self.channel_names = image.channel_names
widget.image_changed(image)
self.mask_name = ""
def _run_calculation(self):
widget: NapariInteractiveAlgorithmSettingsWidget = self.algorithm_chose.current_widget()
self.settings.last_executed_algorithm = widget.name
self.update_image()
self.update_mask()
widget.execute()
self.calculate_btn.setDisabled(True)
def showEvent(self, event: "QShowEvent") -> None:
self.reset_choices(None)
super().showEvent(event)
def hideEvent(self, event: "QHideEvent") -> None:
self.settings.dump()
super().hideEvent(event)
def reset_choices(self, event=None):
self.algorithm_chose.reset_choices(event)
def set_result(self, result: ROIExtractionResult):
if result.info_text:
show_info(result.info_text)
if len(result.roi_info.bound_info) == 0:
if not result.info_text:
show_info("There is no ROI in result. Pleas check algorithm parameters.")
return
roi = result.roi
if self.sender() is not None:
self.info_text.setPlainText(self.sender().get_info_text())
with suppress(Exception):
roi = self.sender().current_widget().algorithm_thread.algorithm.image.fit_array_to_image(result.roi)
layer_name = self.target_layer_name.text()
self.settings.set(f"{self.prefix()}.target_layer_name", layer_name)
column_list = []
column_set = set()
for value in result.roi_annotation.values():
for column_name in value.items():
if column_name not in column_set:
column_list.append(column_name)
column_set.add(column_name)
properties = pd.DataFrame.from_dict(result.roi_annotation, orient="index")
properties["index"] = list(result.roi_annotation.keys())
if layer_name in self.viewer.layers:
self.viewer.layers[layer_name].data = result.roi
self.viewer.layers[layer_name].metadata = {"parameters": result.parameters}
self.viewer.layers[layer_name].properties = properties
else:
self.viewer.add_labels(
roi,
scale=np.array(self._scale)[-result.roi.ndim :] * UNIT_SCALE[Units.nm.value],
name=layer_name,
metadata={"parameters": result.parameters},
properties=properties,
)
def refresh_profiles(self):
self.profile_combo_box.clear()
self.profile_combo_box.addItem(SELECT_TEXT)
self.profile_combo_box.addItems(self.profile_dict.keys())
class ROIAnalysisExtraction(ROIExtractionAlgorithms):
@staticmethod
def get_method_dict():
return analysis_algorithm_dict
@staticmethod
def prefix() -> str:
return "roi_analysis_extraction"
class ROIMaskExtraction(ROIExtractionAlgorithms):
@staticmethod
def get_method_dict():
return mask_algorithm_dict
@staticmethod
def prefix() -> str:
return "roi_mask_extraction"
class ProfilePreviewDialog(QDialog):
def __init__(
self,
profile_dict: typing.Dict[str, ROIExtractionProfile],
algorithm_dict: Register,
settings: BaseSettings,
parent=None,
):
super().__init__(parent=parent)
self.profile_dict = profile_dict
self.algorithm_dict = algorithm_dict
self.settings = settings
self.profile_list = SearchableListWidget()
self.profile_list.addItems(self.profile_dict.keys())
self.profile_list.currentTextChanged.connect(self.profile_selected)
self.profile_view = QPlainTextEdit()
self.profile_view.setReadOnly(True)
self.delete_btn = QPushButton("Delete")
self.delete_btn.clicked.connect(self.delete_action)
self.rename_btn = QPushButton("Rename")
self.rename_btn.clicked.connect(self.rename_action)
self.export_btn = QPushButton("Export")
self.export_btn.clicked.connect(self.export_action)
self.import_btn = QPushButton("Import")
self.import_btn.clicked.connect(self.import_action)
layout = QGridLayout()
layout.addWidget(self.profile_list, 0, 0)
layout.addWidget(self.profile_view, 0, 1)
layout.addWidget(self.delete_btn, 1, 0)
layout.addWidget(self.rename_btn, 1, 1)
layout.addWidget(self.export_btn, 2, 0)
layout.addWidget(self.import_btn, 2, 1)
self.setLayout(layout)
def profile_selected(self, text):
if text not in self.profile_dict:
return
profile = self.profile_dict[text]
self.profile_view.setPlainText(str(profile))
def delete_action(self):
if self.profile_list.currentItem() is None:
return # pragma: no cover
if self.profile_list.currentItem().text() in self.profile_dict:
del self.profile_dict[self.profile_list.currentItem().text()]
self.profile_list.clear()
self.profile_list.addItems(self.profile_dict.keys())
def rename_action(self):
if self.profile_list.currentItem() is None:
return # pragma: no cover
old_name = self.profile_list.currentItem().text()
if old_name not in self.profile_dict:
return # pragma: no cover
text, ok = QInputDialog.getText(self, "Profile Name", "Input profile name here")
if not ok:
return # pragma: no cover
if text in self.profile_dict: # pragma: no cover
QMessageBox.warning(
self,
"Already exists",
"Profile with this name already exist.",
QMessageBox.Ok,
QMessageBox.Ok,
)
self.rename_action()
return
profile = self.profile_dict[old_name]
del self.profile_dict[old_name]
profile.name = text
self.profile_dict[text] = profile
self.profile_list.clear()
self.profile_list.addItems(self.profile_dict.keys())
def export_action(self):
exp = ExportDialog(self.profile_dict, ProfileDictViewer, parent=self)
if not exp.exec_():
return # pragma: no cover
dial = PSaveDialog(SaveProfilesToJSON, settings=self.settings, parent=self, path=IO_SAVE_DIRECTORY)
if dial.exec_():
save_location, _selected_filter, save_class, values = dial.get_result()
data = {x: self.profile_dict[x] for x in exp.get_export_list()}
save_class.save(save_location, data, values)
def import_action(self):
dial = PLoadDialog(LoadProfileFromJSON, settings=self.settings, parent=self, path=IO_SAVE_DIRECTORY)
if not dial.exec_():
return # pragma: no cover
file_list, _, load_class = dial.get_result()
profs, err = load_class.load(file_list)
if err:
show_warning("Import error", "error during importing, part of data were filtered.") # pragma: no cover
imp = ImportDialog(profs, self.profile_dict, ProfileDictViewer, parent=self)
if not imp.exec_():
return # pragma: no cover
for original_name, final_name in imp.get_import_list():
self.profile_dict[final_name] = profs[original_name]
self.settings.dump()
self.profile_list.clear()
self.profile_list.addItems(self.profile_dict.keys())
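# --- Editorial sketch (not part of the original module) ---
# Shape of a stored profile entry, mirroring save_action() above; the profile
# name is a placeholder and `widget` is assumed to be any
# NapariInteractiveAlgorithmSettingsWidget instance.
def _example_profile_entry(widget):  # pragma: no cover
    profile = ROIExtractionProfile("my profile", widget.name, widget.get_values())
    return {profile.name: profile}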
| 39.410448
| 116
| 0.676576
|
6d4b5726c40f3cdd7414f9f2194141fd3c03d48d
| 189
|
py
|
Python
|
Fibonacci.py
|
DanielJHaar/pythonpracticejun2020
|
24e2501fab559841c976eca07bd1900b356c3336
|
[
"MIT"
] | null | null | null |
Fibonacci.py
|
DanielJHaar/pythonpracticejun2020
|
24e2501fab559841c976eca07bd1900b356c3336
|
[
"MIT"
] | null | null | null |
Fibonacci.py
|
DanielJHaar/pythonpracticejun2020
|
24e2501fab559841c976eca07bd1900b356c3336
|
[
"MIT"
] | null | null | null |
def fibonacci(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fibonacci(n-1)+fibonacci(n-2)
n = int(input())
print(fibonacci(n))
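# Editorial sketch (not in the original file): an iterative variant that avoids
# the exponential recursion above while returning the same values.
def fibonacci_iterative(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a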
| 18.9
| 45
| 0.513228
|
847d62fb0ecfbf0312e10db92527e436ef72eda7
| 29,839
|
py
|
Python
|
azure/mgmt/network/v2015_06_15/operations/network_interfaces_operations.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
azure/mgmt/network/v2015_06_15/operations/network_interfaces_operations.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/network/v2015_06_15/operations/network_interfaces_operations.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2015-06-15".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-06-15"
self.config = config
def delete(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkInterface or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2015_06_15.models.NetworkInterface or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network
interface operation.
:type parameters:
~azure.mgmt.network.v2015_06_15.models.NetworkInterface
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
NetworkInterface or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NetworkInterface')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_virtual_machine_scale_set_vm_network_interfaces(
self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, custom_headers=None, raw=False, **operation_config):
"""Gets information about all network interfaces in a virtual machine in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_virtual_machine_scale_set_network_interfaces(
self, resource_group_name, virtual_machine_scale_set_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of NetworkInterface
:rtype:
~azure.mgmt.network.v2015_06_15.models.NetworkInterfacePaged[~azure.mgmt.network.v2015_06_15.models.NetworkInterface]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get_virtual_machine_scale_set_network_interface(
self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine
scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkInterface or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2015_06_15.models.NetworkInterface or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
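# --- Editorial usage sketch (not part of the generated client) ---
# Assumes the classic azure-mgmt-network package, where this operations group is
# exposed as ``client.network_interfaces``; the resource names below are placeholders.
def _example_list_and_get_nics(credentials, subscription_id):  # pragma: no cover
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(credentials, subscription_id)
    for nic in client.network_interfaces.list("my-resource-group"):
        print(nic.name)
    return client.network_interfaces.get("my-resource-group", "my-nic")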
| 47.81891
| 242
| 0.666142
|
32d8eeb20e04fadf4c68b0fa8539d81f20bd826d
| 679
|
py
|
Python
|
fastybird_tuya_connector/events/__init__.py
|
FastyBird/tuya-connector
|
46668191367cf854684bcb5297d30935737ba735
|
[
"Apache-2.0"
] | null | null | null |
fastybird_tuya_connector/events/__init__.py
|
FastyBird/tuya-connector
|
46668191367cf854684bcb5297d30935737ba735
|
[
"Apache-2.0"
] | null | null | null |
fastybird_tuya_connector/events/__init__.py
|
FastyBird/tuya-connector
|
46668191367cf854684bcb5297d30935737ba735
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Copyright 2021. FastyBird s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tuya connector events module
"""
| 33.95
| 78
| 0.705449
|
b44cb7d46ad6975522e55a53d481f3aecfded969
| 30,548
|
py
|
Python
|
meeseeksdev/meeseeksbox/commands.py
|
Carreau/MeeseeksDev
|
805d8e0eff60d5241d2d9bc4fbd63614bfbb291f
|
[
"MIT"
] | null | null | null |
meeseeksdev/meeseeksbox/commands.py
|
Carreau/MeeseeksDev
|
805d8e0eff60d5241d2d9bc4fbd63614bfbb291f
|
[
"MIT"
] | 54
|
2018-10-29T19:08:24.000Z
|
2021-07-14T13:14:42.000Z
|
meeseeksdev/meeseeksbox/commands.py
|
Carreau/MeeseeksDev
|
805d8e0eff60d5241d2d9bc4fbd63614bfbb291f
|
[
"MIT"
] | 1
|
2022-03-29T14:45:48.000Z
|
2022-03-29T14:45:48.000Z
|
"""
Define a few commands
"""
import random
import os
import re
import subprocess
import git
import pipes
import mock
import keen
import time
import traceback
import sys
from textwrap import dedent
# from friendlyautopep8 import run_on_cwd
from .utils import Session, fix_issue_body, fix_comment_body
from .scopes import admin, everyone, write
green = "\033[0;32m"
yellow = "\033[0;33m"
red = "\033[0;31m"
blue = "\x1b[0;34m"
normal = "\033[0m"
@everyone
def replyuser(*, session, payload, arguments, local_config=None):
print("I'm replying to a user, look at me.")
comment_url = payload["issue"]["comments_url"]
user = payload["comment"]["user"]["login"]
c = random.choice(
(
"Helloooo @{user}, I'm Mr. Meeseeks! Look at me!",
"Look at me, @{user}, I'm Mr. Meeseeks! ",
"I'm Mr. Meeseek, @{user}, Look at meee ! ",
)
)
session.post_comment(comment_url, c.format(user=user))
@write
def say(*, session, payload, arguments, local_config=None):
print("Oh, got local_config", local_config)
comment_url = payload.get("issue", payload.get("pull_request"))["comments_url"]
session.post_comment(comment_url, "".join(arguments))
@write
def debug(*, session, payload, arguments, local_config=None):
print("DEBUG")
print("session", session)
print("payload", payload)
print("arguments", arguments)
print("local_config", local_config)
@everyone
def party(*, session, payload, arguments, local_config=None):
comment_url = payload.get("issue", payload.get("pull_request"))["comments_url"]
parrot = ""
session.post_comment(comment_url, parrot * 10)
@everyone
def zen(*, session, payload, arguments, local_config=None):
comment_url = payload.get("issue", payload.get("pull_request"))["comments_url"]
session.post_comment(
comment_url,
dedent(
"""
Zen of Python ([pep 20](https://www.python.org/dev/peps/pep-0020/))
```
>>> import this
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
```
"""
),
)
@admin
def replyadmin(*, session, payload, arguments, local_config=None):
comment_url = payload["issue"]["comments_url"]
user = payload["issue"]["user"]["login"]
session.post_comment(
comment_url, "Hello @{user}. Waiting for your orders.".format(user=user)
)
@admin
def blackify(*, session, payload, arguments, local_config=None):
print("===== pe8ifying =====")
print(payload)
print("===== ========= =====")
# collect initial payload
prnumber = payload["issue"]["number"]
prtitle = payload["issue"]["title"]
org_name = payload["repository"]["owner"]["login"]
repo_name = payload["repository"]["name"]
# collect extended payload on the PR
print("== Collecting data on Pull-request...")
r = session.ghrequest(
"GET",
"https://api.github.com/repos/{}/{}/pulls/{}".format(
org_name, repo_name, prnumber
),
json=None,
)
pr_data = r.json()
head_sha = pr_data["head"]["sha"]
base_sha = pr_data["base"]["sha"]
branch = pr_data["head"]["ref"]
author_login = pr_data["head"]["repo"]["owner"]["login"]
repo_name = pr_data["head"]["repo"]["name"]
    # that will likely fail: for a PR, we need to bypass the fact that the
    # requester technically has no access to the committer's repo.
target_session = yield "{}/{}".format(author_login, repo_name)
if not target_session:
comment_url = payload["issue"]["comments_url"]
session.post_comment(
comment_url,
body="I'm afraid I can't do that. Maybe I need to be installed on target repository ?\n"
"Click [here](https://github.com/integrations/meeseeksdev/installations/new) to do that.".format(
botname="meeseeksdev"
),
)
return
# clone locally
# this process can take some time, regen token
atk = target_session.token()
if os.path.exists(repo_name):
print("== Cleaning up previsous work... ")
subprocess.run("rm -rf {}".format(repo_name).split(" "))
print("== Done cleaning ")
print(f"== Cloning repository from {author_login}/{repo_name}, this can take some time..")
process = subprocess.run(
[
"git",
"clone",
"https://x-access-token:{}@github.com/{}/{}".format(
atk, author_login, repo_name
),
]
)
print("== Cloned..")
process.check_returncode()
subprocess.run("git config --global user.email meeseeksbot@jupyter.org".split(" "))
subprocess.run("git config --global user.name FriendlyBot".split(" "))
# do the pep8ify on local filesystem
repo = git.Repo(repo_name)
print(f"== Fetching branch `{branch}` to pep8ify on ...")
repo.remotes.origin.fetch("{}:workbranch".format(branch))
repo.git.checkout("workbranch")
print("== Fetching Commits to pep8ify...")
repo.remotes.origin.fetch("{head_sha}".format(head_sha=head_sha))
print("== All has been fetched correctly")
os.chdir(repo_name)
def lpr(*args):
print('Should run:', *args)
lpr('git rebase -x "black --fast . && git commit -a --amend --no-edit" --strategy-option=theirs --autosquash', base_sha )
subprocess.run(['git','rebase', '-x','black --fast . && git commit -a --amend --no-edit','--strategy-option=theirs','--autosquash', base_sha])
#os.chdir("..")
## write the commit message
#msg = "Autofix pep 8 of #%i: %s" % (prnumber, prtitle) + "\n\n"
#repo.git.commit("-am", msg)
## Push the pep8ify work
print("== Pushing work....:")
lpr(f"pushing with workbranch:{branch}")
repo.remotes.origin.push("workbranch:{}".format(branch))
repo.git.checkout("master")
repo.branches.workbranch.delete(repo, "workbranch", force=True)
@write
def safe_backport(session, payload, arguments, local_config=None):
"""[to] {branch}"""
import builtins
print = lambda *args, **kwargs: builtins.print(" [backport]", *args, **kwargs)
s_clone_time = 0
s_success = False
s_reason = "unknown"
s_fork_time = 0
s_clean_time = 0
s_ff_time = 0
def keen_stats():
nonlocal s_slug
nonlocal s_clone_time
nonlocal s_success
nonlocal s_reason
nonlocal s_fork_time
nonlocal s_clean_time
nonlocal s_ff_time
keen.add_event(
"backport_stats",
{
"slug": s_slug,
"clone_time": s_clone_time,
"fork_time": s_fork_time,
"clean_time": s_clean_time,
"success": s_success,
"fast_forward_opt_time": s_ff_time,
"reason": s_reason,
},
)
if arguments is None:
arguments = ""
target_branch = arguments
if target_branch.startswith("to "):
target_branch = target_branch[3:].strip()
# collect initial payload
if "issue" not in payload:
print(
green
            + 'debug safe_autobackport, "issue" not in payload, likely triggered by milestone merge.'
+ normal
)
prnumber = payload.get("issue", payload).get("number")
prtitle = payload.get("issue", payload.get("pull_request", {})).get("title")
org_name = payload["repository"]["owner"]["login"]
repo_name = payload["repository"]["name"]
comment_url = payload.get("issue", payload.get("pull_request"))["comments_url"]
maybe_wrong_named_branch = False
s_slug = f"{org_name}/{repo_name}"
try:
existing_branches = session.ghrequest(
"GET", f"https://api.github.com/repos/{org_name}/{repo_name}/branches"
).json()
existing_branches_names = {b["name"] for b in existing_branches}
if target_branch not in existing_branches_names:
print(
red
+ f"Request to backport to `{target_branch}`, which does not seem to exist. Known : {existing_branches_names}"
)
maybe_wrong_named_branch = True
else:
print(green + f"found branch {target_branch}")
except Exception:
import traceback
traceback.print_exc()
s_reason = "Exception line 256"
keen_stats()
try:
# collect extended payload on the PR
print("== Collecting data on Pull-request...")
r = session.ghrequest(
"GET",
"https://api.github.com/repos/{}/{}/pulls/{}".format(
org_name, repo_name, prnumber
),
json=None,
)
pr_data = r.json()
merge_sha = pr_data["merge_commit_sha"]
body = pr_data["body"]
milestone = pr_data["milestone"]
if milestone:
milestone_number = pr_data["milestone"].get("number", None)
else:
milestone_number = None
print("----------------------------------------")
# print('milestone data :', pr_data['milestone'])
print("----------------------------------------")
if not target_branch.strip():
milestone_title = pr_data["milestone"]["title"]
parts = milestone_title.split(".")
parts[-1] = "x"
infered_target_branch = ".".join(parts)
print("inferring branch....", infered_target_branch)
target_branch = infered_target_branch
keen.add_event("backport_infering_branch", {"infering_remove_x": 1})
if milestone_number:
milestone_number = int(milestone_number)
labels_names = []
try:
label_names = [l["name"] for l in pr_data["labels"]]
if not label_names and ("issue" in payload.keys()):
labels_names = [l["name"] for l in payload["issue"]["labels"]]
except KeyError:
print("Did not find labels|", pr_data)
# clone locally
# this process can take some time, regen token
atk = session.token()
# FORK it.
fork_epoch = time.time()
frk = session.personal_request(
"POST", f"https://api.github.com/repos/{org_name}/{repo_name}/forks"
).json()
for i in range(5):
ff = session.personal_request("GET", frk["url"], raise_for_status=False)
if ff.status_code == 200:
keen.add_event("fork_wait", {"n": i})
break
time.sleep(1)
s_fork_time = time.time() - fork_epoch
## optimize-fetch-experiment
print("Attempting FF")
if os.path.exists(repo_name):
try:
re_fetch_epoch = time.time()
print("FF: Git set-url origin")
subprocess.run(
[
"git",
"remote",
"set-url",
"origin",
f"https://x-access-token:{atk}@github.com/{org_name}/{repo_name}",
],
cwd=repo_name,
).check_returncode()
repo = git.Repo(repo_name)
print("FF: Git fetch master")
repo.remotes.origin.fetch("master")
repo.git.checkout("master")
print("FF: Reset hard origin/master")
subprocess.run(
["git", "reset", "--hard", "origin/master"], cwd=repo_name
).check_returncode()
print("FF: Git describe tags....")
subprocess.run(["git", "describe", "--tag"], cwd=repo_name)
re_fetch_delta = time.time() - re_fetch_epoch
print(blue + f"FF took {re_fetch_delta}s")
s_ff_time = re_fetch_delta
except Exception as e:
# something went wrong. Kill repository it's going to be
# recloned.
clean_epoch = time.time()
if os.path.exists(repo_name):
print("== Cleaning up previsous work... ")
subprocess.run("rm -rf {}".format(repo_name).split(" "))
print("== Done cleaning ")
s_clean_time = time.time() - clean_epoch
import traceback
traceback.print_exc()
## end optimise-fetch-experiment
clone_epoch = time.time()
action = "set-url"
what_was_done = "Fast-Forwarded"
if not os.path.exists(repo_name):
print("== Cloning current repository, this can take some time..")
process = subprocess.run(
[
"git",
"clone",
"https://x-access-token:{}@github.com/{}/{}".format(
atk, org_name, repo_name
),
]
)
process.check_returncode()
action = "add"
what_was_done = "Cloned"
s_clone_time = time.time() - clone_epoch
process = subprocess.run(
[
"git",
"remote",
action,
session.personnal_account_name,
f"https://x-access-token:{session.personnal_account_token}@github.com/{session.personnal_account_name}/{repo_name}",
],
cwd=repo_name,
)
print("==", what_was_done)
process.check_returncode()
subprocess.run(
"git config --global user.email meeseeksdevbot@jupyter.org".split(" ")
)
subprocess.run("git config --global user.name MeeseeksDev[bot]".split(" "))
# do the backport on local filesystem
repo = git.Repo(repo_name)
print("== Fetching branch to backport on ... {}".format(target_branch))
repo.remotes.origin.fetch("refs/heads/{}:workbranch".format(target_branch))
repo.git.checkout("workbranch")
print(
"== Fetching Commits to {mergesha} backport...".format(mergesha=merge_sha)
)
repo.remotes.origin.fetch("{mergesha}".format(num=prnumber, mergesha=merge_sha))
print("== All has been fetched correctly")
# remove mentions from description, to avoid pings:
description = body.replace("@", " ").replace("#", " ")
print("Cherry-picking %s" % merge_sha)
args = ("-m", "1", merge_sha)
msg = "Backport PR #%i: %s" % (prnumber, prtitle)
remote_submit_branch = f"auto-backport-of-pr-{prnumber}-on-{target_branch}"
try:
with mock.patch.dict("os.environ", {"GIT_EDITOR": "true"}):
try:
repo.git.cherry_pick(*args)
except git.GitCommandError as e:
if "is not a merge." in e.stderr:
print(
"Likely not a merge PR...Attempting squash and merge picking."
)
args = (merge_sha,)
repo.git.cherry_pick(*args)
else:
raise
except git.GitCommandError as e:
if ("git commit --allow-empty" in e.stderr) or (
"git commit --allow-empty" in e.stdout
):
session.post_comment(
comment_url,
"Can't Dooooo.... It seem like this is already backported (commit is empty)."
"I won't do anything. MrMeeseeks out.",
)
print(e.stderr)
print("----")
print(e.stdout)
print("----")
s_reason = "empty commit"
keen_stats()
return
elif "after resolving the conflicts" in e.stderr:
# TODO, here we should also do a git merge --abort
# to avoid thrashing the cache at next backport request.
cmd = " ".join(pipes.quote(arg) for arg in sys.argv)
print(
"\nPatch did not apply. Resolve conflicts (add, not commit), then re-run `%s`"
% cmd,
file=sys.stderr,
)
session.post_comment(
comment_url,
f"""Owee, I'm MrMeeseeks, Look at me.
There seems to be a conflict, please backport manually. Here are approximate instructions:
1. Checkout backport branch and update it.
```
$ git checkout {target_branch}
$ git pull
```
2. Cherry pick the first parent branch of this PR on top of the older branch:
```
$ git cherry-pick -m1 {merge_sha}
```
3. You will likely have some merge/cherry-pick conflict here, fix them and commit:
```
$ git commit -am {msg!r}
```
4. Push to a named branch :
```
git push YOURFORK {target_branch}:{remote_submit_branch}
```
5. Create a PR against branch {target_branch}, I would have named this PR:
> "Backport PR #{prnumber} on branch {target_branch}"
And apply the correct labels and milestones.
Congratulations, you did some good work! Hopefully your backport PR will be tested by the continuous integration and merged soon!
If these instructions are inaccurate, feel free to [suggest an improvement](https://github.com/MeeseeksBox/MeeseeksDev).
""",
)
org = payload["repository"]["owner"]["login"]
repo = payload["repository"]["name"]
num = payload.get("issue", payload).get("number")
url = f"https://api.github.com/repos/{org}/{repo}/issues/{num}/labels"
print("trying to apply still needs manual backport")
reply = session.ghrequest(
"POST", url, json=["Still Needs Manual Backport"]
)
print("Should be applied:", reply)
s_reason = "conflicts"
keen_stats()
return
else:
session.post_comment(
comment_url,
"Oops, something went wrong applying the patch... Please have a look at my logs.",
)
print(e.stderr)
print("----")
print(e.stdout)
print("----")
s_reason = "Unknown error line 491"
keen_stats()
return
except Exception as e:
session.post_comment(
comment_url, "Hum, I actually crashed, that should not have happened."
)
print("\n" + e.stderr.decode("utf8", "replace"), file=sys.stderr)
print("\n" + repo.git.status(), file=sys.stderr)
keen.add_event("error", {"git_crash": 1})
s_reason = "Unknown error line 501"
keen_stats()
return
# write the commit message
repo.git.commit("--amend", "-m", msg)
print("== PR #%i applied, with msg:" % prnumber)
print()
print(msg)
print("== ")
# Push the backported work
print("== Pushing work....:")
try:
print(
f"Tryign to push to {remote_submit_branch} of {session.personnal_account_name}"
)
repo.remotes[session.personnal_account_name].push(
"workbranch:{}".format(remote_submit_branch)
)
except Exception as e:
import traceback
traceback.print_exc()
print("could not push to self remote")
s_reason = "Could not push"
keen_stats()
# TODO comment on issue
print(e)
repo.git.checkout("master")
repo.branches.workbranch.delete(repo, "workbranch", force=True)
# TODO checkout master and get rid of branch
# Make the PR on GitHub
print(
"try to create PR with milestone",
milestone_number,
"and labels",
labels_names,
)
new_pr = session.personal_request(
"POST",
"https://api.github.com/repos/{}/{}/pulls".format(org_name, repo_name),
json={
"title": f"Backport PR #{prnumber} on branch {target_branch} ({prtitle})",
"body": msg,
"head": "{}:{}".format(
session.personnal_account_name, remote_submit_branch
),
"base": target_branch,
},
).json()
new_number = new_pr["number"]
resp = session.ghrequest(
"PATCH",
"https://api.github.com/repos/{}/{}/issues/{}".format(
org_name, repo_name, new_number
),
json={"milestone": milestone_number, "labels": labels_names},
)
# print(resp.json())
except Exception as e:
extra_info = ""
if maybe_wrong_named_branch:
extra_info = "\n\n It seem that the branch you are trying to backport to does not exists."
session.post_comment(
comment_url,
"Something went wrong ... Please have a look at my logs." + extra_info,
)
keen.add_event("error", {"unknown_crash": 1})
print("Something went wrong")
print(e)
s_reason = "Remote branches does not exists"
keen_stats()
raise
resp.raise_for_status()
print("Backported as PR", new_number)
s_reason = "Success"
s_success = True
keen_stats()
return new_pr
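# A minimal sketch of the manual backport flow that the conflict message above
# describes, expressed with GitPython. It is illustrative only: the repository
# path, branch names, SHA, remote name and commit message are hypothetical
# parameters, and it assumes a clone whose "origin" remote already has the
# merge commit and the target branch available.
def manual_backport_sketch(repo_path, target_branch, merge_sha, message,
                           fork_remote, submit_branch):
    repository = git.Repo(repo_path)
    # 1. Check out and update the maintenance branch to backport onto.
    repository.git.checkout(target_branch)
    repository.remotes.origin.pull(target_branch)
    # 2. Cherry-pick the first-parent diff of the merged PR.
    repository.git.cherry_pick("-m", "1", merge_sha)
    # 3. Reword the commit so it reads like "Backport PR #...: title".
    repository.git.commit("--amend", "-m", message)
    # 4. Push to a named branch on the fork, ready to open the backport PR.
    repository.git.push(fork_remote, "HEAD:{}".format(submit_branch))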
@admin
def tag(session, payload, arguments, local_config=None):
"tag[, tag, [...] ]"
print("Got local config for tag: ", local_config)
org = payload["repository"]["owner"]["login"]
repo = payload["repository"]["name"]
num = payload.get("issue", payload.get("pull_request")).get("number")
url = f"https://api.github.com/repos/{org}/{repo}/issues/{num}/labels"
arguments = arguments.replace("'", '"')
quoted = re.findall(r'\"(.+?)\"',arguments.replace("'", '"'))
for q in quoted:
arguments = arguments.replace('"%s"' % q, '')
tags = [arg.strip() for arg in arguments.split(",") if arg.strip()] + quoted
print('raw tags:', tags)
to_apply = []
not_applied = []
try:
label_payload = session.ghrequest(
"GET", f"https://api.github.com/repos/{org}/{repo}/labels"
)
label_payloads = [label_payload]
def get_next_link(req):
all_links = req.headers.get('Link')
if 'rel="next"' in all_links:
links = all_links.split(',')
next_link = [l for l in links if 'next' in l][0] # assume only one.
if next_link:
return next_link.split(';')[0].strip(' <>')
# let's assume no more than 200 labels
resp = label_payload
try:
for i in range(10):
print('get labels page',i)
next_link = get_next_link(resp)
if next_link:
resp = session.ghrequest( "GET", next_link)
label_payloads.append(resp)
else:
break
except Exception:
traceback.print_exc()
know_labels = []
for p in label_payloads:
know_labels.extend([label["name"] for label in p.json()])
print('known labels', know_labels)
not_known_tags = [t for t in tags if t not in know_labels]
known_tags = [t for t in tags if t in know_labels]
print('known tags', known_tags)
        print('unknown tags', not_known_tags)
# try to look at casing
nk = []
known_lower_normal = {l.lower(): l for l in know_labels}
print('known labels lower', known_lower_normal)
for t in not_known_tags:
target = known_lower_normal.get(t.lower())
print('mapping t', t, target)
if target:
known_tags.append(t)
else:
print('will not apply', t)
nk.append(t)
to_apply = known_tags
not_applied = nk
except Exception:
print(red + "something went wrong getting labels" + normal)
traceback.print_exc()
if local_config:
only = set(local_config.get("only", []))
any_tags = local_config.get("any", False)
if any_tags:
print("not filtering, any tags set")
elif only:
allowed_tags = [t for t in to_apply if t.lower() in only]
not_allowed_tags = [t for t in to_apply if t.lower() not in only]
print("will only allow", allowed_tags)
print("will refuse", not_allowed_tags)
to_apply = allowed_tags
not_applied.extend(not_allowed_tags)
if to_apply:
session.ghrequest("POST", url, json=to_apply)
if not_applied:
comment_url = payload.get("issue", payload.get("pull_request"))["comments_url"]
lf = "`,`".join(not_applied)
user = payload.get("comment", {}).get("user", {}).get("login", None)
session.post_comment(
comment_url,
f"Aww {user}, I was not able to apply the following label(s): `{lf}`. Either "
"because they are not existing labels on this repository or because you do not have the permission to apply these."
"I tried my best to guess by looking at the casing, but was unable to find matching labels.",
)
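# A standalone sketch of the GitHub Link-header pagination used inside `tag`
# above. `session` is assumed to be the same Session object (exposing
# `ghrequest`) used throughout this module; `start_url` and `max_pages` are
# hypothetical parameters.
def iter_github_pages(session, start_url, max_pages=10):
    url = start_url
    for _ in range(max_pages):
        response = session.ghrequest("GET", url)
        yield response.json()
        link_header = response.headers.get("Link")
        if not link_header or 'rel="next"' not in link_header:
            break
        # Link header looks like: <https://...&page=2>; rel="next", <...>; rel="last"
        next_part = [p for p in link_header.split(",") if 'rel="next"' in p][0]
        url = next_part.split(";")[0].strip(" <>")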
@admin
def untag(session, payload, arguments, local_config=None):
"tag[, tag, [...] ]"
org = payload["repository"]["owner"]["login"]
repo = payload["repository"]["name"]
num = payload.get("issue", payload.get("pull_request")).get("number")
tags = [arg.strip() for arg in arguments.split(",")]
name = "{name}"
url = "https://api.github.com/repos/{org}/{repo}/issues/{num}/labels/{name}".format(
**locals()
)
no_untag = []
for tag in tags:
try:
session.ghrequest("DELETE", url.format(name=tag))
except Exception:
no_untag.append(tag)
print("was not able to remove tags:", no_untag)
@write
def migrate_issue_request(
*, session: Session, payload: dict, arguments: str, local_config=None
):
"""Todo:
- Works through pagination of comments
- Works through pagination of labels
Link to non-migrated labels.
"""
if arguments.startswith("to "):
arguments = arguments[3:]
org_repo = arguments
org, repo = arguments.split("/")
target_session = yield org_repo
if not target_session:
session.post_comment(
payload["issue"]["comments_url"], "It appears that I can't do that"
)
return
issue_title = payload["issue"]["title"]
issue_body = payload["issue"]["body"]
original_org = payload["repository"]["owner"]["login"]
original_repo = payload["repository"]["name"]
original_poster = payload["issue"]["user"]["login"]
original_number = payload["issue"]["number"]
migration_requester = payload["comment"]["user"]["login"]
request_id = payload["comment"]["id"]
original_labels = [l["name"] for l in payload["issue"]["labels"]]
if original_labels:
available_labels = target_session.ghrequest(
"GET",
"https://api.github.com/repos/{org}/{repo}/labels".format(
org=org, repo=repo
),
None,
).json()
available_labels = [l["name"] for l in available_labels]
migrate_labels = [l for l in original_labels if l in available_labels]
not_set_labels = [l for l in original_labels if l not in available_labels]
new_response = target_session.create_issue(
org,
repo,
issue_title,
fix_issue_body(
issue_body,
original_poster,
original_repo,
original_org,
original_number,
migration_requester,
),
labels=migrate_labels,
)
new_issue = new_response.json()
new_comment_url = new_issue["comments_url"]
original_comments = session.ghrequest(
"GET", payload["issue"]["comments_url"], None
).json()
for comment in original_comments:
if comment["id"] == request_id:
continue
body = comment["body"]
op = comment["user"]["login"]
url = comment["html_url"]
target_session.post_comment(
new_comment_url,
body=fix_comment_body(body, op, url, original_org, original_repo),
)
if not_set_labels:
body = "I was not able to apply the following label(s): %s " % ",".join(
not_set_labels
)
target_session.post_comment(new_comment_url, body=body)
session.post_comment(
payload["issue"]["comments_url"],
body="Done as {}/{}#{}.".format(org, repo, new_issue["number"]),
)
session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "closed"})
@write
def quote(*, session, payload, arguments, local_config=None):
if arguments.lower() == "over the world":
comment_url = payload["issue"]["comments_url"]
user = payload["issue"]["user"]["login"]
session.post_comment(
comment_url,
"""
> MeeseeksDev: Gee, {user}, what do you want to do tonight?
{user}: The same thing we do every night, MeeseeksDev - try to take over the world!
""".format(
user=user
),
)
| 34.832383
| 146
| 0.561935
|
4aff52dfb013d865678539377a03d26c662c312d
| 12,280
|
py
|
Python
|
pox/openflow/flow_table.py
|
felipetomm/POX-Django
|
6060a9e2a999dc56b63826d0ec3498b11b03adce
|
[
"Apache-2.0"
] | 1
|
2019-10-20T00:05:34.000Z
|
2019-10-20T00:05:34.000Z
|
pox/openflow/flow_table.py
|
felipetomm/POX-Django
|
6060a9e2a999dc56b63826d0ec3498b11b03adce
|
[
"Apache-2.0"
] | null | null | null |
pox/openflow/flow_table.py
|
felipetomm/POX-Django
|
6060a9e2a999dc56b63826d0ec3498b11b03adce
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011,2012,2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of an OpenFlow flow table
"""
from libopenflow_01 import *
from pox.lib.revent import *
from pox.core import core
import time
import math
# FlowTable Entries:
# match - ofp_match (13-tuple)
# counters - hash from name -> count. May be stale
# actions - ordered list of ofp_action_*s to apply for matching packets
class TableEntry (object):
"""
Models a flow table entry, with a match, actions, and options/flags/counters.
Note: The current time can either be specified explicitely with the optional
'now' parameter or is taken from time.time()
"""
def __init__ (self, priority=OFP_DEFAULT_PRIORITY, cookie=0, idle_timeout=0,
hard_timeout=0, flags=0, match=ofp_match(), actions=[],
buffer_id=None, now=None):
"""
Initialize table entry
"""
if now is None: now = time.time()
self.created = now
self.last_touched = self.created
self.byte_count = 0
self.packet_count = 0
self.priority = priority
self.cookie = cookie
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.flags = flags
self.match = match
self.actions = actions
self.buffer_id = buffer_id
@staticmethod
def from_flow_mod (flow_mod):
return TableEntry(priority=flow_mod.priority,
cookie=flow_mod.cookie,
idle_timeout=flow_mod.idle_timeout,
hard_timeout=flow_mod.hard_timeout,
flags=flow_mod.flags,
match=flow_mod.match,
actions=flow_mod.actions,
buffer_id=flow_mod.buffer_id)
def to_flow_mod (self, flags=None, **kw):
if flags is None: flags = self.flags
return ofp_flow_mod(priority=self.priority,
cookie=self.cookie,
match=self.match,
idle_timeout=self.idle_timeout,
hard_timeout=self.hard_timeout,
actions=self.actions,
buffer_id=self.buffer_id,
flags=flags, **kw)
@property
def effective_priority (self):
"""
Exact matches effectively have an "infinite" priority
"""
return self.priority if self.match.is_wildcarded else (1<<16) + 1
def is_matched_by (self, match, priority=None, strict=False, out_port=None):
"""
Tests whether a given match object matches this entry
Used for, e.g., flow_mod updates
    If out_port is any value besides None, then the flow entry must contain an
output action to the specified port.
"""
match_a = lambda a: isinstance(a, ofp_action_output) and a.port == out_port
port_matches = (out_port is None) or any(match_a(a) for a in self.actions)
if strict:
return port_matches and self.match == match and self.priority == priority
else:
return port_matches and match.matches_with_wildcards(self.match)
def touch_packet (self, byte_count, now=None):
"""
Updates information of this entry based on encountering a packet.
    Updates the cumulative byte and packet counts for this entry and resets
    its idle-expiration timer.
"""
if now is None: now = time.time()
self.byte_count += byte_count
self.packet_count += 1
self.last_touched = now
def is_idle_timed_out (self, now=None):
if now is None: now = time.time()
if self.idle_timeout > 0:
if (now - self.last_touched) > self.idle_timeout:
return True
return False
def is_hard_timed_out (self, now=None):
if now is None: now = time.time()
if self.hard_timeout > 0:
if (now - self.created) > self.hard_timeout:
return True
return False
def is_expired (self, now=None):
"""
Tests whether this flow entry is expired due to its idle or hard timeout
"""
if now is None: now = time.time()
return self.is_idle_timed_out(now) or self.is_hard_timed_out(now)
def __str__ (self):
return type(self).__name__ + "\n " + self.show()
def __repr__ (self):
return "TableEntry(" + self.show() + ")"
def show (self):
outstr = ''
outstr += "priority=%s, " % self.priority
outstr += "cookie=%x, " % self.cookie
outstr += "idle_timeout=%d, " % self.idle_timeout
outstr += "hard_timeout=%d, " % self.hard_timeout
outstr += "match=%s, " % self.match
outstr += "actions=%s, " % repr(self.actions)
outstr += "buffer_id=%s" % str(self.buffer_id)
return outstr
def flow_stats (self, now=None):
if now is None: now = time.time()
dur_nsec,dur_sec = math.modf(now - self.created)
return ofp_flow_stats(match=self.match,
duration_sec=int(dur_sec),
duration_nsec=int(dur_nsec * 1e9),
priority=self.priority,
idle_timeout=self.idle_timeout,
hard_timeout=self.hard_timeout,
cookie=self.cookie,
packet_count=self.packet_count,
byte_count=self.byte_count,
actions=self.actions)
def to_flow_removed (self, now=None, reason=None):
#TODO: Rename flow_stats to to_flow_stats and refactor?
if now is None: now = time.time()
dur_nsec,dur_sec = math.modf(now - self.created)
fr = ofp_flow_removed()
fr.match = self.match
fr.cookie = self.cookie
fr.priority = self.priority
fr.reason = reason
fr.duration_sec = int(dur_sec)
fr.duration_nsec = int(dur_nsec * 1e9)
fr.idle_timeout = self.idle_timeout
fr.hard_timeout = self.hard_timeout
fr.packet_count = self.packet_count
fr.byte_count = self.byte_count
return fr
class FlowTableModification (Event):
def __init__ (self, added=[], removed=[], reason=None):
Event.__init__(self)
self.added = added
self.removed = removed
# Reason for modification.
# Presently, this is only used for removals and is either one of OFPRR_x,
# or None if it does not correlate to any of the items in the spec.
self.reason = reason
class FlowTable (EventMixin):
"""
General model of a flow table.
Maintains an ordered list of flow entries, and finds matching entries for
packets and other entries. Supports expiration of flows.
"""
_eventMixin_events = set([FlowTableModification])
def __init__ (self):
EventMixin.__init__(self)
# Table is a list of TableEntry sorted by descending effective_priority.
self._table = []
def _dirty (self):
"""
Call when table changes
"""
pass
@property
def entries (self):
return self._table
def __len__ (self):
return len(self._table)
def add_entry (self, entry):
assert isinstance(entry, TableEntry)
#self._table.append(entry)
#self._table.sort(key=lambda e: e.effective_priority, reverse=True)
# Use binary search to insert at correct place
# This is faster even for modest table sizes, and way, way faster
# as the tables grow larger.
priority = entry.effective_priority
table = self._table
low = 0
high = len(table)
while low < high:
middle = (low + high) // 2
if priority >= table[middle].effective_priority:
high = middle
continue
low = middle + 1
table.insert(low, entry)
self._dirty()
self.raiseEvent(FlowTableModification(added=[entry]))
def remove_entry (self, entry, reason=None):
assert isinstance(entry, TableEntry)
self._table.remove(entry)
self._dirty()
self.raiseEvent(FlowTableModification(removed=[entry], reason=reason))
def matching_entries (self, match, priority=0, strict=False, out_port=None):
entry_match = lambda e: e.is_matched_by(match, priority, strict, out_port)
return [ entry for entry in self._table if entry_match(entry) ]
def flow_stats (self, match, out_port=None, now=None):
mc_es = self.matching_entries(match=match, strict=False, out_port=out_port)
return [ e.flow_stats(now) for e in mc_es ]
def aggregate_stats (self, match, out_port=None):
mc_es = self.matching_entries(match=match, strict=False, out_port=out_port)
packet_count = 0
byte_count = 0
flow_count = 0
for entry in mc_es:
packet_count += entry.packet_count
byte_count += entry.byte_count
flow_count += 1
return ofp_aggregate_stats(packet_count=packet_count,
byte_count=byte_count,
flow_count=flow_count)
def _remove_specific_entries (self, flows, reason=None):
#for entry in flows:
# self._table.remove(entry)
#self._table = [entry for entry in self._table if entry not in flows]
if not flows: return
self._dirty()
remove_flows = set(flows)
i = 0
while i < len(self._table):
entry = self._table[i]
if entry in remove_flows:
del self._table[i]
remove_flows.remove(entry)
if not remove_flows: break
else:
i += 1
assert len(remove_flows) == 0
self.raiseEvent(FlowTableModification(removed=flows, reason=reason))
def remove_expired_entries (self, now=None):
idle = []
hard = []
if now is None: now = time.time()
for entry in self._table:
if entry.is_idle_timed_out(now):
idle.append(entry)
elif entry.is_hard_timed_out(now):
hard.append(entry)
self._remove_specific_entries(idle, OFPRR_IDLE_TIMEOUT)
self._remove_specific_entries(hard, OFPRR_HARD_TIMEOUT)
def remove_matching_entries (self, match, priority=0, strict=False,
out_port=None, reason=None):
remove_flows = self.matching_entries(match, priority, strict, out_port)
self._remove_specific_entries(remove_flows, reason=reason)
return remove_flows
def entry_for_packet (self, packet, in_port):
"""
Finds the flow table entry that matches the given packet.
Returns the highest priority flow table entry that matches the given packet
on the given in_port, or None if no matching entry is found.
"""
packet_match = ofp_match.from_packet(packet, in_port, spec_frags = True)
for entry in self._table:
if entry.match.matches_with_wildcards(packet_match,
consider_other_wildcards=False):
return entry
return None
def check_for_overlapping_entry (self, in_entry):
    # FELIPE TOMM - TCC
    # This function checks whether an identical flow already exists.
    # Will this be my base function?
    log = core.getLogger()
    log.debug("Debug TCC - Flow Table: starting conflict analysis")
"""
Tests if the input entry overlaps with another entry in this table.
Returns true if there is an overlap, false otherwise. Since the table is
sorted, there is only a need to check a certain portion of it.
"""
#NOTE: Assumes that entries are sorted by decreasing effective_priority
#NOTE: Ambiguous whether matching should be based on effective_priority
# or the regular priority. Doing it based on effective_priority
# since that's what actually affects packet matching.
#NOTE: We could improve performance by doing a binary search to find the
# right priority entries.
priority = in_entry.effective_priority
for e in self._table:
if e.effective_priority < priority:
break
elif e.effective_priority > priority:
continue
else:
if e.is_matched_by(in_entry.match) or in_entry.is_matched_by(e.match):
print ("Debug TCC - Flow Table: O Flow: ja existe.")
return True
return False
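# A small usage sketch (not part of POX itself) illustrating the ordering
# guarantee documented in add_entry above: entries are kept sorted by
# descending effective_priority, so lookups and the overlap check can stop
# scanning early. The priorities used here are arbitrary example values.
def _flow_table_ordering_demo ():
  table = FlowTable()
  low = TableEntry(priority=1, match=ofp_match())
  high = TableEntry(priority=100, match=ofp_match())
  table.add_entry(low)
  table.add_entry(high)
  # The higher-priority wildcarded entry now sits first in the table.
  assert table.entries[0] is high
  # An identical wildcarded match at the same priority is reported as overlapping.
  assert table.check_for_overlapping_entry(TableEntry(priority=100,
                                                      match=ofp_match()))
  return table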
| 33.922652
| 79
| 0.657573
|
9e61dcccd30293aafc84b47977315c546730930c
| 4,421
|
py
|
Python
|
openstack_dashboard/contrib/sahara/content/data_processing/cluster_templates/workflows/copy.py
|
timpricecatalyst/horizon
|
8279ae0ed464e62e1c91e78341342160f8a07172
|
[
"Apache-2.0"
] | 1
|
2021-01-20T00:14:15.000Z
|
2021-01-20T00:14:15.000Z
|
openstack_dashboard/contrib/sahara/content/data_processing/cluster_templates/workflows/copy.py
|
timpricecatalyst/horizon
|
8279ae0ed464e62e1c91e78341342160f8a07172
|
[
"Apache-2.0"
] | 1
|
2019-10-27T15:57:25.000Z
|
2019-10-27T15:57:25.000Z
|
openstack_dashboard/contrib/sahara/content/data_processing/cluster_templates/workflows/copy.py
|
timpricecatalyst/horizon
|
8279ae0ed464e62e1c91e78341342160f8a07172
|
[
"Apache-2.0"
] | 15
|
2017-01-12T10:40:00.000Z
|
2019-04-19T08:28:05.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.contrib.sahara.api import sahara as saharaclient
import openstack_dashboard.contrib.sahara.content.data_processing. \
cluster_templates.workflows.create as create_flow
import openstack_dashboard.contrib.sahara.content.data_processing.utils. \
workflow_helpers as wf_helpers
LOG = logging.getLogger(__name__)
class CopyClusterTemplate(create_flow.ConfigureClusterTemplate):
success_message = _("Cluster Template copy %s created")
entry_point = "generalconfigaction"
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
self.cluster_template_id = context_seed["template_id"]
try:
self.template = saharaclient.cluster_template_get(
request,
self.cluster_template_id)
self._set_configs_to_copy(self.template.cluster_configs)
request.GET = request.GET.copy()
request.GET.update({"plugin_name": self.template.plugin_name,
"hadoop_version": self.template.hadoop_version,
"aa_groups": self.template.anti_affinity})
super(CopyClusterTemplate, self).__init__(request, context_seed,
entry_point, *args,
**kwargs)
# Initialize node groups.
# TODO(rdopieralski) The same (or very similar) code appears
# multiple times in this dashboard. It should be refactored to
# a function.
for step in self.steps:
if isinstance(step, create_flow.ConfigureNodegroups):
ng_action = step.action
template_ngs = self.template.node_groups
if 'forms_ids' in request.POST:
continue
ng_action.groups = []
for i, templ_ng in enumerate(template_ngs):
group_name = "group_name_%d" % i
template_id = "template_id_%d" % i
count = "count_%d" % i
serialized = "serialized_%d" % i
# save the original node group with all its fields in
# case the template id is missing
serialized_val = base64.urlsafe_b64encode(
json.dumps(wf_helpers.clean_node_group(templ_ng)))
ng = {
"name": templ_ng["name"],
"count": templ_ng["count"],
"id": i,
"deletable": "true",
"serialized": serialized_val
}
if "node_group_template_id" in templ_ng:
ng["template_id"] = templ_ng[
"node_group_template_id"]
ng_action.groups.append(ng)
wf_helpers.build_node_group_fields(
ng_action, group_name, template_id, count,
serialized)
elif isinstance(step, create_flow.GeneralConfig):
fields = step.action.fields
fields["cluster_template_name"].initial = (
self.template.name + "-copy")
fields['use_autoconfig'].initial = (
self.template.use_autoconfig)
fields["description"].initial = self.template.description
except Exception:
exceptions.handle(request,
_("Unable to fetch template to copy."))
| 44.21
| 79
| 0.564804
|
9a7055d7819d566f528f37f009efb8d94962d9fc
| 1,692
|
py
|
Python
|
models/object_detection/model_templates/face-detection/tools/test_out_to_wider_predictions.py
|
dqawami/openvino_training_extensions
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
[
"Apache-2.0"
] | 775
|
2019-03-01T02:13:33.000Z
|
2020-09-07T22:49:15.000Z
|
models/object_detection/model_templates/face-detection/tools/test_out_to_wider_predictions.py
|
dqawami/openvino_training_extensions
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
[
"Apache-2.0"
] | 604
|
2020-09-08T12:29:49.000Z
|
2022-03-31T21:51:08.000Z
|
models/object_detection/model_templates/face-detection/tools/test_out_to_wider_predictions.py
|
dqawami/openvino_training_extensions
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
[
"Apache-2.0"
] | 290
|
2019-02-28T20:32:11.000Z
|
2020-09-07T05:51:41.000Z
|
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
""" This script converts output of test.py (mmdetection) to a set of files
that can be passed to official WiderFace evaluation procedure."""
import argparse
from mmcv import DictAction
from ote.datasets.face_detection.wider_face.convert_predictions import convert_to_wider
def parse_args():
""" Parses input arguments. """
parser = argparse.ArgumentParser(
description='This script converts output of test.py (mmdetection) to '
'a set of files that can be passed to official WiderFace '
'evaluation procedure.')
parser.add_argument('config', help='test config file path')
parser.add_argument('input', help='output result file from test.py')
parser.add_argument('out_folder', help='folder where to store WiderFace '
'evaluation-friendly output')
parser.add_argument('--update_config', nargs='+', action=DictAction,
help='Update configuration file by parameters specified here.')
args = parser.parse_args()
return args
convert_to_wider(**vars(parse_args()))
| 40.285714
| 87
| 0.70922
|
9b3cd1e2f248896590179b4037ec913e5fee864d
| 22,701
|
py
|
Python
|
volttrontesting/multiplatform/test_multiplatform_pubsub.py
|
laroque/volttron
|
3ea851b718aa87d7f2c824298cf8a1a4c5920460
|
[
"Apache-2.0"
] | 2
|
2019-10-03T17:00:34.000Z
|
2019-10-03T17:00:38.000Z
|
volttrontesting/multiplatform/test_multiplatform_pubsub.py
|
laroque/volttron
|
3ea851b718aa87d7f2c824298cf8a1a4c5920460
|
[
"Apache-2.0"
] | 2
|
2018-08-29T13:45:17.000Z
|
2018-09-06T12:34:23.000Z
|
volttrontesting/multiplatform/test_multiplatform_pubsub.py
|
cbs-iiith/volttron
|
a676d4af19a808581dde172ab08820087854e157
|
[
"Apache-2.0"
] | 1
|
2019-04-04T17:13:46.000Z
|
2019-04-04T17:13:46.000Z
|
import os
import gevent
import pytest
import json
from volttron.platform import get_ops
from volttrontesting.utils.utils import (poll_gevent_sleep,
messages_contains_prefix)
from volttrontesting.fixtures.volttron_platform_fixtures import get_rand_vip, \
build_wrapper, get_rand_ip_and_port
from volttrontesting.utils.platformwrapper import PlatformWrapper
from volttron.platform.agent.known_identities import PLATFORM_DRIVER, CONFIGURATION_STORE
subscription_results = {}
count = 0
def onmessage(peer, sender, bus, topic, headers, message):
global subscription_results
subscription_results[topic] = {'headers': headers, 'message': message}
print("subscription_results[{}] = {}".format(topic, subscription_results[topic]))
@pytest.fixture(scope="module")
def get_volttron_instances(request):
""" Fixture to get more than 1 volttron instance for test
Use this fixture to get more than 1 volttron instance for test. This
returns a function object that should be called with number of instances
as parameter to get a list of volttron instnaces. The fixture also
takes care of shutting down all the instances at the end
Example Usage:
def test_function_that_uses_n_instances(get_volttron_instances):
instance1, instance2, instance3 = get_volttron_instances(3)
@param request: pytest request object
    @return: function that can be used to get any number of
volttron instances for testing.
"""
all_instances = []
def get_n_volttron_instances(n, should_start=True, address_file=True):
get_n_volttron_instances.count = n
instances = []
vip_addresses = []
web_addresses = []
instances = []
addr_config = dict()
names = []
for i in range(0, n):
address = get_rand_vip()
web_address = "http://{}".format(get_rand_ip_and_port())
vip_addresses.append(address)
web_addresses.append(web_address)
nm = 'platform{}'.format(i + 1)
names.append(nm)
for i in range(0, n):
address = vip_addresses[i]
web_address = web_addresses[i]
wrapper = PlatformWrapper()
addr_file = os.path.join(wrapper.volttron_home, 'external_address.json')
if address_file:
with open(addr_file, 'w') as f:
json.dump(web_addresses, f)
gevent.sleep(.1)
wrapper.startup_platform(address, bind_web_address=web_address, instance_name=names[i], setupmode=True)
wrapper.skip_cleanup = True
instances.append(wrapper)
gevent.sleep(11)
for i in range(0, n):
instances[i].shutdown_platform()
gevent.sleep(1)
# del instances[:]
for i in range(0, n):
address = vip_addresses.pop(0)
web_address = web_addresses.pop(0)
print address, web_address
instances[i].startup_platform(address, bind_web_address=web_address, instance_name=names[i])
instances[i].allow_all_connections()
gevent.sleep(11)
instances = instances if n > 1 else instances[0]
get_n_volttron_instances.instances = instances
return instances
return get_n_volttron_instances
@pytest.fixture(scope="module")
def build_instances(request):
""" Fixture to get more than 1 volttron instance for test
Use this fixture to get more than 1 volttron instance for test. This
returns a function object that should be called with number of instances
as parameter to get a list of volttron instnaces. The fixture also
takes care of shutting down all the instances at the end
Example Usage:
def test_function_that_uses_n_instances(get_volttron_instances):
instance1, instance2, instance3 = get_volttron_instances(3)
@param request: pytest request object
    @return: function that can be used to get any number of
volttron instances for testing.
"""
all_instances = []
def build_n_volttron_instances(n, bad_config=False, add_my_address=True):
build_n_volttron_instances.count = n
instances = []
vip_addresses = []
instances = []
addr_config = dict()
names = []
for i in range(0, n):
address = get_rand_vip()
vip_addresses.append(address)
nm = 'platform{}'.format(i + 1)
names.append(nm)
for i in range(0, n):
address = vip_addresses[i]
wrapper = PlatformWrapper()
wrapper.startup_platform(address, instance_name=names[i])
wrapper.skip_cleanup = True
instances.append(wrapper)
gevent.sleep(1)
for i in range(0, n):
instances[i].shutdown_platform()
for i in range(0, n):
addr_config.clear()
for j in range(0, n):
if j != i or (j==i and add_my_address):
name = names[j]
addr_config[name] = dict()
addr_config[name]['instance-name'] = names[j]
if bad_config:
addr_config[name]['vip-address123'] = vip_addresses[j]
else:
addr_config[name]['vip-address'] = vip_addresses[j]
addr_config[name]['serverkey'] = instances[j].serverkey
address_file = os.path.join(instances[i].volttron_home, 'external_platform_discovery.json')
if address_file:
with open(address_file, 'w') as f:
json.dump(addr_config, f)
gevent.sleep(1)
for i in range(0, n):
address = vip_addresses.pop(0)
instances[i].startup_platform(address, instance_name=names[i])
instances[i].allow_all_connections()
gevent.sleep(11)
instances = instances if n > 1 else instances[0]
build_n_volttron_instances.instances = instances
return instances
return build_n_volttron_instances
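# A minimal sketch of the external_platform_discovery.json content that
# build_n_volttron_instances writes above: one entry per peer platform, keyed
# by instance name and carrying its instance-name, vip-address and serverkey.
# The address and serverkey below are hypothetical example values.
def example_discovery_config():
    return {
        "platform2": {
            "instance-name": "platform2",
            "vip-address": "tcp://127.0.0.1:22916",
            "serverkey": "EXAMPLE-SERVERKEY",
        }
    }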
@pytest.fixture(scope="module")
def multi_platform_connection(request, get_volttron_instances):
"""
Adds the volttron-central-address and volttron-central-serverkey to the
main instance configuration file before starting the platform
"""
p1, p2, p3 = get_volttron_instances(3)
gevent.sleep(5)
# configure vc
agent1 = p1.build_agent()
agent2 = p2.build_agent()
agent3 = p3.build_agent()
def stop():
agent1.core.stop()
agent2.core.stop()
agent3.core.stop()
p1.shutdown_platform()
p2.shutdown_platform()
p3.shutdown_platform()
request.addfinalizer(stop)
return agent1, agent2, agent3
@pytest.fixture(scope="module")
def five_platform_connection(request, get_volttron_instances):
"""
Adds the volttron-central-address and volttron-central-serverkey to the
main instance configuration file before starting the platform
"""
p1, p2, p3, p4, p5 = get_volttron_instances(5)
gevent.sleep(5)
# configure vc
agent1 = p1.build_agent()
agent2 = p2.build_agent()
agent3 = p3.build_agent()
agent4 = p4.build_agent()
agent5 = p5.build_agent()
def stop():
agent1.core.stop()
agent2.core.stop()
agent3.core.stop()
agent4.core.stop()
agent5.core.stop()
p1.shutdown_platform()
p2.shutdown_platform()
p3.shutdown_platform()
p4.shutdown_platform()
p5.shutdown_platform()
request.addfinalizer(stop)
return agent1, agent2, agent3, agent4, agent5
def test_multiplatform_pubsub(request, multi_platform_connection):
p1_publisher, p2_listener, p3_listener = multi_platform_connection
def callback2(peer, sender, bus, topicdr, headers, message):
print message
assert message == [{'point': 'value'}]
def callback3(peer, sender, bus, topic, headers, message):
print message
def callback4(peer, sender, bus, topic, headers, message):
print message
def callback5(peer, sender, bus, topic, headers, message):
print message
p2_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=onmessage,
all_platforms=True)
gevent.sleep(2)
p3_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=onmessage)
print "publish"
prefix = 'devices'
for i in range(10):
p1_publisher.vip.pubsub.publish(peer='pubsub',
topic='devices/campus/building1',
message=[{'point': 'value'}])
# gevent.sleep(0.1)
poll_gevent_sleep(2, lambda: messages_contains_prefix(prefix,
subscription_results))
message = subscription_results['devices/campus/building1']['message']
assert message == [{'point': 'value'}]
gevent.sleep(5)
def test_multiplatform_2_publishers(request, five_platform_connection):
subscription_results2 = {}
subscription_results3 = {}
subscription_results4 = {}
subscription_results5 = {}
p1_publisher, p2_listener, p3_listener, p4_listener, p5_publisher = five_platform_connection
def callback2(peer, sender, bus, topic, headers, message):
subscription_results2[topic] = {'headers': headers, 'message': message}
print("platform2 sub results [{}] = {}".format(topic, subscription_results2[topic]))
def callback3(peer, sender, bus, topic, headers, message):
subscription_results3[topic] = {'headers': headers, 'message': message}
print("platform3 sub results [{}] = {}".format(topic, subscription_results3[topic]))
def callback4(peer, sender, bus, topic, headers, message):
subscription_results4[topic] = {'headers': headers, 'message': message}
print("platform4 sub results [{}] = {}".format(topic, subscription_results4[topic]))
def callback5(peer, sender, bus, topic, headers, message):
subscription_results5[topic] = {'headers': headers, 'message': message}
print("platform4 sub results [{}] = {}".format(topic, subscription_results5[topic]))
p2_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback2,
all_platforms=True)
p3_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback3,
all_platforms=True)
gevent.sleep(2)
p4_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='analysis',
callback=callback4,
all_platforms=True)
p5_publisher.vip.pubsub.subscribe(peer='pubsub',
prefix='analysis',
callback=callback5)
gevent.sleep(2)
print "publish"
prefix = 'devices'
for i in range(5):
p1_publisher.vip.pubsub.publish(peer='pubsub', topic='devices/campus/building1', message=[{'point': 'value'}])
poll_gevent_sleep(1, lambda: messages_contains_prefix(prefix,
subscription_results2))
message = subscription_results2['devices/campus/building1']['message']
assert message == [{'point': 'value'}]
message = subscription_results3['devices/campus/building1']['message']
assert message == [{'point': 'value'}]
prefix = 'analysis'
for i in range(5):
p5_publisher.vip.pubsub.publish(peer='pubsub', topic='analysis/airside/campus/building1',
message=[{'result': 'pass'}])
# gevent.sleep(0.1)
poll_gevent_sleep(2, lambda: messages_contains_prefix(prefix,
subscription_results3))
message = subscription_results4['analysis/airside/campus/building1']['message']
assert message == [{'result': 'pass'}]
message = subscription_results5['analysis/airside/campus/building1']['message']
assert message == [{'result': 'pass'}]
def test_multiplatform_subscribe_unsubscribe(request, multi_platform_connection):
subscription_results2 = {}
subscription_results3 = {}
message_count = 0
p1_publisher, p2_listener, p3_listener = multi_platform_connection
def callback2(peer, sender, bus, topic, headers, message):
subscription_results2[topic] = {'headers': headers, 'message': message}
print("platform2 sub results [{}] = {}".format(topic, subscription_results2[topic]))
def callback3(peer, sender, bus, topic, headers, message):
subscription_results3[topic] = {'headers': headers, 'message': message}
print("platform3 sub results [{}] = {}".format(topic, subscription_results3[topic]))
p2_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback2,
all_platforms=True)
p3_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback3,
all_platforms=True)
gevent.sleep(2)
prefix = 'devices'
i = 0
for i in range(2):
p1_publisher.vip.pubsub.publish(peer='pubsub', topic='devices/campus/building1',
message=[{'point': 'value' + str(i)}])
gevent.sleep(0.3)
message = subscription_results2['devices/campus/building1']['message']
assert message == [{'point': 'value' + str(i)}]
message = subscription_results3['devices/campus/building1']['message']
assert message == [{'point': 'value' + str(i)}]
print "pass"
    # Listener agent on platform 2 unsubscribes from prefix='devices'
p2_listener.vip.pubsub.unsubscribe(peer='pubsub', prefix='devices', callback=callback2, all_platforms=True)
gevent.sleep(0.2)
p1_publisher.vip.pubsub.publish(peer='pubsub', topic='devices/campus/building1',
message=[{'point': 'value' + str(2)}])
gevent.sleep(0.4)
message = subscription_results2['devices/campus/building1']['message']
assert message == [{'point': 'value1'}]
gevent.sleep(0.4)
message = subscription_results3['devices/campus/building1']['message']
assert message == [{'point': 'value2'}]
def test_multiplatform_stop_subscriber(request, multi_platform_connection):
subscription_results2 = {}
subscription_results3 = {}
message_count = 0
p1_publisher, p2_listener, p3_listener = multi_platform_connection
def callback2(peer, sender, bus, topic, headers, message):
subscription_results2[topic] = {'headers': headers, 'message': message}
print("platform2 sub results [{}] = {}".format(topic, subscription_results2[topic]))
def callback3(peer, sender, bus, topic, headers, message):
subscription_results3[topic] = {'headers': headers, 'message': message}
print("platform3 sub results [{}] = {}".format(topic, subscription_results3[topic]))
p2_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback2,
all_platforms=True)
p3_listener.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback3,
all_platforms=True)
gevent.sleep(2)
prefix = 'devices'
i = 0
for i in range(2):
p1_publisher.vip.pubsub.publish(peer='pubsub', topic='devices/campus/building1',
message=[{'point': 'value' + str(i)}])
gevent.sleep(0.3)
message = subscription_results2['devices/campus/building1']['message']
assert message == [{'point': 'value' + str(i)}]
message = subscription_results3['devices/campus/building1']['message']
assert message == [{'point': 'value' + str(i)}]
print "pass"
# Stop listener agent on platform 2
p2_listener.core.stop()
gevent.sleep(0.2)
p1_publisher.vip.pubsub.publish(peer='pubsub', topic='devices/campus/building1',
message=[{'point': 'value' + str(2)}])
gevent.sleep(0.4)
message = subscription_results2['devices/campus/building1']['message']
assert message == [{'point': 'value1'}]
gevent.sleep(0.4)
message = subscription_results3['devices/campus/building1']['message']
assert message == [{'point': 'value2'}]
def test_missing_address_file(request, get_volttron_instances):
p1 = get_volttron_instances(1, address_file=False)
gevent.sleep(1)
p1.shutdown_platform()
def test_multiplatform_without_setup_mode(request, build_instances):
subscription_results1 = {}
subscription_results3 = {}
p1, p2, p3 = build_instances(3)
gevent.sleep(1)
#Get three agents
agent1 = p1.build_agent(identity="agent1")
agent2 = p2.build_agent(identity="agent2")
agent3 = p2.build_agent(identity="agent3")
def stop():
agent1.core.stop()
agent2.core.stop()
agent3.core.stop()
p1.shutdown_platform()
p2.shutdown_platform()
p3.shutdown_platform()
request.addfinalizer(stop)
def callback1(peer, sender, bus, topic, headers, message):
subscription_results1[topic] = {'headers': headers, 'message': message}
print("platform2 sub results [{}] = {}".format(topic, subscription_results1[topic]))
def callback3(peer, sender, bus, topic, headers, message):
subscription_results3[topic] = {'headers': headers, 'message': message}
print("platform3 sub results [{}] = {}".format(topic, subscription_results3[topic]))
agent3.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback3,
all_platforms=True)
gevent.sleep(0.2)
agent2.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback1,
all_platforms=True)
gevent.sleep(1)
for i in range(0, 2):
agent1.vip.pubsub.publish(peer='pubsub', topic='devices/building1',
message=[{'point': 'value' + str(i)}])
gevent.sleep(1)
try:
message = subscription_results3['devices/building1']['message']
assert message == [{'point': 'value' + str(i)}]
message = subscription_results1['devices/building1']['message']
assert message == [{'point': 'value' + str(i)}]
except KeyError:
pass
def test_multiplatform_local_subscription(request, build_instances):
subscription_results1 = {}
p1 = build_instances(1, add_my_address=True)
gevent.sleep(1)
    # Get two agents
agent1 = p1.build_agent(identity="agent1")
agent2 = p1.build_agent(identity="agent2")
def stop():
agent1.core.stop()
agent2.core.stop()
p1.shutdown_platform()
request.addfinalizer(stop)
def callback1(peer, sender, bus, topic, headers, message):
global count
count += 1
subscription_results1[topic] = {'headers': headers, 'message': message, 'count': count}
print("platform2 sub results [{}] = {}".format(topic, subscription_results1[topic]))
agent1.vip.pubsub.subscribe(peer='pubsub',
prefix='devices',
callback=callback1,
all_platforms=True)
gevent.sleep(1)
for i in range(1, 5):
agent2.vip.pubsub.publish(peer='pubsub', topic='devices/building1',
message=[{'point': 'value' + str(i)}])
gevent.sleep(1)
try:
message = subscription_results1['devices/building1']['message']
assert message == [{'point': 'value' + str(i)}]
assert i == subscription_results1['devices/building1']['count']
except KeyError:
pass
def test_multiplatform_bad_discovery_file(request, build_instances):
p1, p2, p3 = build_instances(3, bad_config=True)
gevent.sleep(1)
p1.shutdown_platform()
p2.shutdown_platform()
p3.shutdown_platform()
def test_multiplatform_rpc(request, get_volttron_instances):
p1, p2 = get_volttron_instances(2)
_default_config = {
"test_max": {
"threshold_max": 10
}
}
threshold_detection_uuid = p1.install_agent(
agent_dir=get_ops("ThresholdDetectionAgent"),
config_file=_default_config,
start=True)
updated_config = {
"updated_topic": {
"threshold_max": 10,
"threshold_min": 2,
}
}
test_agent = p2.build_agent()
kwargs = {"external_platform": 'platform1'}
test_agent.vip.rpc.call(CONFIGURATION_STORE,
'manage_store',
'platform.thresholddetection',
'config',
json.dumps(updated_config),
'json',
**kwargs).get(timeout=10)
config = test_agent.vip.rpc.call(CONFIGURATION_STORE,
'manage_get',
'platform.thresholddetection',
'config',
raw=True,
**kwargs).get(timeout=10)
config = json.loads(config)
try:
assert config == updated_config
except KeyError:
pytest.fail("Expecting config change : {}".format(config))
    def stop():
        p1.stop_agent(threshold_detection_uuid)
        p1.remove_agent(threshold_detection_uuid)
        test_agent.core.stop()
        p1.shutdown_platform()
        p2.shutdown_platform()
request.addfinalizer(stop)
| 38.411168
| 118
| 0.597639
|
ed17febb1705a0cbae95977819fc6eed05cbd658
| 2,091
|
py
|
Python
|
examples/pie_and_polar_charts/pie_demo_features.py
|
argriffing/matplotlib
|
5555f5463fb5f995a59f7651c0034a5d6a4c7e84
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2019-04-15T09:40:53.000Z
|
2019-04-15T09:40:53.000Z
|
examples/pie_and_polar_charts/pie_demo_features.py
|
argriffing/matplotlib
|
5555f5463fb5f995a59f7651c0034a5d6a4c7e84
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2021-05-10T17:57:41.000Z
|
2021-07-26T16:23:09.000Z
|
examples/pie_and_polar_charts/pie_demo_features.py
|
kdavies4/matplotlib
|
330aefbd031ee227213afe655c5158320015d45b
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2015-12-21T07:24:54.000Z
|
2015-12-21T07:24:54.000Z
|
"""
Demo of a basic pie chart plus a few additional features.
In addition to the basic pie chart, this demo shows a few optional features:
* slice labels
* auto-labeling the percentage
* offsetting a slice with "explode"
* drop-shadow
* custom start angle
Note about the custom start angle:
The default ``startangle`` is 0, which would start the "Frogs" slice on the
positive x-axis. This example sets ``startangle = 90`` such that everything is
rotated counter-clockwise by 90 degrees, and the frog slice starts on the
positive y-axis.
"""
import matplotlib.pyplot as plt
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
fig = plt.figure()
ax = fig.gca()
import numpy as np
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(0, 0), frame=True)
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(1, 1), frame=True)
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(0, 1), frame=True)
ax.pie(np.random.random(4), explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
radius=0.25, center=(1, 0), frame=True)
ax.set_xticks([0, 1])
ax.set_yticks([0, 1])
ax.set_xticklabels(["Sunny", "Cloudy"])
ax.set_yticklabels(["Dry", "Rainy"])
ax.set_xlim((-0.5, 1.5))
ax.set_ylim((-0.5, 1.5))
# Set aspect ratio to be equal so that pie is drawn as a circle.
ax.set_aspect('equal')
plt.show()
| 34.85
| 78
| 0.682927
|
31eafa257e592d661db68da53e8821c05e69b030
| 63
|
py
|
Python
|
src/efipy/__init__.py
|
LiorAvrahami/efipy
|
3ef07678f54a11d8d3fdd79eab8b493aba5f8aa3
|
[
"MIT"
] | 2
|
2021-09-12T09:42:51.000Z
|
2021-09-23T07:21:15.000Z
|
src/efipy/__init__.py
|
LiorAvrahami/efipy
|
3ef07678f54a11d8d3fdd79eab8b493aba5f8aa3
|
[
"MIT"
] | null | null | null |
src/efipy/__init__.py
|
LiorAvrahami/efipy
|
3ef07678f54a11d8d3fdd79eab8b493aba5f8aa3
|
[
"MIT"
] | null | null | null |
from ._efipy import run,inquire_output_path,inquire_input_path
| 31.5
| 62
| 0.888889
|
027b3e587c788eb54f87292689da65fe99b99bc3
| 3,400
|
py
|
Python
|
packages/jet_bridge_base/jet_bridge_base/fields/field.py
|
F2210/jet-bridge
|
72b1af5cd7df585a4026d65170d3607f8cdf6bea
|
[
"MIT"
] | null | null | null |
packages/jet_bridge_base/jet_bridge_base/fields/field.py
|
F2210/jet-bridge
|
72b1af5cd7df585a4026d65170d3607f8cdf6bea
|
[
"MIT"
] | null | null | null |
packages/jet_bridge_base/jet_bridge_base/fields/field.py
|
F2210/jet-bridge
|
72b1af5cd7df585a4026d65170d3607f8cdf6bea
|
[
"MIT"
] | null | null | null |
try:
    from collections.abc import Mapping  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Mapping
from jet_bridge_base.exceptions.validation_error import ValidationError
class empty:
"""
This class is used to represent no data being provided for a given input
or output value.
It is required because `None` may be a valid input or output value.
"""
pass
class Field(object):
creation_counter = 0
field_name = None
field_error_messages = {
'required': 'this field is required',
'null': 'this field may not be null'
}
def __init__(self, *args, **kwargs):
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.required = kwargs.pop('required', True)
self.read_only = kwargs.pop('read_only', False)
self.write_only = kwargs.pop('write_only', False)
self.many = kwargs.pop('many', False)
self.allow_many = kwargs.pop('allow_many', False)
self.default = kwargs.pop('default', empty)
messages = {}
for cls in reversed(self.__class__.__mro__):
messages.update(getattr(cls, 'field_error_messages', {}))
self.error_messages = messages
def validate(self, value):
return value
def get_default(self):
if callable(self.default):
return self.default()
return self.default
def get_value(self, data):
try:
if isinstance(data, Mapping):
field_value = data[self.field_name]
else:
field_value = getattr(data, self.field_name)
except (KeyError, AttributeError):
if self.default is not empty:
return self.get_default()
else:
return empty
if not getattr(self, 'many', False) and not getattr(self, 'allow_many', False) and isinstance(field_value, list):
field_value = field_value[0]
if isinstance(field_value, bytes):
field_value = field_value.decode('utf8')
return field_value
def run_validation(self, value):
if value is empty:
if self.required:
# raise ValidationError('Field is required')
self.error('required')
else:
return None
return self.to_internal_value(value)
def to_internal_value_item(self, value):
raise NotImplementedError
def to_internal_value(self, value):
if self.many:
if value is empty:
return []
return list(map(lambda x: self.to_internal_value_item(x), value))
else:
return self.to_internal_value_item(value)
def to_representation_item(self, value):
raise NotImplementedError
def to_representation(self, value):
if self.many:
return list(map(lambda x: self.to_representation_item(x), value or []))
else:
return self.to_representation_item(value)
def error(self, key, **kwargs):
"""
A helper method that simply raises a validation error.
"""
try:
msg = self.error_messages[key]
except KeyError:
class_name = self.__class__.__name__
raise AssertionError('Error with key={} is not found for class={}'.format(key, class_name))
message_string = msg.format(**kwargs)
raise ValidationError(message_string, code=key)
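# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical subclass showing how the hooks above fit together:
# `run_validation` handles the required/empty logic, `to_internal_value_item`
# parses a single raw value, and `error` raises a ValidationError by key.
class IntegerFieldSketch(Field):
    field_error_messages = {
        'invalid': 'a valid integer is required'
    }

    def to_internal_value_item(self, value):
        try:
            return int(value)
        except (TypeError, ValueError):
            self.error('invalid')

    def to_representation_item(self, value):
        return int(value)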
| 31.481481
| 121
| 0.610882
|
97e30983ec9efe953a32bf83ee1b7c9901d2d62c
| 951
|
py
|
Python
|
stdlibs/__init__.py
|
thatch/stdlibs
|
90a1759562ba7226ccefdedf1887146c21592ab4
|
[
"MIT"
] | 5
|
2021-03-26T03:01:35.000Z
|
2022-01-21T18:00:46.000Z
|
stdlibs/__init__.py
|
thatch/stdlibs
|
90a1759562ba7226ccefdedf1887146c21592ab4
|
[
"MIT"
] | 2
|
2021-03-29T05:40:09.000Z
|
2022-03-01T17:05:31.000Z
|
stdlibs/__init__.py
|
thatch/stdlibs
|
90a1759562ba7226ccefdedf1887146c21592ab4
|
[
"MIT"
] | 2
|
2021-03-28T18:29:19.000Z
|
2022-01-27T13:32:38.000Z
|
# Copyright 2021 John Reese
# Licensed under the MIT license
"""
List of packages in the stdlib
"""
__author__ = "John Reese"
import importlib
import sys
from typing import FrozenSet, Optional
from .__version__ import __version__
from .py3 import module_names
ALL = "all"
KNOWN_VERSIONS = [
"2.3",
"2.4",
"2.5",
"2.6",
"2.7",
"3.0",
"3.1",
"3.2",
"3.3",
"3.4",
"3.5",
"3.6",
"3.7",
"3.8",
"3.9",
"3.10",
]
def stdlib_module_names(version: Optional[str] = None) -> FrozenSet[str]:
if version is None:
version = "%d%d" % sys.version_info[:2]
modname = f".py{version}"
elif version == ALL:
modname = ".py"
else:
version = "".join(version.split(".")[:2])
modname = f".py{version}"
return importlib.import_module(modname, __package__).module_names # type: ignore
__all__ = [
"stdlib_module_names",
"module_names",
]
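# --- Illustrative usage sketch (not part of the original module) ---
# Shows how `stdlib_module_names` could be called; it assumes the per-version
# submodules the function imports (e.g. `stdlibs.py38`, `stdlibs.py`) ship
# with the package, as the code above implies.
if __name__ == "__main__":
    current = stdlib_module_names()        # running interpreter's version
    py38 = stdlib_module_names("3.8")      # a specific version string
    union = stdlib_module_names(ALL)       # union over all known versions
    print("asyncio in current stdlib:", "asyncio" in current)
    print("3.8:", len(py38), "modules; all versions:", len(union))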
| 16.982143
| 85
| 0.574132
|
219e16c28cd93985ea7c12ab9d871e04794a67e8
| 19,725
|
py
|
Python
|
apivideo/api/raw_statistics_api.py
|
apivideo/api.video-python
|
7c5c70a9a638c2c1d3af18aabb09dda7b4db32a4
|
[
"MIT"
] | 6
|
2021-05-20T08:51:27.000Z
|
2021-10-07T16:04:32.000Z
|
apivideo/api/raw_statistics_api.py
|
apivideo/python-api-client
|
9de1127aee8ed36c42084357bfa3cda54110554a
|
[
"MIT"
] | 1
|
2022-03-21T17:15:29.000Z
|
2022-03-22T09:10:10.000Z
|
apivideo/api/raw_statistics_api.py
|
apivideo/python-api-client
|
9de1127aee8ed36c42084357bfa3cda54110554a
|
[
"MIT"
] | 1
|
2022-03-01T08:58:02.000Z
|
2022-03-01T08:58:02.000Z
|
"""
api.video
api.video is an API that encodes on the go to facilitate immediate playback, enhancing viewer streaming experiences across multiple devices and platforms. You can stream live or on-demand online videos within minutes. # noqa: E501
Contact: ecosystem@api.video
"""
import os # noqa: F401
import re # noqa: F401
import sys # noqa: F401
from types import MethodType
from types import FunctionType
from apivideo.api_client import ApiClient
from apivideo.endpoint import EndPoint as _EndPoint, ChunkIO
from apivideo.model.video_id import VideoId
from apivideo.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from apivideo.exceptions import ApiTypeError, ApiValueError
from apivideo.model.not_found import NotFound
from apivideo.model.raw_statistics_list_live_stream_analytics_response import RawStatisticsListLiveStreamAnalyticsResponse
from apivideo.model.raw_statistics_list_player_session_events_response import RawStatisticsListPlayerSessionEventsResponse
from apivideo.model.raw_statistics_list_sessions_response import RawStatisticsListSessionsResponse
class RawStatisticsApi(_EndPoint):
def list_live_stream_sessions(
self,
live_stream_id,
**kwargs
):
"""List live stream player sessions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_live_stream_sessions(live_stream_id, async_req=True)
>>> result = thread.get()
Args:
live_stream_id (str): The unique identifier for the live stream you want to retrieve analytics for.
Keyword Args:
period (str): Period must have one of the following formats: - For a day : \"2018-01-01\", - For a week: \"2018-W01\", - For a month: \"2018-01\" - For a year: \"2018\" For a range period: - Date range: \"2018-01-01/2018-01-15\" . [optional]
current_page (int): Choose the number of search results to return per page. Minimum value: 1. [optional] if omitted the server will use the default value of 1
page_size (int): Results per page. Allowed values 1-100, default is 25.. [optional] if omitted the server will use the default value of 25
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
async_req (bool): execute request asynchronously
Returns:
RawStatisticsListLiveStreamAnalyticsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['live_stream_id'] = \
live_stream_id
params_map = {
'all': [
'live_stream_id',
'period',
'current_page',
'page_size',
'async_req',
'_preload_content',
'_request_timeout',
'_return_http_data_only'
],
'required': [
'live_stream_id',
],
'nullable': [
'_request_timeout'
],
'enum': [
],
'validation': [
]
}
validations = {
}
allowed_values = {
}
openapi_types = {
'live_stream_id':
(str,),
'period':
(str,),
'current_page':
(int,),
'page_size':
(int,),
'async_req': (bool,),
'_preload_content': (bool,),
'_request_timeout': (none_type, int, (int,), [int]),
'_return_http_data_only': (bool,)
}
attribute_map = {
'live_stream_id': 'liveStreamId',
'period': 'period',
'current_page': 'currentPage',
'page_size': 'pageSize',
}
location_map = {
'live_stream_id': 'path',
'period': 'query',
'current_page': 'query',
'page_size': 'query',
}
collection_format_map = {
}
for key, value in kwargs.items():
if key not in params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `list_live_stream_sessions`" %
(key, )
)
if (key not in params_map['nullable'] and value is None):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `list_live_stream_sessions`" %
(key, )
)
for key in params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`list_live_stream_sessions`" % (key, )
)
self._validate_inputs(kwargs, params_map, allowed_values, validations, openapi_types)
params = self._gather_params(kwargs, location_map, attribute_map, openapi_types, collection_format_map)
return self.api_client.call_api(
"/analytics/live-streams/{liveStreamId}",
"GET",
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=(RawStatisticsListLiveStreamAnalyticsResponse,),
async_req=kwargs['async_req'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
collection_formats=params['collection_format'])
def list_session_events(
self,
session_id,
**kwargs
):
"""List player session events # noqa: E501
        Useful to track and measure a video's engagement. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_session_events(session_id, async_req=True)
>>> result = thread.get()
Args:
session_id (str): A unique identifier you can use to reference and track a session with.
Keyword Args:
current_page (int): Choose the number of search results to return per page. Minimum value: 1. [optional] if omitted the server will use the default value of 1
page_size (int): Results per page. Allowed values 1-100, default is 25.. [optional] if omitted the server will use the default value of 25
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
async_req (bool): execute request asynchronously
Returns:
RawStatisticsListPlayerSessionEventsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['session_id'] = \
session_id
params_map = {
'all': [
'session_id',
'current_page',
'page_size',
'async_req',
'_preload_content',
'_request_timeout',
'_return_http_data_only'
],
'required': [
'session_id',
],
'nullable': [
'_request_timeout'
],
'enum': [
],
'validation': [
]
}
validations = {
}
allowed_values = {
}
openapi_types = {
'session_id':
(str,),
'current_page':
(int,),
'page_size':
(int,),
'async_req': (bool,),
'_preload_content': (bool,),
'_request_timeout': (none_type, int, (int,), [int]),
'_return_http_data_only': (bool,)
}
attribute_map = {
'session_id': 'sessionId',
'current_page': 'currentPage',
'page_size': 'pageSize',
}
location_map = {
'session_id': 'path',
'current_page': 'query',
'page_size': 'query',
}
collection_format_map = {
}
for key, value in kwargs.items():
if key not in params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `list_session_events`" %
(key, )
)
if (key not in params_map['nullable'] and value is None):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `list_session_events`" %
(key, )
)
for key in params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`list_session_events`" % (key, )
)
self._validate_inputs(kwargs, params_map, allowed_values, validations, openapi_types)
params = self._gather_params(kwargs, location_map, attribute_map, openapi_types, collection_format_map)
return self.api_client.call_api(
"/analytics/sessions/{sessionId}/events",
"GET",
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=(RawStatisticsListPlayerSessionEventsResponse,),
async_req=kwargs['async_req'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
collection_formats=params['collection_format'])
def list_video_sessions(
self,
video_id,
**kwargs
):
"""List video player sessions # noqa: E501
Retrieve all available user sessions for a specific video. Tutorials that use the [analytics endpoint](https://api.video/blog/endpoints/analytics). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_video_sessions(video_id, async_req=True)
>>> result = thread.get()
Args:
video_id (str): The unique identifier for the video you want to retrieve session information for.
Keyword Args:
period (str): Period must have one of the following formats: - For a day : 2018-01-01, - For a week: 2018-W01, - For a month: 2018-01 - For a year: 2018 For a range period: - Date range: 2018-01-01/2018-01-15 . [optional]
            metadata ({str: (str,)}): Metadata and [Dynamic Metadata](https://api.video/blog/endpoints/dynamic-metadata) filter. Send an array of key value pairs you want to filter sessions with. [optional]
current_page (int): Choose the number of search results to return per page. Minimum value: 1. [optional] if omitted the server will use the default value of 1
page_size (int): Results per page. Allowed values 1-100, default is 25.. [optional] if omitted the server will use the default value of 25
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
async_req (bool): execute request asynchronously
Returns:
RawStatisticsListSessionsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['video_id'] = \
video_id
params_map = {
'all': [
'video_id',
'period',
'metadata',
'current_page',
'page_size',
'async_req',
'_preload_content',
'_request_timeout',
'_return_http_data_only'
],
'required': [
'video_id',
],
'nullable': [
'_request_timeout'
],
'enum': [
],
'validation': [
]
}
validations = {
}
allowed_values = {
}
openapi_types = {
'video_id':
(str,),
'period':
(str,),
'metadata':
({str: (str,)},),
'current_page':
(int,),
'page_size':
(int,),
'async_req': (bool,),
'_preload_content': (bool,),
'_request_timeout': (none_type, int, (int,), [int]),
'_return_http_data_only': (bool,)
}
attribute_map = {
'video_id': 'videoId',
'period': 'period',
'metadata': 'metadata',
'current_page': 'currentPage',
'page_size': 'pageSize',
}
location_map = {
'video_id': 'path',
'period': 'query',
'metadata': 'query',
'current_page': 'query',
'page_size': 'query',
}
collection_format_map = {
'metadata': 'deepObject',
}
for key, value in kwargs.items():
if key not in params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `list_video_sessions`" %
(key, )
)
if (key not in params_map['nullable'] and value is None):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `list_video_sessions`" %
(key, )
)
for key in params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`list_video_sessions`" % (key, )
)
self._validate_inputs(kwargs, params_map, allowed_values, validations, openapi_types)
params = self._gather_params(kwargs, location_map, attribute_map, openapi_types, collection_format_map)
return self.api_client.call_api(
"/analytics/videos/{videoId}",
"GET",
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=(RawStatisticsListSessionsResponse,),
async_req=kwargs['async_req'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
collection_formats=params['collection_format'])
| 41.613924
| 261
| 0.505095
|
afcef3e90855f4d6f0776e86045a4004882e6e71
| 2,123
|
py
|
Python
|
lizard_auth_server/migrations/0004_auto_20160818_1641.py
|
lisannewapstra/lizard-auth-server
|
3824edfaedd01caff5eb84bbcb9557ccfec2371a
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2019-02-21T02:12:04.000Z
|
2019-02-21T02:12:04.000Z
|
lizard_auth_server/migrations/0004_auto_20160818_1641.py
|
lisannewapstra/lizard-auth-server
|
3824edfaedd01caff5eb84bbcb9557ccfec2371a
|
[
"MIT",
"BSD-3-Clause"
] | 88
|
2015-04-23T15:37:17.000Z
|
2021-02-18T15:28:32.000Z
|
lizard_auth_server/migrations/0004_auto_20160818_1641.py
|
lisannewapstra/lizard-auth-server
|
3824edfaedd01caff5eb84bbcb9557ccfec2371a
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2018-04-24T08:48:35.000Z
|
2021-02-17T10:18:26.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-18 14:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("lizard_auth_server", "0003_auto_20160818_1631"),
]
operations = [
migrations.AlterModelOptions(
name="invitation",
options={
"ordering": ["is_activated", "-created_at", "email"],
"verbose_name": "(invitation)",
"verbose_name_plural": "(invitation)",
},
),
migrations.AlterModelOptions(
name="organisation",
options={
"ordering": ["name"],
"verbose_name": "(organisation)",
"verbose_name_plural": "(organisations)",
},
),
migrations.AlterModelOptions(
name="organisationrole",
options={
"verbose_name": "(organisation-role-mapping)",
"verbose_name_plural": "(organisation-role-mappings)",
},
),
migrations.AlterModelOptions(
name="portal",
options={
"ordering": ("name",),
"verbose_name": "(portal)",
"verbose_name_plural": "(portals)",
},
),
migrations.AlterModelOptions(
name="role",
options={
"ordering": ["portal", "name"],
"verbose_name": "(role)",
"verbose_name_plural": "(roles)",
},
),
migrations.AlterModelOptions(
name="token",
options={
"ordering": ("-created",),
"verbose_name": "(authentication token)",
"verbose_name_plural": "(authentication tokens)",
},
),
migrations.AlterModelOptions(
name="userprofile",
options={
"ordering": ["user__username"],
"verbose_name": "(user profile)",
"verbose_name_plural": "(user profiles)",
},
),
]
| 30.768116
| 70
| 0.473387
|
242fb29151b8c84538b59b342c75a728a883db8f
| 1,961
|
py
|
Python
|
LASCAD/experiments/expr1_classification.py
|
NiSE-Virginia-Tech/doaa-altarawy-LASCAD
|
d44c9606c4d9a36979195de811575b74648006d5
|
[
"BSD-3-Clause"
] | 7
|
2018-01-30T21:35:20.000Z
|
2022-03-18T02:34:28.000Z
|
LASCAD/experiments/expr1_classification.py
|
NiSE-Virginia-Tech/doaa-altarawy-LASCAD
|
d44c9606c4d9a36979195de811575b74648006d5
|
[
"BSD-3-Clause"
] | null | null | null |
LASCAD/experiments/expr1_classification.py
|
NiSE-Virginia-Tech/doaa-altarawy-LASCAD
|
d44c9606c4d9a36979195de811575b74648006d5
|
[
"BSD-3-Clause"
] | 4
|
2019-08-03T00:03:53.000Z
|
2020-09-09T19:35:19.000Z
|
import pandas as pd
import numpy as np
import traceback
from LASCAD.LDA.Clustering import testClustering
import os
results = pd.DataFrame(columns=['Dataset', 'n_clusters', 'NUM_TOPICS', 'max_df', 'min_df',
'precision', 'recall', 'f-score'])
i = 0
dataset = 'showcase_noStem2'
method = 'LASCAD'
# method = 'LACT'
for n_clusters in range(20, 120, 5):
print('n_clusters', n_clusters)
for NUM_TOPICS in range(20, 100, 10):
for max_df in [.8]:
for min_df in [.2]:
print('{}- Running: NUM_TOPICS={}, max_df={}, min_df={}, test={}'
.format(i, NUM_TOPICS, max_df, min_df, dataset))
try:
n_clusters_ = n_clusters
score, n_clusters_ = testClustering(NUM_TOPICS=NUM_TOPICS, max_df=max_df,
min_df=min_df, dataset=dataset,
verbose=False,
plot_heatmap=False,
categ_method=method,
n_clusters=n_clusters
)
score = np.round(np.array(score)*100., 2)
results.loc[i] = [dataset, n_clusters_, NUM_TOPICS, max_df, min_df,
score[0], score[1], score[2]]
results.to_csv(os.path.join('..', 'results', 'categorization_accuracy',
method + '_accuracy_scores_' + dataset + '.csv'))
i += 1
                except Exception:
print('n_clusters={}, NUM_TOPICS={}, max_df={}, min_df={}, test={} ..... failed'
.format(n_clusters_, NUM_TOPICS, max_df, min_df, dataset))
traceback.print_exc()
print('Done......')
print(results)
| 42.630435
| 100
| 0.462519
|
82b494cf5fd8b5ea40fc520a40fd4f7b0e526b1c
| 20,809
|
py
|
Python
|
trainval.py
|
masami87/Human-Pose-Estimation-3D
|
3118cb9176118dcad9667f24c765e6c8a56cd69c
|
[
"MIT"
] | null | null | null |
trainval.py
|
masami87/Human-Pose-Estimation-3D
|
3118cb9176118dcad9667f24c765e6c8a56cd69c
|
[
"MIT"
] | null | null | null |
trainval.py
|
masami87/Human-Pose-Estimation-3D
|
3118cb9176118dcad9667f24c765e6c8a56cd69c
|
[
"MIT"
] | null | null | null |
import os
from alive_progress import alive_bar
import numpy as np
import torch
from common.camera import normalize_screen_coordinates, world_to_camera
from common.loss import mpjpe, p_mpjpe
from common.utils import deterministic_random
from model.VideoPose3D import TemporalModel, TemporalModelOptimized1f
def load_dataset(data_dir: str, dataset_type: str, keypoints_type: str):
print('Loading dataset...')
dataset_path = data_dir + 'data_3d_' + dataset_type + '.npz'
if dataset_type == "h36m":
from datasets.h36m import Human36mDataset
dataset = Human36mDataset(dataset_path)
else:
raise KeyError('Invalid dataset')
print('Preparing data')
# TODO ?
for subject in dataset.subjects():
for action in dataset[subject].keys():
anim = dataset[subject][action]
if 'positions' in anim:
positions_3d = []
for cam in anim['cameras']:
pos_3d = world_to_camera(
anim['positions'], R=cam['orientation'], t=cam['translation'])
# Remove global offset, but keep trajectory in first position
pos_3d[:, 1:] -= pos_3d[:, :1]
positions_3d.append(pos_3d)
anim['positions_3d'] = positions_3d
print('Loading 2D detections...')
keypoints = np.load(data_dir + 'data_2d_' + dataset_type +
'_' + keypoints_type + '.npz', allow_pickle=True)
keypoints_metadata = keypoints['metadata'].item()
keypoints_symmetry = keypoints_metadata['keypoints_symmetry']
kps_left, kps_right = list(
keypoints_symmetry[0]), list(keypoints_symmetry[1])
joints_left, joints_right = list(dataset.skeleton().joints_left()), list(
dataset.skeleton().joints_right())
keypoints = keypoints['positions_2d'].item()
for subject in dataset.subjects():
assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format(
subject)
for action in dataset[subject].keys():
assert action in keypoints[subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(
action, subject)
if 'positions_3d' not in dataset[subject][action]:
continue
for cam_idx in range(len(keypoints[subject][action])):
# We check for >= instead of == because some videos in H3.6M contain extra frames
mocap_length = dataset[subject][action]['positions_3d'][cam_idx].shape[0]
assert keypoints[subject][action][cam_idx].shape[0] >= mocap_length
if keypoints[subject][action][cam_idx].shape[0] > mocap_length:
# Shorten sequence
keypoints[subject][action][cam_idx] = keypoints[subject][action][cam_idx][:mocap_length]
assert len(keypoints[subject][action]) == len(
dataset[subject][action]['positions_3d'])
for subject in keypoints.keys():
for action in keypoints[subject]:
for cam_idx, kps in enumerate(keypoints[subject][action]):
# Normalize camera frame
cam = dataset.cameras()[subject][cam_idx]
kps[..., :2] = normalize_screen_coordinates(
kps[..., :2], w=cam['res_w'], h=cam['res_h'])
keypoints[subject][action][cam_idx] = kps
return dataset, keypoints, keypoints_metadata, kps_left, kps_right, joints_left, joints_right
def load_dataset_ntu(data_dir: str, dataset_type: str, keypoints_type: str, use_depth: bool):
print('Loading dataset...')
dataset_path = data_dir + 'data_3d_' + dataset_type + '.npz'
if dataset_type == "ntu":
from datasets.ntu_rgbd import NTU_RGBD
dataset = NTU_RGBD(dataset_path)
else:
raise KeyError('Invalid dataset')
print('Preparing data NTU')
for subject in dataset.subjects():
for action in dataset[subject].keys():
anim = dataset[subject][action]
positions_3d = []
for cam in anim.keys():
for seg in anim[cam].keys():
pos_3d = anim[cam][seg]
pos_3d[:, 1:] -= pos_3d[:, :1]
positions_3d.append(pos_3d)
anim['positions_3d'] = positions_3d
print('Loading 2D detections...')
keypoints = np.load(data_dir + 'data_2d_' + dataset_type +
'_' + keypoints_type + '.npz', allow_pickle=True)
# keypoints_metadata = keypoints['metadata'].item()
# keypoints_metadata = keypoints_metadata['keypoints_symmetry']
kps_left, kps_right = list(dataset.skeleton().joints_left()), list(
dataset.skeleton().joints_right())
    keypoints_metadata = [kps_left, kps_right]  # not used
joints_left, joints_right = list(dataset.skeleton().joints_left()), list(
dataset.skeleton().joints_right())
keypoints = keypoints['positions_2d'].item()
depth_vecs = {}
if use_depth:
print("Loading depth vec...")
depth_vecs = np.load(data_dir+'data_dep'+'_'+dataset_type
+ '.npz', allow_pickle=True)
depth_vecs = depth_vecs['depths'].item()
valid_indexes = dataset.valid_indexes()
for subject in dataset.subjects():
assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format(
subject)
for action in dataset[subject].keys():
assert action in keypoints[subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(
action, subject)
if 'positions_3d' not in dataset[subject][action]:
continue
keypoints_2d = []
for cam in keypoints[subject][action].keys():
for seg in keypoints[subject][action][cam].keys():
kpt_2d = keypoints[subject][action][cam][seg][:,
valid_indexes]
if use_depth:
d_vec = depth_vecs[subject][action][cam][seg][:,
valid_indexes]
kpt_2d = np.concatenate((kpt_2d, d_vec), -1)
assert kpt_2d.shape[-1] == 3
keypoints_2d.append(kpt_2d)
keypoints[subject][action] = keypoints_2d
assert len(keypoints[subject][action]) == len(
dataset[subject][action]['positions_3d'])
for subject in keypoints.keys():
for action in keypoints[subject]:
for seg_idx, kps in enumerate(keypoints[subject][action]):
# Normalize camera frame
kps[..., :2] = normalize_screen_coordinates(
kps[..., :2], w=1920, h=1080)
if use_depth:
assert kps.shape[-1] == 3, "No depth dimentions with tensor shape: {}".format(
kps.shape)
kps[..., 2] = kps[..., 2] / 20.0 # TODO: better norm
keypoints[subject][action][seg_idx] = kps
return dataset, keypoints, keypoints_metadata, kps_left, kps_right, joints_left, joints_right
def fetch(subjects, dataset, keypoints, action_filter=None, downsample=5, subset=1, parse_3d_poses=True):
out_poses_3d = []
out_poses_2d = []
out_camera_params = []
for subject in subjects:
for action in keypoints[subject].keys():
if action_filter is not None:
found = False
for a in action_filter:
if action.startswith(a):
found = True
break
if not found:
continue
poses_2d = keypoints[subject][action]
for i in range(len(poses_2d)): # Iterate across cameras
out_poses_2d.append(poses_2d[i])
if subject in dataset.cameras():
cams = dataset.cameras()[subject]
assert len(cams) == len(poses_2d), 'Camera count mismatch'
for cam in cams:
if 'intrinsic' in cam:
out_camera_params.append(cam['intrinsic'])
if parse_3d_poses and 'positions_3d' in dataset[subject][action]:
poses_3d = dataset[subject][action]['positions_3d']
assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
for i in range(len(poses_3d)): # Iterate across cameras
out_poses_3d.append(poses_3d[i])
if len(out_camera_params) == 0:
out_camera_params = None
if len(out_poses_3d) == 0:
out_poses_3d = None
stride = downsample
if subset < 1:
for i in range(len(out_poses_2d)):
n_frames = int(round(len(out_poses_2d[i])//stride * subset)*stride)
start = deterministic_random(
0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i])))
out_poses_2d[i] = out_poses_2d[i][start:start+n_frames:stride]
if out_poses_3d is not None:
out_poses_3d[i] = out_poses_3d[i][start:start+n_frames:stride]
elif stride > 1:
# Downsample as requested
for i in range(len(out_poses_2d)):
out_poses_2d[i] = out_poses_2d[i][::stride]
if out_poses_3d is not None:
out_poses_3d[i] = out_poses_3d[i][::stride]
return out_camera_params, out_poses_3d, out_poses_2d
def fetch_ntu(subjects, dataset, keypoints, action_filter=None, downsample=5, subset=1, parse_3d_poses=True):
out_poses_3d = []
out_poses_2d = []
out_camera_params = []
for subject in subjects:
for action in keypoints[subject].keys():
if action_filter is not None:
found = False
for a in action_filter:
if action.startswith(a):
found = True
break
if not found:
continue
poses_2d = keypoints[subject][action]
for i in range(len(poses_2d)): # Iterate across segs
out_poses_2d.append(poses_2d[i])
if parse_3d_poses and 'positions_3d' in dataset[subject][action]:
poses_3d = dataset[subject][action]['positions_3d']
assert len(poses_3d) == len(poses_2d), 'seg count mismatch'
for i in range(len(poses_3d)): # Iterate across cameras
out_poses_3d.append(poses_3d[i])
if len(out_camera_params) == 0:
out_camera_params = None
if len(out_poses_3d) == 0:
out_poses_3d = None
stride = downsample
if subset < 1:
for i in range(len(out_poses_2d)):
n_frames = int(round(len(out_poses_2d[i])//stride * subset)*stride)
start = deterministic_random(
0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i])))
out_poses_2d[i] = out_poses_2d[i][start:start+n_frames:stride]
if out_poses_3d is not None:
out_poses_3d[i] = out_poses_3d[i][start:start+n_frames:stride]
elif stride > 1:
# Downsample as requested
for i in range(len(out_poses_2d)):
out_poses_2d[i] = out_poses_2d[i][::stride]
if out_poses_3d is not None:
out_poses_3d[i] = out_poses_3d[i][::stride]
return out_camera_params, out_poses_3d, out_poses_2d
def create_model(cfg, dataset, poses_valid_2d):
filter_widths = [int(x) for x in cfg.architecture.split(",")]
if not cfg.disable_optimizations and not cfg.dense and cfg.stride == 1:
# Use optimized model for single-frame predictions
model_pos_train = TemporalModelOptimized1f(poses_valid_2d[0].shape[-2], poses_valid_2d[0].shape[-1], dataset.skeleton().num_joints(),
filter_widths=filter_widths, causal=cfg.causal, dropout=cfg.dropout, channels=cfg.channels)
else:
# When incompatible settings are detected (stride > 1, dense filters, or disabled optimization) fall back to normal model
model_pos_train = TemporalModel(poses_valid_2d[0].shape[-2], poses_valid_2d[0].shape[-1], dataset.skeleton().num_joints(),
filter_widths=filter_widths, causal=cfg.causal, dropout=cfg.dropout, channels=cfg.channels,
dense=cfg.dense)
model_pos = TemporalModel(poses_valid_2d[0].shape[-2], poses_valid_2d[0].shape[-1], dataset.skeleton().num_joints(),
filter_widths=filter_widths, causal=cfg.causal, dropout=cfg.dropout, channels=cfg.channels,
dense=cfg.dense)
receptive_field = model_pos.receptive_field()
pad = (receptive_field - 1) // 2 # padding on each side
if cfg.causal:
causal_shift = pad
else:
causal_shift = 0
return model_pos_train, model_pos, pad, causal_shift
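# --- Illustrative note (assumption, not from the original repo) ---
# Sketch of how the padding above relates to the architecture string: for this
# kind of temporal-convolution model the receptive field is typically the
# product of the filter widths, so e.g. "3,3,3,3,3" would give 243 frames and
# pad = (243 - 1) // 2 = 121 frames per side in the non-causal case.
if __name__ == "__main__":
    import math
    widths = [int(x) for x in "3,3,3,3,3".split(",")]  # hypothetical cfg.architecture
    receptive_field_sketch = math.prod(widths)
    print("receptive field:", receptive_field_sketch,
          "pad per side:", (receptive_field_sketch - 1) // 2)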
def load_weight(cfg, model_pos_train, model_pos):
checkpoint = dict()
if cfg.resume or cfg.evaluate:
chk_filename = os.path.join(
cfg.checkpoint, cfg.resume if cfg.resume else cfg.evaluate)
print("Loading checkpoint", chk_filename)
checkpoint = torch.load(chk_filename)
# print("This model was trained for {} epochs".format(checkpoint["epoch"]))
model_pos_train.load_state_dict(checkpoint["model_pos"])
model_pos.load_state_dict(checkpoint["model_pos"])
return model_pos_train, model_pos, checkpoint
def train(accelerator, model_pos_train, train_loader, optimizer):
epoch_loss_3d_train = 0
N = 0
# TODO dataloader and tqdm
total = len(train_loader)
with alive_bar(total, title='Train', spinner='elements') as bar:
for batch_data in train_loader:
inputs_3d, inputs_2d = batch_data[-2], batch_data[-1]
inputs_3d[:, :, 0] = 0
optimizer.zero_grad()
# Predict 3D poses
predicted_3d_pos = model_pos_train(inputs_2d)
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_train += inputs_3d.shape[0] * \
inputs_3d.shape[1] * loss_3d_pos.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
loss_total = loss_3d_pos
# accelerator backward
accelerator.backward(loss_total)
optimizer.step()
bar()
epoch_losses_eva = epoch_loss_3d_train / N
return epoch_losses_eva
def eval(model_train_dict, model_pos, test_loader, train_loader_eval):
N = 0
epoch_loss_3d_valid = 0
epoch_loss_3d_train_eval = 0
with torch.no_grad():
model_pos.load_state_dict(model_train_dict)
model_pos.eval()
# Evaluate on test set
total_test = len(test_loader)
with alive_bar(total_test, title='Test ', spinner='flowers') as bar:
for batch_data in test_loader:
inputs_3d, inputs_2d = batch_data[-2], batch_data[-1]
inputs_3d[:, :, 0] = 0
# Predict 3D poses
predicted_3d_pos = model_pos(inputs_2d)
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_valid += inputs_3d.shape[0] * \
inputs_3d.shape[1] * loss_3d_pos.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
bar()
losses_3d_valid_ave = epoch_loss_3d_valid / N
# Evaluate on training set, this time in evaluation mode
N = 0
total_eval = len(train_loader_eval)
with alive_bar(total_eval, title='Eval ', spinner='flowers') as bar:
for batch_data in train_loader_eval:
inputs_3d, inputs_2d = batch_data[-2], batch_data[-1]
if inputs_2d.shape[1] == 0:
# This happens only when downsampling the dataset
continue
inputs_3d[:, :, 0] = 0
# Compute 3D poses
predicted_3d_pos = model_pos(inputs_2d)
loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_train_eval += inputs_3d.shape[0] * \
inputs_3d.shape[1] * loss_3d_pos.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
bar()
losses_3d_train_eval_ave = epoch_loss_3d_train_eval / N
return losses_3d_valid_ave, losses_3d_train_eval_ave
def prepare_actions(subjects_test, dataset):
all_actions = {}
all_actions_by_subject = {}
for subject in subjects_test:
if subject not in all_actions_by_subject:
all_actions_by_subject[subject] = {}
for action in dataset[subject].keys():
action_name = action.split(' ')[0]
if action_name not in all_actions:
all_actions[action_name] = []
if action_name not in all_actions_by_subject[subject]:
all_actions_by_subject[subject][action_name] = []
all_actions[action_name].append((subject, action))
all_actions_by_subject[subject][action_name].append(
(subject, action))
return all_actions, all_actions_by_subject
def fetch_actions(actions, keypoints, dataset, downsample=1):
out_poses_3d = []
out_poses_2d = []
for subject, action in actions:
poses_2d = keypoints[subject][action]
for i in range(len(poses_2d)): # Iterate across camera
out_poses_2d.append(poses_2d[i])
poses_3d = dataset[subject][action]['positions_3d']
assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
for i in range(len(poses_3d)): # Iterate across cameras
out_poses_3d.append(poses_3d[i])
stride = downsample
if stride > 1:
# Downsample as requested
for i in range(len(out_poses_2d)):
out_poses_2d[i] = out_poses_2d[i][::stride]
if out_poses_3d is not None:
out_poses_3d[i] = out_poses_3d[i][::stride]
return out_poses_3d, out_poses_2d
def evaluate(test_loader, model_pos, action=None, log=None, joints_left=None, joints_right=None, test_augment=True):
epoch_loss_3d_pos = 0
epoch_loss_3d_pos_procrustes = 0
with torch.no_grad():
model_pos.eval()
N = 0
for batch_data in test_loader:
inputs_3d, inputs_2d = batch_data[-2], batch_data[-1]
if test_augment:
inputs_2d = torch.squeeze(inputs_2d, 0)
inputs_3d = torch.squeeze(inputs_3d, 0)
inputs_3d[:, :, 0] = 0
# Positional model
predicted_3d_pos = model_pos(inputs_2d)
if test_augment:
assert joints_left is not None and joints_right is not None
predicted_3d_pos[1, :, :, 0] *= -1
predicted_3d_pos[1, :, joints_left +
joints_right] = predicted_3d_pos[1, :, joints_right + joints_left]
predicted_3d_pos = torch.mean(
predicted_3d_pos, dim=0, keepdim=True)
inputs_3d = inputs_3d[:1]
error = mpjpe(predicted_3d_pos, inputs_3d)
epoch_loss_3d_pos += inputs_3d.shape[0] * \
inputs_3d.shape[1] * error.item()
N += inputs_3d.shape[0] * inputs_3d.shape[1]
inputs = inputs_3d.cpu().numpy().reshape(-1,
inputs_3d.shape[-2], inputs_3d.shape[-1])
predicted_3d_pos = predicted_3d_pos.cpu().numpy(
).reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1])
epoch_loss_3d_pos_procrustes += inputs_3d.shape[0] * \
inputs_3d.shape[1] * p_mpjpe(predicted_3d_pos, inputs)
e1 = (epoch_loss_3d_pos / N) * 1000
e2 = (epoch_loss_3d_pos_procrustes / N) * 1000
if log is not None:
if action is None:
log.info('----------')
else:
log.info('----{}----'.format(action))
log.info('Protocol #1 Error (MPJPE): {} mm'.format(e1))
log.info('Protocol #2 Error (P-MPJPE): {} mm'.format(e2))
log.info('----------')
return e1, e2
def predict(test_generator, model_pos):
with torch.no_grad():
model_pos.eval()
batch_2d = next(test_generator.next_epoch())[-1]
inputs_2d = torch.from_numpy(batch_2d.astype('float32'))
if torch.cuda.is_available():
inputs_2d = inputs_2d.cuda()
predicted_3d_pos = model_pos(inputs_2d)
return predicted_3d_pos.squeeze(0).cpu().numpy()
| 40.722114
| 142
| 0.593974
|
154741ef14890d2bc6bdc3aec88197dc3b13106f
| 1,709
|
py
|
Python
|
test/rules/parameters/test_cidr_allowed_values.py
|
KeyCore/cfn-python-lint
|
342ac61db052224314ca1219a7a073b45841d78e
|
[
"MIT-0"
] | 1
|
2019-03-23T06:04:16.000Z
|
2019-03-23T06:04:16.000Z
|
test/rules/parameters/test_cidr_allowed_values.py
|
eshack94/cfn-python-lint
|
9ec44f41ae24b9d62576aed53efa888b00641e04
|
[
"MIT-0"
] | null | null | null |
test/rules/parameters/test_cidr_allowed_values.py
|
eshack94/cfn-python-lint
|
9ec44f41ae24b9d62576aed53efa888b00641e04
|
[
"MIT-0"
] | 2
|
2019-06-21T13:17:02.000Z
|
2020-02-29T08:11:00.000Z
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.parameters.CidrAllowedValues import CidrAllowedValues # pylint: disable=E0401
from .. import BaseRuleTestCase
class TestParameterCidrAllowedValues(BaseRuleTestCase):
"""Test template parameter configurations"""
def setUp(self):
"""Setup"""
super(TestParameterCidrAllowedValues, self).setUp()
self.collection.register(CidrAllowedValues())
success_templates = [
'test/fixtures/templates/good/properties_ec2_vpc.yaml',
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/properties_ec2_network.yaml', 3)
| 43.820513
| 97
| 0.747806
|
923b0f2612194ec7bbf0369e554ef6b53cfd19a3
| 5,947
|
py
|
Python
|
src/evaluating_rewards/rewards/comparisons.py
|
HumanCompatibleAI/evaluating_rewards
|
7b99ec9b415d805bd77041f2f7807d112dec9802
|
[
"Apache-2.0"
] | 42
|
2020-04-27T06:54:39.000Z
|
2022-02-10T00:59:53.000Z
|
src/evaluating_rewards/rewards/comparisons.py
|
HumanCompatibleAI/evaluating-rewards
|
7b99ec9b415d805bd77041f2f7807d112dec9802
|
[
"Apache-2.0"
] | 49
|
2019-11-07T22:01:35.000Z
|
2021-02-08T17:27:36.000Z
|
src/evaluating_rewards/rewards/comparisons.py
|
HumanCompatibleAI/evaluating_rewards
|
7b99ec9b415d805bd77041f2f7807d112dec9802
|
[
"Apache-2.0"
] | 2
|
2020-10-06T12:17:31.000Z
|
2021-06-22T18:03:02.000Z
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to compare reward models."""
import logging
from typing import Any, Callable, List, Mapping, Optional, Tuple, Type, TypeVar
from imitation.data import types
import tensorflow as tf
from evaluating_rewards import datasets
from evaluating_rewards.rewards import base
FitStats = Mapping[str, List[Mapping[str, Any]]]
def ellp_norm_loss(labels: tf.Tensor, predictions: tf.Tensor, p: float = 0.5) -> tf.Tensor:
"""Loss based on L^p norm between `labels` and `predictions`."""
delta = labels - predictions
delta = tf.abs(delta)
delta_pow = tf.pow(delta, p)
mean_delta_pow = tf.reduce_mean(delta_pow)
return tf.pow(mean_delta_pow, 1 / p)
class RegressModel:
"""Regress source model onto target."""
def __init__(
self,
model: base.RewardModel,
target: base.RewardModel,
*,
loss_fn: Callable[[tf.Tensor, tf.Tensor], tf.Tensor] = tf.losses.mean_squared_error,
optimizer: Type[tf.train.Optimizer] = tf.train.AdamOptimizer,
# TODO(): change to optimizer_kwargs?
learning_rate: float = 1e-2,
):
"""Constructs RegressModel.
Args:
model: The model to fit.
target: The target we want to match.
loss_fn: A function computing the loss from labels and predictions.
optimizer: The type of optimizer to use.
learning_rate: Hyperparameter for optimizer.
"""
assert model.observation_space == target.observation_space
assert model.action_space == target.action_space
self.model = model
self.target = base.StopGradientsModelWrapper(target)
self.learning_rate = learning_rate
self.loss = loss_fn(self.target.reward, self.model.reward)
self._opt = optimizer(
learning_rate=self.learning_rate
) # pytype: disable=wrong-keyword-args
self._grads = self._opt.compute_gradients(self.loss)
self.opt_op = self._opt.apply_gradients(self._grads)
self.metrics = {}
self.metrics["grad_norm"] = {
variable.name: tf.norm(gradient)
for gradient, variable in self._grads
if gradient is not None
}
def build_feed_dict(self, batch: types.Transitions):
"""Construct feed dict given a batch of data."""
models = [self.model, self.target]
return base.make_feed_dict(models, batch)
def fit(
self,
dataset: datasets.TransitionsCallable,
total_timesteps: int = int(1e6),
batch_size: int = 4096,
**kwargs,
) -> FitStats:
"""Fits shaping to target.
Args:
dataset: a callable returning batches of the specified size.
total_timesteps: the total number of timesteps to train for.
batch_size: the number of timesteps in each training batch.
kwargs: passed through to `fit_models`.
Returns:
Training statistics.
"""
return fit_models(
{"singleton": self},
dataset=dataset,
total_timesteps=total_timesteps,
batch_size=batch_size,
**kwargs,
)
ModelWrapperRet = Tuple[base.RewardModel, Any, Mapping[str, tf.Tensor]]
ModelWrapperFn = Callable[[base.RewardModel], ModelWrapperRet]
K = TypeVar("K")
def fit_models(
potentials: Mapping[K, RegressModel],
dataset: datasets.TransitionsCallable,
total_timesteps: int,
batch_size: int,
log_interval: int = 10,
callback: Optional[base.Callback] = None,
) -> Mapping[str, List[Mapping[K, Any]]]:
"""Regresses model(s).
Each training step is executed concurrently for all the models, enabling
TensorFlow to exploit any available concurrency.
Args:
potentials: A mapping from strings to a potential-shaped reward model.
dataset: An iterator returning batches of old obs-act-next obs tuples.
total_timesteps: the total number of timesteps to train for.
batch_size: the number of timesteps in each training batch.
log_interval: The frequency with which to print.
callback: If not None, called each epoch with the current epoch number.
Returns:
Metrics from training.
Raises:
ValueError if total_timesteps < batch_size.
"""
if total_timesteps < batch_size:
        raise ValueError("total_timesteps must be at least as large as batch_size.")
sess = tf.get_default_session()
ops = {k: [p.opt_op, p.loss, p.metrics] for k, p in potentials.items()}
losses = []
metrics = []
nbatches = int(total_timesteps) // int(batch_size)
for i in range(nbatches):
batch = dataset(batch_size)
feed_dict = {}
for potential in potentials.values():
feed_dict.update(potential.build_feed_dict(batch))
outputs = sess.run(ops, feed_dict=feed_dict)
loss = {k: v[1] for k, v in outputs.items()}
metric = {k: v[2] for k, v in outputs.items()}
losses.append(loss)
metrics.append(metric)
if i % log_interval == 0:
logging.info(f"{i}: loss = {loss}, " f"metrics = {metric}")
if callback:
callback(i)
# TODO(): better logging method, e.g. TensorBoard summaries?
return {"loss": losses, "metrics": metrics}
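# --- Illustrative sketch (not part of the original module) ---
# A NumPy restatement of `ellp_norm_loss` above, to make the formula explicit:
# loss = (mean(|labels - predictions| ** p)) ** (1 / p). Purely for clarity;
# the TensorFlow version defined above is what the training code uses.
if __name__ == "__main__":
    import numpy as np

    def ellp_norm_loss_np(labels, predictions, p=0.5):
        delta = np.abs(labels - predictions)
        return np.mean(delta ** p) ** (1.0 / p)

    rng = np.random.default_rng(0)
    a = rng.normal(size=128)
    b = rng.normal(size=128)
    print("L^0.5 loss:", ellp_norm_loss_np(a, b))
    print("L^2 (RMSE-like):", ellp_norm_loss_np(a, b, p=2.0))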
| 33.789773
| 92
| 0.651757
|
8879087fcc8cdc5f38e4a931b237d2bade931933
| 1,319
|
py
|
Python
|
desuprofile_integration/urls.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 13
|
2015-11-29T12:19:12.000Z
|
2021-02-21T15:42:11.000Z
|
desuprofile_integration/urls.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 23
|
2015-04-29T19:43:34.000Z
|
2021-02-10T05:50:17.000Z
|
desuprofile_integration/urls.py
|
darkismus/kompassi
|
35dea2c7af2857a69cae5c5982b48f01ba56da1f
|
[
"CC-BY-3.0"
] | 11
|
2015-09-20T18:59:00.000Z
|
2020-02-07T08:47:34.000Z
|
from django.conf.urls import include, url
from django.views.generic.base import TemplateView
from .views import (
CallbackView,
LoginView,
ConfirmationView,
desuprogramme_import_view,
desuprogramme_feedback_view,
)
urlpatterns = [
url(
r'^desuprofile/oauth2/login/?$',
LoginView.as_view(),
name='desuprofile_integration_oauth2_login_view',
),
url(
r'^desuprofile/oauth2/callback/?$',
CallbackView.as_view(),
name='desuprofile_integration_oauth2_callback_view',
),
url(
r'^desuprofile/confirm/?$',
TemplateView.as_view(template_name='desuprofile_integration_confirmation_required_view.pug'),
name='desuprofile_integration_confirmation_required_view',
),
url(
r'^desuprofile/confirm/(?P<code>[a-f0-9]+)/?$',
ConfirmationView.as_view(),
name='desuprofile_integration_confirmation_view',
),
url(
r'^api/v1/events/(?P<event_slug>[a-z0-9-]+)/programme/(?:desu)+/?',
desuprogramme_import_view,
name='desuprogramme_import_view',
),
url(
r'^api/v1/events/(?P<event_slug>[a-z0-9-]+)/programme/(?P<programme_slug>[a-z0-9-]+)/feedback/?$',
desuprogramme_feedback_view,
name='desuprogramme_feedback_view',
),
]
| 25.862745
| 106
| 0.649735
|
4b72cd105dd77b23e0d38d368dc66f62a8b3fdb9
| 3,630
|
py
|
Python
|
.leetcode/393.utf-8-validation.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/393.utf-8-validation.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/393.utf-8-validation.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
# @lc app=leetcode id=393 lang=python3
#
# [393] UTF-8 Validation
#
# https://leetcode.com/problems/utf-8-validation/description/
#
# algorithms
# Medium (38.53%)
# Likes: 314
# Dislikes: 1337
# Total Accepted: 61K
# Total Submissions: 158K
# Testcase Example: '[197,130,1]'
#
# Given an integer array data representing the data, return whether it is a
# valid UTF-8 encoding.
#
# A character in UTF8 can be from 1 to 4 bytes long, subjected to the following
# rules:
#
#
# For a 1-byte character, the first bit is a 0, followed by its Unicode
# code.
# For an n-bytes character, the first n bits are all one's, the n + 1 bit is 0,
# followed by n - 1 bytes with the most significant 2 bits being 10.
#
#
# This is how the UTF-8 encoding would work:
#
#
# Char. number range | UTF-8 octet sequence
# (hexadecimal) | (binary)
# --------------------+---------------------------------------------
# 0000 0000-0000 007F | 0xxxxxxx
# 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
# 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
# 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
#
#
# Note: The input is an array of integers. Only the least significant 8 bits of
# each integer is used to store the data. This means each integer represents
# only 1 byte of data.
#
#
# Example 1:
#
#
# Input: data = [197,130,1]
# Output: true
# Explanation: data represents the octet sequence: 11000101 10000010 00000001.
# It is a valid utf-8 encoding for a 2-bytes character followed by a 1-byte
# character.
#
#
# Example 2:
#
#
# Input: data = [235,140,4]
# Output: false
# Explanation: data represented the octet sequence: 11101011 10001100 00000100.
# The first 3 bits are all one's and the 4th bit is 0 means it is a 3-bytes
# character.
# The next byte is a continuation byte which starts with 10 and that's correct.
# But the second continuation byte does not start with 10, so it is
# invalid.
#
#
#
# Constraints:
#
#
# 1 <= data.length <= 2 * 10^4
# 0 <= data[i] <= 255
#
#
#
# @lc tags=bit-manipulation
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Determine whether an integer sequence is a valid UTF-8 byte sequence.
# Work directly on the binary representation and check the relevant bits.
#
# @lc idea=end
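#
# A brief worked example (added for illustration; not part of the original
# solution): for data = [197, 130, 1] the low 8 bits of each integer are
#   197 -> 11000101  (leading bits 110 => start of a 2-byte character)
#   130 -> 10000010  (leading bits 10  => a valid continuation byte)
#     1 -> 00000001  (leading bit  0   => a standalone 1-byte character)
# so the sequence is a valid encoding, matching Example 1 above.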
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def validUtf8(self, data: List[int]) -> bool:
l = 0
buffer = [
# 0b00000000,
0b10000000,
0b11000000,
0b11100000,
0b11110000,
0b11111000,
]
for d in reversed(data):
if (d & 0b11000000) == 0b10000000:
l += 1
else:
if l == 0:
if (d & 0b10000000) != 0:
return False
else:
if l > 3:
return False
if (d & buffer[l + 1]) != buffer[l]:
return False
l = 0
return l == 0
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print(str(Solution().validUtf8([240, 162, 138, 147, 145])))
print(str(Solution().validUtf8([237])))
print(str(Solution().validUtf8([145])))
print('Example 1:')
print('Input : ')
print('data = [197,130,1]')
print('Exception :')
print('true')
print('Output :')
print(str(Solution().validUtf8([197, 130, 1])))
print()
print('Example 2:')
print('Input : ')
print('data = [235,140,4]')
print('Exception :')
print('false')
print('Output :')
print(str(Solution().validUtf8([235, 140, 4])))
print()
pass
# @lc main=end
| 23.419355
| 79
| 0.579614
|
32b354ef0163eabe4fce7866a2af52b9a93af1bb
| 50,822
|
py
|
Python
|
fcts/servers.py
|
ZRunner/ZBot-test
|
6b1b906807405ab73c721aa92cd7230a8636e61c
|
[
"MIT"
] | null | null | null |
fcts/servers.py
|
ZRunner/ZBot-test
|
6b1b906807405ab73c721aa92cd7230a8636e61c
|
[
"MIT"
] | null | null | null |
fcts/servers.py
|
ZRunner/ZBot-test
|
6b1b906807405ab73c721aa92cd7230a8636e61c
|
[
"MIT"
] | null | null | null |
import copy
import time
import typing
from math import ceil
import discord
import emoji
from cachingutils import LRUCache
from discord.ext import commands
from libs.classes import MyContext, Zbot
from fcts import checks
roles_options = ["clear", "slowmode", "mute", "kick", "ban", "warn", "say", "welcome_roles",
"muted_role", 'partner_role', 'update_mentions', 'verification_role', 'voice_roles']
bool_options = ["enable_xp", "anti_caps_lock", "enable_fun",
"help_in_dm", "compress_help", "anti_scam"]
textchan_options = ["welcome_channel", "bot_news", "poll_channels",
"modlogs_channel", "noxp_channels", "partner_channel"]
vocchan_options = ["membercounter", "voice_channel"]
category_options = ["voice_category", "tickets_category"]
text_options = ["welcome", "leave", "levelup_msg",
"description", "voice_channel_format"]
prefix_options = ['prefix']
emoji_option = ['vote_emojis', 'morpion_emojis']
numb_options = []
raid_options = ["anti_raid"]
xp_type_options = ['xp_type']
color_options = ['partner_color']
xp_rate_option = ['xp_rate']
levelup_channel_option = ["levelup_channel"]
ttt_display_option = ["ttt_display"]
class Servers(commands.Cog):
""""Cog in charge of all the bot configuration management for your server. As soon as an option
is searched, modified or deleted, this cog will handle the operations."""
def __init__(self, bot: Zbot):
self.bot = bot
self.default_language = 'en'
self.embed_color = discord.Colour(0x3fb9ef)
self.log_color = 1793969
self.file = "servers"
self.cache: LRUCache = LRUCache(max_size=10000, timeout=3600)
self.raids_levels = ["None","Smooth","Careful","High","(╯°□°)╯︵ ┻━┻"]
self.default_opt = {"rr_max_number":7,
"rss_max_number":10,
"roles_react_max_number":20,
"language":1,
"description":"",
"clear":"",
"slowmode":"",
"mute":"",
"kick":"",
"ban":"",
"warn":"",
"say":"",
"hunter":"",
"welcome_channel":'',
"welcome":"",
"leave":"",
"welcome_roles":"",
"bot_news":'',
"save_roles":0,
"poll_channels":"",
"modlogs_channel":"",
"enable_xp":0,
"levelup_msg":'',
"levelup_channel":'any',
"noxp_channels":'',
"xp_rate":1.0,
"xp_type":0,
"anti_caps_lock":0,
"enable_fun":1,
"prefix":'!',
"membercounter":"",
"anti_raid":0,
"vote_emojis":":thumbsup:;:thumbsdown:;",
"morpion_emojis":":red_circle:;:blue_circle:;",
"help_in_dm":0,
"muted_role":"",
"partner_channel":'',
"partner_color":10949630,
'partner_role':'',
'update_mentions':'',
'verification_role':'',
'voice_roles':'',
'voice_channel':'',
'voice_category':'',
'voice_channel_format': '{random}',
'compress_help': 0,
'ttt_display': 2,
'anti_scam': 0,
'tickets_category': ''}
self.optionsList = ["prefix","language","description","clear","slowmode","mute","kick","ban","warn","say","welcome_channel","welcome","leave","welcome_roles","anti_scam","poll_channels","partner_channel","partner_color","partner_role","modlogs_channel","verification_role","enable_xp","levelup_msg","levelup_channel","noxp_channels","xp_rate","xp_type","anti_caps_lock","enable_fun","membercounter","anti_raid","vote_emojis","morpion_emojis","help_in_dm","compress_help","muted_role","voice_roles","voice_channel","voice_category","voice_channel_format","ttt_display","bot_news","update_mentions", "tickets_category"]
self.membercounter_pending = {}
@property
def table(self):
return 'servers_beta' if self.bot.beta else 'servers'
async def get_bot_infos(self, botID: int):
"""Return every options of the bot"""
if not self.bot.database_online:
return list()
query = ("SELECT * FROM `bot_infos` WHERE `ID`={}".format(botID))
async with self.bot.db_query(query) as query_results:
liste = list(query_results)
return liste
async def edit_bot_infos(self, bot_id: int, values=[()]):
if not isinstance(values, list):
raise ValueError
set_query = ', '.join('{}=%s'.format(val[0]) for val in values)
query = f"UPDATE `bot_infos` SET {set_query} WHERE `ID`='{bot_id}'"
async with self.bot.db_query(query, (val[1] for val in values)):
pass
return True
async def get_languages(self, ignored_guilds: typing.List[int], return_dict: bool = False):
"""Return stats on used languages"""
if not self.bot.database_online or not 'Languages' in self.bot.cogs:
return []
query = f"SELECT `language`,`ID` FROM `{self.table}`"
liste = []
guilds = {x.id for x in self.bot.guilds if x.id not in ignored_guilds}
async with self.bot.db_query(query) as query_results:
for row in query_results:
if row['ID'] in guilds:
liste.append(row['language'])
for _ in range(len(guilds)-len(liste)):
liste.append(self.bot.get_cog('Languages').languages.index(self.default_language))
if return_dict:
langs = {}
for e, lang in enumerate(self.bot.get_cog('Languages').languages):
langs[lang] = liste.count(e)
else:
langs = []
for e, lang in enumerate(self.bot.get_cog('Languages').languages):
langs.append((lang, liste.count(e)))
return langs
async def get_xp_types(self, ignored_guilds: typing.List[int], return_dict: bool = False):
"""Return stats on used xp types"""
if not self.bot.database_online:
return list()
query = ("SELECT `xp_type`,`ID` FROM `{}`".format(self.table))
liste = list()
guilds = {x.id for x in self.bot.guilds if x.id not in ignored_guilds}
async with self.bot.db_query(query) as query_results:
for row in query_results:
if row['ID'] in guilds:
liste.append(row['xp_type'])
for _ in range(len(guilds)-len(liste)):
liste.append(self.default_opt['xp_type'])
if return_dict:
types = dict()
for e, name in enumerate(self.bot.get_cog('Xp').types):
types[name] = liste.count(e)
else:
types = list()
for e, name in enumerate(self.bot.get_cog('Xp').types):
types.append((name, liste.count(e)))
return types
async def staff_finder(self, user: discord.Member, option: str):
"""Check is user is part of a staff"""
if option not in roles_options:
raise TypeError
if await self.bot.get_cog('Admin').check_if_god(user):
return True
if not self.bot.database_online or not isinstance(user, discord.Member):
return False
staff = str(await self.get_option(user.guild.id,option)).split(";")
staff = [x for x in staff if len(x) > 10 and x.isnumeric()]
if len(staff) == 0:
return False
for r in user.roles:
if str(r.id) in staff:
return True
raise commands.CommandError("User doesn't have required roles")
async def get_option(self, guild_id: int, name: str) -> typing.Optional[str]:
"""return the value of an option
Return None if this option doesn't exist or if no value has been set"""
if isinstance(guild_id, discord.Guild):
guild_id = guild_id.id
elif guild_id is None or not self.bot.database_online:
return None
if (cached := self.cache.get((guild_id, name))) is not None:
return cached
sql_result = await self.get_server([name],criters=["ID="+str(guild_id)],return_type=list)
if len(sql_result) == 0:
value = None
elif sql_result[0][0] == '':
value = self.default_opt[name]
else:
value = sql_result[0][0]
self.cache[(guild_id, name)] = value
return value
async def get_server(self, columns=[], criters=["ID > 1"], relation="AND", return_type=dict):
"""return every options of a server"""
await self.bot.wait_until_ready()
if not isinstance(columns, list) or not isinstance(criters, list):
raise ValueError
if len(columns) == 0:
cl = "*"
else:
cl = "`"+"`,`".join(columns)+"`"
relation = " "+relation+" "
query = ("SELECT {} FROM `{}` WHERE {}".format(cl, self.table, relation.join(criters)))
liste = list()
async with self.bot.db_query(query, astuple=(return_type!=dict)) as query_results:
for row in query_results:
if isinstance(row, dict):
for k, v in row.items():
if v == '':
row[k] = self.default_opt[k]
liste.append(row)
return liste
async def modify_server(self, guild_id: int, values=[()]):
"""Update a server config in the database"""
if not isinstance(values, list):
raise ValueError
set_query = ', '.join(f'`{val[0]}`=%s' for val in values)
query = f"UPDATE `{self.table}` SET {set_query} WHERE `ID`={guild_id}"
async with self.bot.db_query(query, (val[1] for val in values)):
pass
for value in values:
self.cache[(guild_id, value[0])] = value[1]
return True
async def delete_option(self, guild_id: int, opt):
"""Reset an option"""
if opt not in self.default_opt.keys():
raise ValueError
value = self.default_opt[opt]
if opt == 'language':
await self.bot.get_cog('Languages').change_cache(guild_id,value)
elif opt == 'prefix':
await self.bot.prefix_manager.update_prefix(guild_id,value)
return await self.modify_server(guild_id,values=[(opt,value)])
async def add_server(self, guild_id: int):
"""add a new server to the db"""
if isinstance(guild_id, str):
if not guild_id.isnumeric():
raise ValueError
query = "INSERT INTO `{}` (`ID`) VALUES ('{}')".format(self.table,guild_id)
async with self.bot.db_query(query):
pass
return True
async def is_server_exist(self, guild_id: int):
"""Check if a server is already in the db"""
i = await self.get_option(guild_id, "ID")
if i is None:
guild = self.bot.get_guild(guild_id)
if guild is None:
raise Exception("Guild not found")
emb_desc = f"New server in the database :tada: `{guild.name}` ({guild.id})"
emb = discord.Embed(description=emb_desc, color=self.log_color, timestamp=self.bot.utcnow())
await self.bot.send_embed([emb])
return await self.add_server(guild_id)
return True
async def delete_server(self, guild_id: int):
"""remove a server from the db"""
if not isinstance(guild_id, int):
raise ValueError
query = f"DELETE FROM `{self.table}` WHERE `ID`='{guild_id}'"
async with self.bot.db_query(query):
pass
return True
@commands.group(name='config')
@commands.guild_only()
async def sconfig_main(self, ctx: MyContext):
"""Function for setting the bot on a server
..Doc server.html#config-options"""
if ctx.bot.database_online:
await self.is_server_exist(ctx.guild.id)
if ctx.invoked_subcommand is None:
msg = copy.copy(ctx.message)
subcommand_passed = ctx.message.content.replace(ctx.prefix+"config ","")
if subcommand_passed is None:
msg.content = ctx.prefix + "config help"
elif subcommand_passed.isnumeric():
msg.content = ctx.prefix + "config see " + subcommand_passed
elif subcommand_passed.split(" ")[0] in self.optionsList:
if len(subcommand_passed.split(" "))==1:
msg.content = ctx.prefix + "config see " + subcommand_passed
else:
msg.content = ctx.prefix + "config change " + subcommand_passed
else:
msg.content = ctx.prefix + "config help"
new_ctx = await self.bot.get_context(msg)
await self.bot.invoke(new_ctx)
@sconfig_main.command(name="help")
@commands.cooldown(1, 2, commands.BucketType.guild)
async def sconfig_help(self, ctx: MyContext):
"""Get help about this command"""
msg = await self.bot._(ctx.guild, "server.config-help", p=await self.bot.prefix_manager.get_prefix(ctx.guild))
await ctx.send(msg.format(ctx.guild.owner.name))
@sconfig_main.command(name="reset", aliases=["delete", "del"])
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.check(checks.has_manage_guild)
async def sconfig_del(self, ctx: MyContext, option: str):
"""Reset an option to its initial value"""
if not ctx.bot.database_online:
return await ctx.send(await self.bot._(ctx.guild.id,"cases.no_database"))
await self.sconfig_del2(ctx, option)
@sconfig_main.command(name="change")
@commands.cooldown(1, 2, commands.BucketType.guild)
@commands.check(checks.has_manage_guild)
async def sconfig_change(self, ctx: MyContext, option:str, *, value: str):
"""Allows you to modify an option"""
if not ctx.bot.database_online:
return await ctx.send(await self.bot._(ctx.guild.id,"cases.no_database"))
if value == 'del':
await self.sconfig_del2(ctx, option)
return
try:
if option in roles_options:
await self.conf_roles(ctx, option, value)
elif option in bool_options:
await self.conf_bool(ctx, option, value)
elif option in textchan_options:
await self.conf_textchan(ctx, option, value)
elif option in category_options:
await self.conf_category(ctx, option, value)
elif option in text_options:
await self.conf_text(ctx, option, value)
elif option in numb_options:
await self.conf_numb(ctx, option, value)
elif option in vocchan_options:
await self.conf_vocal(ctx, option, value)
elif option == "language":
await self.conf_lang(ctx, option, value)
elif option in prefix_options:
await self.conf_prefix(ctx, option, value)
elif option in raid_options:
await self.conf_raid(ctx, option, value)
elif option in emoji_option:
await self.conf_emoji(ctx, option, value)
elif option in xp_type_options:
await self.conf_xp_type(ctx, option, value)
elif option in color_options:
await self.conf_color(ctx, option, value)
elif option in xp_rate_option:
await self.conf_xp_rate(ctx, option, value)
elif option in levelup_channel_option:
await self.conf_levelup_chan(ctx, option, value)
elif option in ttt_display_option:
await self.conf_tttdisplay(ctx, option, value)
else:
await ctx.send(await self.bot._(ctx.guild.id, "server.option-notfound"))
return
except Exception as e:
await self.bot.get_cog("Errors").on_error(e,ctx)
await ctx.send(await self.bot._(ctx.guild.id, "server.internal-error"))
async def sconfig_del2(self, ctx: MyContext, option: str):
try:
t = await self.delete_option(ctx.guild.id,option)
if t:
msg = await self.bot._(ctx.guild.id, "server.value-deleted", option=option)
else:
msg = await self.bot._(ctx.guild.id, "server.internal-error")
await ctx.send(msg)
msg = "Reset option in server {}: {}".format(ctx.guild.id,option)
emb = discord.Embed(description=msg, color=self.log_color, timestamp=self.bot.utcnow())
emb.set_author(name=self.bot.user, icon_url=self.bot.user.display_avatar)
await self.bot.send_embed([emb])
self.bot.log.debug(msg)
except ValueError:
await ctx.send(await self.bot._(ctx.guild.id, "server.option-notfound"))
except Exception as err:
await self.bot.get_cog("Errors").on_error(err,ctx)
await ctx.send(await self.bot._(ctx.guild.id, "server.internal-error"))
async def send_embed(self, guild: discord.Guild, option: str, value: str):
msg = "Changed option in server {}: {} = `{}`".format(guild.id,option,value)
emb = discord.Embed(description=msg, color=self.log_color, timestamp=self.bot.utcnow())
emb.set_footer(text=guild.name)
emb.set_author(name=self.bot.user, icon_url=self.bot.user.display_avatar)
await self.bot.send_embed([emb])
self.bot.log.debug(msg)
async def get_guild(self, item) -> discord.Guild:
"""Try to find a guild from anything (int, guild, ctx, str)"""
guild = None
if isinstance(item, commands.Context):
guild = item.guild
elif isinstance(item, discord.Guild):
guild = item
elif isinstance(item, str):
if item.isnumeric():
guild = self.bot.get_guild(int(item))
elif isinstance(item, int):
guild = self.bot.get_guild(item)
return guild
async def conf_roles(self, ctx: MyContext, option: str, value: str):
guild = await self.get_guild(ctx)
ext = not isinstance(ctx, commands.Context)
if value == "scret-desc":
roles = await self.get_option(guild.id,option)
return await self.form_roles(guild, roles, ext)
else:
roles = value.split(",")
liste = list()
liste2 = list()
for role in roles:
role = role.strip()
try:
if role == "everyone":
r = guild.default_role
else:
r = await commands.RoleConverter().convert(ctx,role)
except commands.errors.BadArgument:
msg = await self.bot._(guild.id, "server.edit-error.role", name=role)
await ctx.send(msg)
return
if str(r.id) in liste:
continue
liste.append(str(r.id))
liste2.append(r.mention)
await self.modify_server(guild.id,values=[(option,";".join(liste))])
msg = await self.bot._(guild.id, "server.edit-success.role", opt=option, val=", ".join(liste2))
await ctx.send(msg)
await self.send_embed(guild, option, value)
async def form_roles(self, guild: discord.Guild, roles: str, ext: bool=False):
if not isinstance(roles,int):
if (roles is None or len(roles) == 0):
return "Ø"
roles = roles.split(";")
else:
roles = [roles]
g_roles = list()
for r in roles:
g_role = guild.get_role(int(r))
if g_role is None:
g_roles.append('<' + await self.bot._(guild, "server.deleted-role") + '>')
elif ext:
g_roles.append("@"+g_role.name)
else:
g_roles.append(g_role.mention)
return g_roles
async def conf_bool(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
v = await self.get_option(guild.id,option)
return await self.form_bool(v)
else:
if value.lower() in {"true","vrai","1","oui","yes","activé"}:
value = True
v = 1
elif value.lower() in {"false","faux","non","no","désactivé","wrong",'0'}:
value = False
v = 0
else:
msg = await self.bot._(ctx.guild.id, "server.edit-error.boolean", name=option)
await ctx.send(msg)
return
await self.modify_server(ctx.guild.id,values=[(option,v)])
msg = await self.bot._(ctx.guild.id, "server.edit-success.boolean", opt=option, val=value)
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
async def form_bool(self, boolean):
if boolean == 1:
v = True
else:
v = False
return v
async def conf_textchan(self, ctx: MyContext, option: str, value: str):
guild = await self.get_guild(ctx)
ext = not isinstance(ctx, commands.Context)
if value == "scret-desc":
chans = await self.get_option(guild.id,option)
return await self.form_textchan(guild, chans, ext)
else:
chans = value.split(",")
liste = list()
liste2 = list()
for chan in chans:
chan = chan.strip()
if len(chan) == 0:
continue
try:
c = await commands.TextChannelConverter().convert(ctx,chan)
except commands.errors.BadArgument:
msg = await self.bot._(guild.id, "server.edit-error.channel", channel=chan)
await ctx.send(msg)
return
if str(c.id) in liste:
continue
liste.append(str(c.id))
liste2.append(c.mention)
await self.modify_server(guild.id,values=[(option,";".join(liste))])
if option=='noxp_channels':
self.bot.get_cog('Xp').xp_channels_cache[guild.id] = [int(x) for x in liste]
msg = await self.bot._(guild.id, "server.edit-success.channel", opt=option, val=", ".join(liste2))
await ctx.send(msg)
await self.send_embed(guild, option, value)
async def form_textchan(self, guild: discord.Guild, chans: str, ext=False):
if len(chans) == 0:
return "Ø"
chans = chans.split(";")
g_chans = list()
for r in chans:
g_chan = guild.get_channel(int(r))
if g_chan is None:
g_chans.append('<' + await self.bot._(guild, "server.deleted-channel") + '>')
elif ext:
g_chans.append("#"+g_chan.name)
else:
g_chans.append(g_chan.mention)
return g_chans
async def conf_category(self, ctx: MyContext, option: str, value: str):
guild = await self.get_guild(ctx)
ext = not isinstance(ctx, commands.Context)
if value == "scret-desc":
chans = await self.get_option(guild.id,option)
return await self.form_category(guild, chans, ext)
else:
chans = value.split(",")
liste = list()
liste2 = list()
for chan in chans:
chan = chan.strip()
if len(chan) == 0:
continue
try:
c = await commands.CategoryChannelConverter().convert(ctx, chan)
except commands.errors.BadArgument:
msg = await self.bot._(guild.id, "server.edit-error.category", name=chan)
await ctx.send(msg)
return
if str(c.id) in liste:
continue
liste.append(str(c.id))
liste2.append(c.name)
await self.modify_server(guild.id, values=[(option, ";".join(liste))])
msg = await self.bot._(guild.id, "server.edit-success.category", opt=option, val=", ".join(liste2))
await ctx.send(msg)
await self.send_embed(guild, option, value)
async def form_category(self, guild: discord.Guild, chans: str, ext=False):
if len(chans) == 0:
return "Ø"
chans = chans.split(";")
g_chans = list()
for r in chans:
g_chan = guild.get_channel(int(r))
if g_chan is None:
g_chans.append('<' + await self.bot._(guild, "server.deleted-channel") + '>')
else:
g_chans.append(g_chan.name)
return g_chans
async def conf_emoji(self, ctx: MyContext, option: str, value: str):
guild = await self.get_guild(ctx)
if value == "scret-desc":
emojis = await self.get_option(guild.id,option)
return ' '.join(await self.form_emoji(emojis, option))
else:
emojis = value.split(',') if ',' in value else value.split(' ')
liste = []
liste2 = []
for e in emojis:
e = e.strip()
if len(e) == 0:
continue
try:
e = await commands.EmojiConverter().convert(ctx,e)
except commands.errors.BadArgument:
if e not in self.bot.get_cog("Emojis").unicode_list:
msg = await self.bot._(ctx.guild.id, "server.edit-error.emoji", emoji=e)
await ctx.send(msg)
return
if emoji.demojize(e) not in liste:
liste.append(emoji.demojize(e))
liste2.append(e)
else:
if str(e.id) not in liste:
liste.append(str(e.id))
liste2.append("<:{}:{}>".format(e.name,e.id))
await self.modify_server(ctx.guild.id,values=[(option,";".join(liste))])
msg = await self.bot._(ctx.guild.id, "server.edit-success.emojis", opt=option, val=", ".join(liste2))
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
async def form_emoji(self, emojis: str, option: str):
if len(emojis) == 0:
emojis = self.default_opt[option]
emojis = emojis.split(";")
l_em = list()
for r in emojis:
if len(r) == 0:
continue
if r.isnumeric():
d_em = discord.utils.get(self.bot.emojis, id=int(r))
if d_em is None:
l_em.append("?")
else:
a = 'a' if d_em.animated else ''
l_em.append("<{}:{}:{}>".format(a, d_em.name, d_em.id))
else:
l_em.append(emoji.emojize(r, use_aliases=True))
return l_em
async def conf_vocal(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
chans = await self.get_option(guild.id,option)
return await self.form_vocal(guild,chans)
else:
chans = value.split(",")
liste = list()
liste2 = list()
for chan in chans:
chan = chan.strip()
try:
c = await commands.VoiceChannelConverter().convert(ctx,chan)
except commands.errors.BadArgument:
msg = await self.bot._(ctx.guild.id, "server.edit-error.channel", channel=chan)
await ctx.send(msg)
return
if str(c.id) in liste:
continue
liste.append(str(c.id))
liste2.append(c.mention)
await self.modify_server(ctx.guild.id,values=[(option,";".join(liste))])
msg = await self.bot._(ctx.guild.id, "server.edit-success.channel", opt=option, val=", ".join(liste2))
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
async def form_vocal(self, guild: discord.Guild, chans: str):
if len(chans) == 0:
return "Ø"
chans = chans.split(";")
g_chans = list()
for r in chans:
g_chan = discord.utils.get(guild.voice_channels, id=int(r))
if g_chan is None:
g_chans.append('<' + await self.bot._(guild, "server.deleted-channel") + '>')
else:
g_chans.append(g_chan.mention)
return g_chans
async def conf_text(self, ctx: MyContext, option: str, value: str):
guild = await self.get_guild(ctx)
if value == "scret-desc":
text = await self.get_option(guild.id,option)
return await self.form_text(text)
else:
await self.modify_server(guild.id,values=[(option, value)])
msg = await self.bot._(guild.id, "server.edit-success.text", opt=option, val=value)
await ctx.send(msg)
await self.send_embed(guild, option, value)
async def form_text(self, text: str):
if len(text) == 0:
text = "Ø"
elif len(text) > 1000:
text = "```\n" + text[:1000] + "...```"
else:
text = "```\n" + text + "```"
return text
async def conf_prefix(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
text = await self.get_option(guild.id,'prefix')
return await self.form_prefix(text)
else:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
if len(value) > 10:
await ctx.send(await self.bot._(ctx.guild.id, "server.edit-error.prefix.long"))
return
try:
await self.modify_server(ctx.guild.id,values=[('prefix',value)])
except Exception:
self.bot.log.warning("Error while editing prefix", exc_info=True)
await ctx.send(await self.bot._(ctx.guild.id,"server.edit-error.prefix.invalid"))
return
await self.bot.prefix_manager.update_prefix(ctx.guild.id,value)
msg = await self.bot._(ctx.guild.id, "server.edit-success.prefix", val=value)
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
async def form_prefix(self, text: str):
if len(text) == 0:
text = "!"
return '`'+text+'`'
async def conf_numb(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
return await self.get_option(guild.id,option)
else:
if value.isnumeric():
value = int(value)
await self.send_embed(ctx.guild, option, value)
else:
msg = await self.bot._(ctx.guild.id, "server.edit-error.numeric", name=option)
await ctx.send(msg.format(option))
async def conf_lang(self, ctx: MyContext, option: str,value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
if guild is None:
return self.default_language
v = await self.get_option(guild,option)
return await self.form_lang(v)
else:
languages = self.bot.get_cog("Languages").languages
if value in languages:
v = languages.index(value)
await self.modify_server(ctx.guild.id,values=[(option,v)])
await self.bot.get_cog('Languages').change_cache(ctx.guild.id,value)
msg = await self.bot._(ctx.guild.id, "server.edit-success.lang", val=value)
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
else:
msg = await self.bot._(ctx.guild.id,"server.edit-error.lang", list=", ".join(languages))
await ctx.send(msg)
async def form_lang(self, value: str):
if value is None:
return self.default_language
else:
return self.bot.get_cog("Languages").languages[value]
async def conf_raid(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
if guild is None:
return self.default_opt['anti_raid']
v = await self.get_option(guild,option)
return await self.form_raid(v)
else:
raids = self.raids_levels
value = value.capitalize()
if value.isnumeric():
value = int(value)
if value in range(0,len(raids)):
value = raids[value]
if value in raids:
v = raids.index(value)
await self.modify_server(ctx.guild.id,values=[(option,v)])
msg = await self.bot._(ctx.guild.id, "server.edit-success.raid", name=value, index=raids.index(value))
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
else:
msg = await self.bot._(ctx.guild.id,"server.edit-error.anti_raid", list=", ".join(raids))
await ctx.send(msg)
async def form_raid(self, value: str):
if value is None:
return self.default_opt['anti_raid']
else:
return self.raids_levels[value]
async def conf_xp_type(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
if guild is None:
return self.bot.get_cog('Xp').types[0]
v = await self.get_option(guild,option)
return await self.form_xp_type(v)
else:
available_types = self.bot.get_cog("Xp").types
if value in available_types:
v = available_types.index(value)
await self.modify_server(ctx.guild.id,values=[(option,v)])
msg = await self.bot._(ctx.guild.id, "server.edit-success.xp", val=value)
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
else:
msg = await self.bot._(ctx.guild.id, "server.edit-error.xp", list=", ".join(available_types))
await ctx.send(msg)
async def form_xp_type(self, value: str):
if value is None:
return self.bot.get_cog('Xp').types[0]
else:
return self.bot.get_cog("Xp").types[value]
async def conf_color(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
if guild is None:
return str(discord.Colour(self.default_opt[option]))
v = await self.get_option(guild,option)
return await self.form_color(option,v)
else:
try:
if value=="default":
color = discord.Color(self.default_opt[option])
else:
color = await commands.ColourConverter().convert(ctx,value)
except commands.errors.BadArgument:
msg = await self.bot._(ctx.guild.id, "server.edit-error.color")
await ctx.send(msg)
return
await self.modify_server(ctx.guild.id,values=[(option,color.value)])
msg = await self.bot._(ctx.guild.id, "server.edit-success.color", opt=option, val=color)
if ctx.can_send_embed:
await ctx.send(embed=discord.Embed(description=msg, colour=color))
else:
await ctx.send(msg)
await self.send_embed(ctx.guild,option,color)
async def form_color(self, option: str, value: str):
if value is None:
return str(discord.Colour(self.default_opt[option]))
else:
return str(discord.Colour(value))
async def conf_xp_rate(self, ctx: MyContext, option: str, value: str):
if value == "scret-desc":
guild = await self.get_guild(ctx)
return await self.get_option(guild.id,option)
else:
try:
value = round(float(value),2)
except ValueError:
msg = await self.bot._(ctx.guild.id, "server.edit-error.numeric", name=option)
await ctx.send(msg)
return
if value < 0.1 or value > 3:
await ctx.send(await self.bot._(ctx.guild.id, "server.edit-error.xp_rate", min=0.1, max=3))
return
await self.modify_server(ctx.guild.id,values=[(option,value)])
await ctx.send(await self.bot._(ctx.guild.id, "server.edit-success.xp_rate",val=value))
await self.send_embed(ctx.guild, option, value)
async def form_xp_rate(self, option: str, value: str):
if value is None:
return self.default_opt[option]
else:
return value
async def conf_levelup_chan(self, ctx: MyContext, option: str, value: str):
guild = await self.get_guild(ctx)
ext = not isinstance(ctx, commands.Context)
if value == "scret-desc":
chan = await self.get_option(guild.id,option)
return await self.form_levelup_chan(guild, chan, ext)
else:
if value.lower() in {"any", "tout", "tous", "current", "all", "any channel"}:
c_id = "any"
msg = await self.bot._(guild.id,"server.edit-success.levelup_channel.any")
elif value.lower() in {"none", "aucun", "disabled", "nowhere"}:
c_id = "none"
msg = await self.bot._(guild.id,"server.edit-success.levelup_channel.none")
else:
chan = value.strip()
try:
c = await commands.TextChannelConverter().convert(ctx,chan)
except commands.errors.BadArgument:
msg = await self.bot._(guild.id, "server.edit-error.channel", channel=chan)
await ctx.send(msg)
return
msg = await self.bot._(guild.id, "server.edit-success.levelup_channel.chan", val=c.mention)
c_id = c.id
await self.modify_server(guild.id,values=[(option,c_id)])
await ctx.send(msg)
await self.send_embed(guild, option, value)
async def form_levelup_chan(self, guild: discord.Guild, value: str, ext: bool=False):
if value == "any":
return "Any channel"
if value == "none":
return "Nowhere"
if value.isnumeric():
g_chan = guild.get_channel(int(value))
if g_chan is None:
return '<' + await self.bot._(guild, "server.deleted-channel") + '>'
elif ext:
return "#"+g_chan.name
else:
return g_chan.mention
return ""
async def conf_tttdisplay(self, ctx: MyContext, option: str, value: int):
if value == "scret-desc":
guild = await self.get_guild(ctx)
if guild is None:
return self.bot.get_cog('Morpions').types[0]
v = await self.get_option(guild, option)
return await self.form_tttdisplay(v)
else:
available_types: list = self.bot.get_cog("Morpions").types
value = value.lower()
if value in available_types:
v = available_types.index(value)
await self.modify_server(ctx.guild.id,values=[(option,v)])
msg = await self.bot._(ctx.guild.id, "server.edit-success.tttdisplay", val=value)
await ctx.send(msg)
await self.send_embed(ctx.guild, option, value)
else:
msg = await self.bot._(ctx.guild.id, "server.edit-error.tttdisplay", list=", ".join(available_types))
await ctx.send(msg)
async def form_tttdisplay(self, value: int):
if value is None:
return self.bot.get_cog('Morpions').types[0].capitalize()
else:
return self.bot.get_cog("Morpions").types[value].capitalize()
@sconfig_main.command(name='list')
async def sconfig_list(self, ctx: MyContext):
"""Get the list of every usable option"""
options = sorted(self.optionsList)
await ctx.send(await self.bot._(ctx.guild.id, "server.config-list",text="\n```\n-{}\n```\n".format('\n-'.join(options)), link="<https://zbot.readthedocs.io/en/latest/server.html#list-of-every-option>"))
@sconfig_main.command(name="see")
@commands.cooldown(1,10,commands.BucketType.guild)
async def sconfig_see(self, ctx: MyContext, option=None):
"""Displays the value of an option, or all options if none is specified"""
if not ctx.bot.database_online:
return await ctx.send(await self.bot._(ctx.guild.id,"cases.no_database"))
await self.send_see(ctx.guild,ctx.channel,option,ctx.message,ctx)
async def send_see(self, guild: discord.Guild, channel: typing.Union[discord.TextChannel, discord.Thread], option: str, msg: discord.Message, ctx: MyContext):
"""Envoie l'embed dans un salon"""
if self.bot.zombie_mode:
return
if option is None:
option = "1"
if option.isnumeric():
page = int(option)
if page<1:
return await ctx.send(await self.bot._(channel, "xp.low-page"))
liste = await self.get_server([],criters=["ID="+str(guild.id)])
if len(liste) == 0:
return await channel.send(await self.bot._(channel, "server.not-found", guild=guild.name))
temp = [(k,v) for k,v in liste[0].items() if k in self.optionsList]
max_page = ceil(len(temp)/20)
if page > max_page:
return await ctx.send(await self.bot._(channel, "xp.high-page"))
liste = {k:v for k,v in temp[(page-1)*20:page*20] }
if len(liste) == 0:
return await ctx.send("NOPE")
title = await self.bot._(channel, "server.see-title", guild=guild.name) + f" ({page}/{max_page})"
embed = discord.Embed(title=title, color=self.embed_color,
description=await self.bot._(channel, "server.see-0"), timestamp=msg.created_at)
if guild.icon:
embed.set_thumbnail(url=guild.icon.with_static_format('png'))
diff = channel.guild != guild
for i,v in liste.items():
#if i not in self.optionsList:
# continue
if i in roles_options:
r = await self.form_roles(guild,v,diff)
r = ", ".join(r)
elif i in bool_options:
r = str(await self.form_bool(v))
elif i in textchan_options:
r = await self.form_textchan(guild,v,diff)
r = ", ".join(r)
elif i in category_options:
r = await self.form_category(guild, v, diff)
r = ', '.join(r)
elif i in text_options:
#r = await self.form_text(v)
r = v if len(v)<500 else v[:500]+"..."
elif i in numb_options:
r = str(v)
elif i in vocchan_options:
r = await self.form_vocal(guild,v)
r = ", ".join(r)
elif i == "language":
r = await self.form_lang(v)
elif i in prefix_options:
r = await self.form_prefix(v)
elif i in raid_options:
r = await self.form_raid(v)
elif i in emoji_option:
r = ", ".join(await self.form_emoji(v, i))
elif i in xp_type_options:
r = await self.form_xp_type(v)
elif i in color_options:
r = await self.form_color(i,v)
elif i in xp_rate_option:
r = await self.form_xp_rate(i,v)
elif i in levelup_channel_option:
r = await self.form_levelup_chan(guild,v,diff)
elif i in ttt_display_option:
r = await self.form_tttdisplay(v)
else:
continue
if len(str(r)) == 0:
r = "Ø"
embed.add_field(name=i, value=r)
await channel.send(embed=embed)
return
elif ctx is not None:
if option in roles_options:
r = await self.conf_roles(ctx, option, 'scret-desc')
r = ", ".join(r)
elif option in bool_options:
r = str(await self.conf_bool(ctx, option, 'scret-desc'))
elif option in textchan_options:
r = await self.conf_textchan(ctx, option, 'scret-desc')
r = ", ".join(r)
elif option in category_options:
r = await self.conf_category(ctx, option, 'scret-desc')
r = ', '.join(r)
elif option in text_options:
r = await self.conf_text(ctx, option, 'scret-desc')
elif option in numb_options:
r = await self.conf_numb(ctx, option, 'scret-desc')
elif option in vocchan_options:
r = await self.conf_vocal(ctx, option, 'scret-desc')
r = ", ".join(r)
elif option == "language":
r = await self.conf_lang(ctx, option, 'scret-desc')
elif option in prefix_options:
r = await self.conf_prefix(ctx, option, 'scret-desc')
elif option in raid_options:
r = await self.conf_raid(ctx, option, 'scret-desc')
elif option in emoji_option:
r = await self.conf_emoji(ctx, option, 'scret-desc')
elif option in xp_type_options:
r = await self.conf_xp_type(ctx, option, 'scret-desc')
elif option in color_options:
r = await self.conf_color(ctx, option, 'scret-desc')
elif option in xp_rate_option:
r = await self.conf_xp_rate(ctx, option, 'scret-desc')
elif option in levelup_channel_option:
r = await self.conf_levelup_chan(ctx, option, 'scret-desc')
elif option in ttt_display_option:
r = await self.conf_tttdisplay(ctx, option, 'scret-desc')
else:
r = None
guild = ctx if isinstance(ctx, discord.Guild) else ctx.guild
if r is not None:
try:
r = await self.bot._(channel, f"server.server_desc.{option}", value=r)
except Exception as e:
pass
else:
r = await self.bot._(channel, "server.option-notfound")
try:
if not channel.permissions_for(channel.guild.me).embed_links:
await channel.send(await self.bot._(channel, "minecraft.cant-embed"))
return
title = await self.bot._(channel, "server.opt_title", opt=option, guild=guild.name)
if hasattr(ctx, "message"):
t = ctx.message.created_at
else:
t = ctx.bot.utcnow()
embed = discord.Embed(title=title, color=self.embed_color, description=r, timestamp=t)
if isinstance(ctx, commands.Context):
embed.set_footer(text=ctx.author, icon_url=ctx.author.display_avatar)
await channel.send(embed=embed)
except Exception as e:
await self.bot.get_cog('Errors').on_error(e,ctx if isinstance(ctx, commands.Context) else None)
@sconfig_main.command(name="reset-guild")
@commands.is_owner()
async def admin_delete(self, ctx: MyContext, ID:int):
"Reset the whole config of a server"
if await self.delete_server(ID):
await ctx.send("Le serveur n°{} semble avoir correctement été supprimé !".format(ID))
async def update_memberChannel(self, guild: discord.Guild):
# If we already did an update recently: abort
if guild.id in self.membercounter_pending.keys():
if self.membercounter_pending[guild.id] > time.time():
return False
ch = await self.get_option(guild.id,"membercounter")
if ch not in ['', None]:
ch = guild.get_channel(int(ch))
if ch is None:
return
lang = await self.bot._(guild.id,'_used_locale')
tr = str(await self.bot._(guild.id, "misc.membres")).capitalize()
text = "{}{}: {}".format(tr, " " if lang=='fr' else "" , guild.member_count)
if ch.name == text:
return
try:
await ch.edit(name=text, reason=await self.bot._(guild.id,"logs.reason.memberchan"))
self.membercounter_pending[guild.id] = round(time.time()) + 5*60 # cooldown 5min
return True
except Exception as e:
self.bot.log.debug("[UpdateMemberChannel] "+str(e))
return False
async def update_everyMembercounter(self):
if not self.bot.database_online:
return
i = 0
now = time.time()
for x in self.bot.guilds:
if x.id in self.membercounter_pending.keys() and self.membercounter_pending[x.id] < now:
del self.membercounter_pending[x.id]
await self.update_memberChannel(x)
i += 1
if i > 0:
emb = discord.Embed(description=f"[MEMBERCOUNTER] {i} channels refreshed", color=5011628, timestamp=self.bot.utcnow())
emb.set_author(name=self.bot.user, icon_url=self.bot.user.display_avatar)
await self.bot.send_embed([emb], url="loop")
async def setup(bot):
await bot.add_cog(Servers(bot))
| 45.015058
| 625
| 0.552792
|
498874c8291b4802765948e465b49d6c952a8997
| 3,179
|
py
|
Python
|
tc_manager_rest.py
|
Gradiant/tc-manager
|
4a1809e3019ac469c9a45a021351b25b99d5a47f
|
[
"MIT"
] | 1
|
2019-04-08T00:49:11.000Z
|
2019-04-08T00:49:11.000Z
|
tc_manager_rest.py
|
Gradiant/tc-manager
|
4a1809e3019ac469c9a45a021351b25b99d5a47f
|
[
"MIT"
] | null | null | null |
tc_manager_rest.py
|
Gradiant/tc-manager
|
4a1809e3019ac469c9a45a021351b25b99d5a47f
|
[
"MIT"
] | 1
|
2019-03-06T15:43:10.000Z
|
2019-03-06T15:43:10.000Z
|
from flask import Flask, jsonify, request, render_template
from tc_manager import NetworkInterfaces
import sys
import logging
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/interfaces', methods=['GET'])
def get_interfaces():
return jsonify({'interfaces': list(interfaces_dict.keys())})
@app.route('/api/interfaces/<interface_name>', methods=['GET'])
def get_interface(interface_name):
if interface_name in interfaces_dict:
return jsonify(interfaces_dict[interface_name].as_dict())
else:
return 'Interface {} not found'.format(interface_name), 404
@app.route('/api/interfaces/<interface_name>/default_rate', methods=['PUT', 'DELETE'])
def set_default_rate(interface_name):
if interface_name in interfaces_dict:
interface = interfaces_dict[interface_name]
if request.method == 'DELETE':
default_rate = None
else:
default_rate = request.get_json()
interface.default_rate = default_rate
return "", 200
else:
return 'Interface {} not found'.format(interface_name), 404
@app.route('/api/interfaces/<interface_name>/policies', methods=['POST'])
def post_policy(interface_name):
if interface_name in interfaces_dict:
interface = interfaces_dict[interface_name]
json_request = request.get_json()
logging.debug('post policy -> {}'.format(json_request))
policy = interface.post_policy(json_request['match'], json_request['action'])
return jsonify(policy)
else:
return 'Interface {} not found'.format(interface_name), 404
@app.route('/api/interfaces/<interface_name>/policies/<policy_id>', methods=['DELETE'])
def delete_policy(interface_name, policy_id):
policy_id = int(policy_id)
if interface_name in interfaces_dict:
interface = interfaces_dict[interface_name]
interface.delete_policy(policy_id)
return '', 200
else:
return 'Interface {} not found'.format(interface_name), 404
@app.route('/api/interfaces/policies', methods=['POST'])
def post_policy_all():
json_request = request.get_json()
logging.debug('post policy -> {}'.format(json_request))
interfaces.post_policy(json_request['match'], json_request['action'])
return '', 200
@app.route('/api/interfaces/policies', methods=['DELETE'])
def delete_policy_all():
json_request = request.get_json()
interfaces.delete_policy_by_match(json_request['match'])
return '', 200
@app.route('/api/interfaces/default_rate', methods=['PUT', 'DELETE'])
def set_default_rate_all():
if request.method == 'DELETE':
default_rate = None
else:
default_rate = request.get_json()
interfaces.set_default_rate(default_rate)
return "", 200
logging.basicConfig(format='%(asctime)s %(levelname)s:%(name)s:%(message)s', level=logging.getLevelName('DEBUG'))
if __name__ == '__main__':
interface_names = sys.argv
print(interface_names)
interfaces = NetworkInterfaces(whitelist=interface_names[1:])
interfaces_dict = interfaces.interfaces
app.run(host='0.0.0.0', port=5000, debug=True)
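# Hedged usage sketch (not part of the original module). The interface names
# below ('eth0', 'eth1') and the request bodies are placeholders; the exact
# schema of the 'match' and 'action' objects is defined by
# tc_manager.NetworkInterfaces, which is not shown here.
#
#   python tc_manager_rest.py eth0 eth1
#   curl http://localhost:5000/api/interfaces
#   curl http://localhost:5000/api/interfaces/eth0
#   curl -X PUT -H "Content-Type: application/json" \
#        -d '"1mbit"' http://localhost:5000/api/interfaces/eth0/default_rate
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"match": {...}, "action": {...}}' \
#        http://localhost:5000/api/interfaces/eth0/policies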
| 30.567308
| 113
| 0.698333
|
a9efd4726eee7ff939c7a3b848f77fcd86cc3ca2
| 7,792
|
py
|
Python
|
yolox/utils/visualize.py
|
Bruce198899/YOLOX_Runway
|
aa3bd25112cfe131556e9e9354288f6b50f3892c
|
[
"Apache-2.0"
] | null | null | null |
yolox/utils/visualize.py
|
Bruce198899/YOLOX_Runway
|
aa3bd25112cfe131556e9e9354288f6b50f3892c
|
[
"Apache-2.0"
] | null | null | null |
yolox/utils/visualize.py
|
Bruce198899/YOLOX_Runway
|
aa3bd25112cfe131556e9e9354288f6b50f3892c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import cv2
import matplotlib.pyplot
import numpy as np
import scipy.special
import torch
import torchvision.transforms as transforms
from PIL import Image
from ufld.data.constant import runway_row_anchor
from sklearn.linear_model import LinearRegression
from loguru import logger
__all__ = ["vis"]
runway_img_transforms = transforms.Compose([
transforms.Resize((448, 448)),
transforms.ToTensor(),
transforms.Normalize((0.41846887, 0.44654263, 0.44974034), (0.23490292, 0.24692507, 0.26558167)),
])
def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None, obj_id=None, track_fps=-1, direction_model=None,
text=None, lost_boxes=None):
# 2021/12/27: added the obj_id parameter, attempting to merge plot_track and the vis function.
for i in range(len(boxes)):
box = boxes[i]
cls_id = int(cls_ids[i])
score = scores[i]
if score < conf and obj_id is None:
continue
x0 = int(box[0])
y0 = max(0, int(box[1]))
x1 = int(box[2])
y1 = int(box[3])
color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
if obj_id is None:
obj_text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
else:
obj_text = '{}-ID:{}:{:.1f}%'.format(class_names[cls_id], obj_id[i], score * 100)
txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
txt_size = cv2.getTextSize(obj_text, font, 0.4, 1)[0]
if direction_model is not None and cls_id == 0: # the runway class id should be 0
delta_x = int((x1 - x0) * 0.2)
delta_y = int((y1 - y0) * 0.1)
x0_d = max(0, x0 - delta_x)
x1_d = min(len(img[0]) - 1, x1 + delta_x)
# y0 = y0 - delta_y
y1_d = min(len(img) - 1, y1 + delta_y)
logger.info("Runway Detected at [{}, {}, {}, {}]".format(x0, y0, x1, y1))
griding_num = 32
cls_num_per_lane = 45
runway_roi = img[y0:y1_d, x0_d:x1_d, :] # per the YOLOX code, the image here is a BGR image read by cv2.
runway_roi = cv2.cvtColor(runway_roi, cv2.COLOR_BGR2RGB) # testing shows the BGR-to-RGB conversion is required here!
cv2.imshow("slice", runway_roi)
roi_height = len(runway_roi)
roi_width = len(runway_roi[0])
image_ndarray = Image.fromarray(runway_roi)
image_tensor = runway_img_transforms(image_ndarray)
image_tensor = image_tensor.unsqueeze(0)
image_tensor = image_tensor.cuda()
with torch.no_grad():
direction_out = direction_model(image_tensor)
col_sample = np.linspace(0, roi_width, griding_num)
col_sample_w = col_sample[1] - col_sample[0]
out_classification = direction_out[0].data.cpu().numpy()
out_classification = out_classification[:, ::-1, :]
prob = scipy.special.softmax(out_classification[:-1, :, :], axis=0)
idx = np.arange(griding_num) + 1
idx = idx.reshape(-1, 1, 1)
loc = np.sum(prob * idx, axis=0)
out_classification = np.argmax(out_classification, axis=0)
loc[out_classification == griding_num] = 0
out_classification = loc
LR_model = LinearRegression()
for j in range(out_classification.shape[1]):
out_i = out_classification[:, j].reshape((-1, 1))
out_index = out_i != 0
if sum(out_index) != 0:
for k in range(out_classification.shape[0]):
if out_classification[k, j] > 0:
ppp = (round(out_classification[k, j] * col_sample_w -1 + x0_d),
round(runway_row_anchor[cls_num_per_lane - 1 - k] * roi_height / 448 + y0))
cv2.circle(img, ppp, 3, (0, 100 + j * 120, 0), -1)
activate_anchor = np.array(runway_row_anchor).reshape((-1, 1))[out_index].reshape((-1, 1))
activate_out = out_i[out_index].reshape((-1, 1))
LR_model.fit(activate_anchor, activate_out)
out_lr = np.squeeze(LR_model.predict(np.array(runway_row_anchor).reshape((-1, 1))))
cv2.line(img,
(int(out_lr[0] * col_sample_w - 1 + x0_d),
int(runway_row_anchor[-1] * roi_height / 448 + y0)),
(int(out_lr[-1] * col_sample_w - 1 + x0_d),
int(runway_row_anchor[0] * roi_height / 448 + y0)),
(255, 0, 0), 2)
cv2.rectangle(img, (x0_d, y0), (x1_d, y1_d), (0, 0, 255), 2)
cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
cv2.rectangle(
img,
(x0, y0 + 1),
(x0 + txt_size[0] + 1, y0 + int(1.5 * txt_size[1])),
txt_bk_color,
-1
)
cv2.putText(img, obj_text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)
cv2.putText(img, f'FPS:{track_fps}', (0, txt_size[1]), font, 0.4, txt_color, thickness=1)
cv2.putText(img, f'{text}', (0, 3 * txt_size[1]), font, 0.4, txt_color, thickness=1)
return img
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
0.000, 0.447, 0.741,
0.314, 0.717, 0.741,
0.50, 0.5, 0
]
).astype(np.float32).reshape(-1, 3)
| 36.92891
| 113
| 0.51386
|
6f0e0123a71db9c11e657bba9c232ef263546e67
| 56,424
|
py
|
Python
|
dataset/models/tf/base.py
|
mikhailkin/dataset
|
7417483fdbe2e3743af4d614cb9036fd5b1375c0
|
[
"Apache-2.0"
] | null | null | null |
dataset/models/tf/base.py
|
mikhailkin/dataset
|
7417483fdbe2e3743af4d614cb9036fd5b1375c0
|
[
"Apache-2.0"
] | null | null | null |
dataset/models/tf/base.py
|
mikhailkin/dataset
|
7417483fdbe2e3743af4d614cb9036fd5b1375c0
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=undefined-variable
""" Contains base class for tensorflow models """
import os
import glob
import re
import json
import threading
import contextlib
import numpy as np
import tensorflow as tf
from ... import is_best_practice, Config
from ..base import BaseModel
from .layers import mip, conv_block, upsample
from .train import piecewise_constant
LOSSES = {
'mse': tf.losses.mean_squared_error,
'bce': tf.losses.sigmoid_cross_entropy,
'ce': tf.losses.softmax_cross_entropy,
'crossentropy': tf.losses.softmax_cross_entropy,
'absolutedifference': tf.losses.absolute_difference,
'l1': tf.losses.absolute_difference,
'cosine': tf.losses.cosine_distance,
'cos': tf.losses.cosine_distance,
'hinge': tf.losses.hinge_loss,
'huber': tf.losses.huber_loss,
'logloss': tf.losses.log_loss,
}
DECAYS = {
'exp': tf.train.exponential_decay,
'invtime': tf.train.inverse_time_decay,
'naturalexp': tf.train.natural_exp_decay,
'const': piecewise_constant,
'poly': tf.train.polynomial_decay
}
class TFModel(BaseModel):
r""" Base class for all tensorflow models
**Configuration**
``build`` and ``load`` are inherited from :class:`.BaseModel`.
device : str or callable
if str, a device name (e.g. '/device:GPU:0').
if callable, a function which takes an operation and returns a device name for it.
See `tf.device <https://www.tensorflow.org/api_docs/python/tf/device>`_ for details.
session : dict
`Tensorflow session parameters <https://www.tensorflow.org/api_docs/python/tf/Session#__init__>`_.
inputs : dict
model inputs (see :meth:`._make_inputs`)
loss - a loss function, might be defined in one of three formats:
- name
- tuple (name, args)
- dict {'name': name, \**args}
where name might be one of:
- short name (`'mse'`, `'ce'`, `'l1'`, `'cos'`, `'hinge'`, `'huber'`, `'logloss'`)
- a function name from `tf.losses <https://www.tensorflow.org/api_docs/python/tf/losses>`_
(e.g. `'absolute_difference'` or `'sparse_softmax_cross_entropy'`)
- callable
If loss is callable, then it should add the result to a loss collection.
Otherwise, ``add_loss`` should be set to True. An optional collection might also be specified through
``loss_collection`` parameter.
.. note:: Losses from non-default collections won't be detected automatically,
so you should process them within your code.
Examples:
- ``{'loss': 'mse'}``
- ``{'loss': 'sigmoid_cross_entropy', 'label_smoothing': 1e-6}``
- ``{'loss': tf.losses.huber_loss, 'reduction': tf.losses.Reduction.MEAN}``
- ``{'loss': external_loss_fn_with_add_loss_inside}``
- ``{'loss': external_loss_fn_without_add_loss, 'add_loss': True}``
- ``{'loss': external_loss_fn_to_collection, 'add_loss': True, 'loss_collection': tf.GraphKeys.LOSSES}``
decay - a learning rate decay algorithm might be defined in one of three formats:
- name
- tuple (name, args)
- dict {'name': name, **args}
where name might be one of:
- short name ('exp', 'invtime', 'naturalexp', 'const', 'poly')
- a function name from `tf.train <https://www.tensorflow.org/api_docs/python/tf/train>`_
(e.g. 'exponential_decay')
- a callable
Examples:
- ``{'decay': 'exp'}``
- ``{'decay': ('polynomial_decay', {'decay_steps': 10000})}``
- ``{'decay': {'name': tf.train.inverse_time_decay, 'decay_rate': .5}}``
optimizer - an optimizer might be defined in one of three formats:
- name
- tuple (name, args)
- dict {'name': name, \**args}
where name might be one of:
- short name (e.g. 'Adam', 'Adagrad', any optimizer from
`tf.train <https://www.tensorflow.org/api_docs/python/tf/train>`_ without the word `Optimizer`)
- a function name from `tf.train <https://www.tensorflow.org/api_docs/python/tf/train>`_
(e.g. 'FtrlOptimizer')
- a callable
Examples:
- ``{'optimizer': 'Adam'}``
- ``{'optimizer': ('Ftrl', {'learning_rate_power': 0})}``
- ``{'optimizer': {'name': 'Adagrad', 'initial_accumulator_value': 0.01}}``
- ``{'optimizer': functools.partial(tf.train.MomentumOptimizer, momentum=0.95)}``
- ``{'optimizer': some_optimizer_fn}``
common : dict
default parameters for all :func:`.conv_block`
input_block : dict
parameters for the input block, usually :func:`.conv_block` parameters.
The only required parameter here is ``input_block/inputs`` which should contain a name or
a list of names from ``inputs`` which tensors will be passed to ``input_block`` as ``inputs``.
Examples:
- ``{'input_block/inputs': 'images'}``
- ``{'input_block': dict(inputs='features')}``
- ``{'input_block': dict(inputs='images', layout='nac nac', filters=64, kernel_size=[7, 3], strides=[1, 2])}``
body : dict
parameters for the base network layers, usually :func:`.conv_block` parameters
head : dict
parameters for the head layers, usually :func:`.conv_block` parameters
output : dict
predictions : str
operation to apply for body output tensor to make the network predictions.
The tensor's name is 'predictions' which is later used in the loss function.
ops : tuple of str
additional operations
prefix : str or tuple of str
prefixes for additional output tensor names (default='output')
Operations supported are:
- ``None`` - do nothing (identity)
- 'accuracy' - accuracy metrics (the share of ``true_labels == predicted_labels``)
- 'proba' - multiclass probabilities (softmax)
- 'labels' - most probable labels (argmax)
**How to create your own model**
#. Take a look at :func:`~.layers.conv_block` since it is widely used as a building block almost everywhere.
#. Define model defaults (e.g. number of filters, batch normalization options, etc)
by overriding :meth:`.TFModel.default_config`.
Or skip it and hard code all the parameters in unpredictable places without the possibility to
change them easily through model's config.
#. Define build configuration (e.g. number of classes, etc)
by overriding :meth:`~.TFModel.build_config`.
#. Override :meth:`~.TFModel.input_block`, :meth:`~.TFModel.body` and :meth:`~.TFModel.head`, if needed.
In many cases defaults and build config are just enough to build a network without additional code writing.
Things worth mentioning:
#. Input data and its parameters should be defined in configuration under ``inputs`` key.
See :meth:`._make_inputs` for details.
#. You might want to use a convenient multidimensional :func:`.conv_block`,
as well as :func:`~.layers.global_average_pooling`, :func:`~.layers.mip`, or other predefined layers.
Of course, you can use usual `tensorflow layers <https://www.tensorflow.org/api_docs/python/tf/layers>`_.
#. If you make dropout, batch norm, etc by hand, you might use a predefined ``self.is_training`` tensor.
#. For decay and training control there is a predefined ``self.global_step`` tensor.
#. In many cases there is no need to write a loss function, learning rate decay and optimizer
as they might be defined through config.
#. For a configured loss one of the inputs should have a name ``targets`` and
one of the tensors in your model should have a name ``predictions``.
They will be used in a loss function.
#. If you have defined your own loss function, call `tf.losses.add_loss(...)
<https://www.tensorflow.org/api_docs/python/tf/losses/add_loss>`_.
#. If you need to use your own optimizer, assign ``self.train_step`` to the train step operation.
Don't forget about `UPDATE_OPS control dependency
<https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization>`_.
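    **A minimal example**
    Putting the steps above together, a tiny custom model might look like this
    (the class name, layouts and filter numbers below are purely illustrative)::
        class MyModel(TFModel):
            @classmethod
            def default_config(cls):
                config = TFModel.default_config()
                config['body'].update(dict(layout='cna cna', filters=[32, 64], kernel_size=3))
                config['head'].update(dict(layout='Vf'))
                return config
            def build_config(self, names=None):
                names = names or ['images', 'labels']
                config = super().build_config(names)
                config['head']['units'] = self.num_classes('targets')
                return config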
"""
def __init__(self, *args, **kwargs):
self.session = kwargs.get('session', None)
self.graph = tf.Graph() if self.session is None else self.session.graph
self._graph_context = None
self.is_training = None
self.global_step = None
self.loss = None
self.train_step = None
self._train_lock = threading.Lock()
self._attrs = []
self._to_classes = {}
self._inputs = {}
self.inputs = None
super().__init__(*args, **kwargs)
def build(self, *args, **kwargs):
""" Build the model
#. Create a graph
#. Define ``is_training`` and ``global_step`` tensors
    #. Define a model architecture by calling :meth:`._build`
#. Create a loss function from config
#. Create an optimizer and define a train step from config
#. `Set UPDATE_OPS control dependency on train step
<https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization>`_
#. Create a tensorflow session
"""
def _device_context():
if 'device' in self.config:
device = self.config.get('device')
context = self.graph.device(device)
else:
context = contextlib.ExitStack()
return context
with self.graph.as_default(), _device_context():
with tf.variable_scope(self.__class__.__name__):
with tf.variable_scope('globals'):
if self.is_training is None:
self.store_to_attr('is_training', tf.placeholder(tf.bool, name='is_training'))
if self.global_step is None:
self.store_to_attr('global_step', tf.Variable(0, trainable=False, name='global_step'))
config = self.build_config()
self._build(config)
if self.train_step is None:
self._make_loss(config)
self.store_to_attr('loss', tf.losses.get_total_loss())
optimizer = self._make_optimizer(config)
if optimizer:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = optimizer.minimize(self.loss, global_step=self.global_step)
self.store_to_attr('train_step', train_step)
else:
self.store_to_attr('train_step', self.train_step)
if self.session is None:
self.create_session(config)
self.reset()
def create_session(self, config=None):
""" Create TF session """
config = config if config is not None else self.config
session_config = config.get('session', default={})
self.session = tf.Session(**session_config)
def reset(self):
""" Reset the trained model to allow a new training from scratch """
with self.session.graph.as_default():
self.session.run(tf.global_variables_initializer())
def _make_inputs(self, names=None, config=None):
""" Create model input data from config provided
In the config's inputs section it looks for ``names``, creates placeholders required, and
makes some typical transformations (like one-hot-encoding), if needed.
**Configuration**
inputs : dict
- key : str
a placeholder name
- values : dict or tuple
each input's config
Input config:
``dtype`` : str or tf.DType (by default 'float32')
data type
``shape`` : int, tuple, list or None (default)
a tensor shape which includes the number of channels/classes and doesn't include a batch size.
``classes`` : array-like or None (default)
an array of class labels if data labels are strings or anything else except ``np.arange(num_classes)``
``data_format`` : str {``'channels_first'``, ``'channels_last'``} or {``'f'``, ``'l'``}
The ordering of the dimensions in the inputs. Default is 'channels_last'.
``transform`` : str or callable
Predefined transforms are
- ``'ohe'`` - one-hot encoding
- ``'mip @ d'`` - maximum intensity projection :func:`~.layers.mip`
with depth ``d`` (should be int)
``name`` : str
a name for the transformed and reshaped tensor.
If an input config is a tuple, it should contain all items exactly in the order shown above:
dtype, shape, classes, data_format, transform, name.
If an item is None, the default value will be used instead.
**How it works**
        A placeholder with ``dtype``, ``shape`` and the name ``key`` is created first.
Then it is transformed with a ``transform`` function in accordance with ``data_format``.
The resulting tensor will have the name ``name``.
Parameters
----------
names : list
placeholder names that are expected in the config's 'inputs' section
Raises
------
KeyError if there is any name missing in the config's 'inputs' section.
ValueError if there are duplicate names.
Returns
-------
placeholders : dict
key : str
a placeholder name
value : tf.Tensor
placeholder tensor
tensors : dict
key : str
a placeholder name
value : tf.Tensor
an input tensor after transformations
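        Examples
        --------
        A possible ``inputs`` section of a model config (the shapes and the number of classes
        are illustrative)::
            inputs=dict(images=dict(dtype='float32', shape=(128, 128, 3)),
                        labels=dict(classes=10, transform='ohe', name='targets'))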
"""
# pylint:disable=too-many-statements
config = config.get('inputs')
names = names or []
missing_names = set(names) - set(config.keys())
if len(missing_names) > 0:
raise KeyError("Inputs should contain {} names".format(missing_names))
placeholder_names = set(config.keys())
tensor_names = set(x.get('name') for x in config.values() if isinstance(x, dict) and x.get('name'))
wrong_names = placeholder_names & tensor_names
if len(wrong_names) > 0:
raise ValueError('Inputs contain duplicate names:', wrong_names)
param_names = ('dtype', 'shape', 'classes', 'data_format', 'transform', 'name')
defaults = dict(data_format='channels_last')
placeholders = dict()
tensors = dict()
for input_name, input_config in config.items():
if isinstance(input_config, (tuple, list)):
input_config = list(input_config) + [None for _ in param_names]
input_config = input_config[:len(param_names)]
input_config = dict(zip(param_names, input_config))
input_config = dict((k, v) for k, v in input_config.items() if v is not None)
input_config = {**defaults, **input_config}
reshape = None
shape = input_config.get('shape')
if isinstance(shape, int):
shape = (shape,)
if shape:
input_config['shape'] = shape
shape = [None] + list(shape)
self._inputs[input_name] = dict(config=input_config)
if self.has_classes(input_name):
dtype = input_config.get('dtype', tf.int64)
shape = shape or (None,)
else:
dtype = input_config.get('dtype', 'float')
tensor = tf.placeholder(dtype, shape, input_name)
placeholders[input_name] = tensor
self.store_to_attr(input_name, tensor)
if input_config.get('data_format') == 'l':
input_config['data_format'] = 'channels_last'
elif input_config.get('data_format') == 'f':
input_config['data_format'] = 'channels_first'
self._inputs[input_name] = dict(config=input_config)
tensor = self._make_transform(input_name, tensor, input_config)
if isinstance(reshape, (list, tuple)):
tensor = tf.reshape(tensor, [-1] + list(reshape))
name = input_config.get('name')
if name is not None:
tensor = tf.identity(tensor, name=name)
self.store_to_attr(name, tensor)
tensors[input_name] = tensor
self._inputs[input_name] = dict(config=input_config, placeholder=placeholders[input_name], tensor=tensor)
if name is not None:
self._inputs[name] = self._inputs[input_name]
self.inputs = tensors
return placeholders, tensors
def _make_transform(self, input_name, tensor, config):
if config is not None:
transform_names = config.get('transform')
if not isinstance(transform_names, list):
transform_names = [transform_names]
for transform_name in transform_names:
if isinstance(transform_name, str):
transforms = {
'ohe': self._make_ohe,
'mip': self._make_mip,
'mask_downsampling': self._make_mask_downsampling
}
kwargs = dict()
if transform_name.startswith('mip'):
parts = transform_name.split('@')
transform_name = parts[0].strip()
kwargs['depth'] = int(parts[1])
tensor = transforms[transform_name](input_name, tensor, config, **kwargs)
elif callable(transform_name):
tensor = transform_name(tensor)
elif transform_name:
raise ValueError("Unknown transform {}".format(transform_name))
return tensor
def _make_ohe(self, input_name, tensor, config):
if config.get('shape') is None and config.get('classes') is None:
raise ValueError("shape and classes cannot be both None for input " +
"'{}' with one-hot-encoding transform".format(input_name))
num_classes = self.num_classes(input_name)
axis = -1 if self.data_format(input_name) == 'channels_last' else 1
tensor = tf.one_hot(tensor, depth=num_classes, axis=axis)
return tensor
def _make_mask_downsampling(self, input_name, tensor, config):
""" Perform mask downsampling with factor from config of tensor. """
_ = input_name
factor = config.get('factor')
size = self.shape(tensor, False)
if None in size[1:]:
size = self.shape(tensor, True)
size = size / factor
size = tf.cast(size, tf.int32)
tensor = tf.expand_dims(tensor, -1)
tensor = tf.image.resize_nearest_neighbor(tensor, size)
tensor = tf.squeeze(tensor, [-1])
return tensor
def to_classes(self, tensor, input_name, name=None):
""" Convert tensor with labels to classes of ``input_name`` """
if tensor.dtype in [tf.float16, tf.float32, tf.float64]:
tensor = tf.argmax(tensor, axis=-1, name=name)
if self.has_classes(input_name):
self._to_classes.update({tensor: input_name})
return tensor
def _make_mip(self, input_name, tensor, config, depth):
# mip has to know shape
if config.get('shape') is None:
raise ValueError('mip transform requires shape specified in the inputs config')
if depth is None:
raise ValueError("mip should be specified as mip @ depth, e.g. 'mip @ 3'")
tensor = mip(tensor, depth=depth, data_format=self.data_format(input_name))
return tensor
def _unpack_fn_from_config(self, param, config=None):
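        """ Unpack a callable and its arguments from a config entry
        The entry may be a callable, a name, a ``(name, args)`` tuple/list or
        a ``{'name': name, **args}`` dict; a ``(name, args)`` pair is returned.
        """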
par = config.get(param)
if par is None:
return None, {}
if isinstance(par, (tuple, list)):
if len(par) == 0:
par_name = None
elif len(par) == 1:
par_name, par_args = par[0], {}
elif len(par) == 2:
par_name, par_args = par
else:
par_name, par_args = par[0], par[1:]
elif isinstance(par, dict):
par = par.copy()
par_name, par_args = par.pop('name', None), par
else:
par_name, par_args = par, {}
return par_name, par_args
def _make_loss(self, config):
""" Return a loss function from config """
loss, args = self._unpack_fn_from_config('loss', config)
add_loss = False
if loss is None:
pass
        elif isinstance(loss, str) and hasattr(tf.losses, loss):
            loss = getattr(tf.losses, loss)
        elif isinstance(loss, str):
            loss = LOSSES.get(re.sub('[-_ ]', '', loss).lower(), None)
elif callable(loss):
pass
else:
raise ValueError("Unknown loss", loss)
if loss is None:
if len(tf.losses.get_losses()) == 0:
raise ValueError("Loss is not defined in the model %s" % self)
else:
try:
predictions = getattr(self, 'predictions')
except AttributeError:
raise KeyError("Model %s does not have 'predictions' tensor" % type(self).__name__)
try:
targets = getattr(self, 'targets')
except AttributeError:
raise KeyError("Model %s does not have 'targets' tensor" % type(self).__name__)
else:
add_loss = args.pop('add_loss', False)
if add_loss:
loss_collection = args.pop('loss_collection', None)
tensor_loss = loss(targets, predictions, **args)
if add_loss:
if loss_collection:
tf.losses.add_loss(tensor_loss, loss_collection)
else:
tf.losses.add_loss(tensor_loss)
def _make_decay(self, config):
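        """ Resolve a learning rate decay callable and its arguments from config """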
decay_name, decay_args = self._unpack_fn_from_config('decay', config)
if decay_name is None:
pass
elif callable(decay_name):
pass
elif isinstance(decay_name, str) and hasattr(tf.train, decay_name):
decay_name = getattr(tf.train, decay_name)
elif decay_name in DECAYS:
decay_name = DECAYS.get(re.sub('[-_ ]', '', decay_name).lower(), None)
else:
raise ValueError("Unknown learning rate decay method", decay_name)
return decay_name, decay_args
def _make_optimizer(self, config):
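        """ Create an optimizer from config (returns None when no optimizer is configured) """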
optimizer_name, optimizer_args = self._unpack_fn_from_config('optimizer', config)
if optimizer_name is None or callable(optimizer_name):
pass
elif isinstance(optimizer_name, str) and hasattr(tf.train, optimizer_name):
optimizer_name = getattr(tf.train, optimizer_name)
elif isinstance(optimizer_name, str) and hasattr(tf.train, optimizer_name + 'Optimizer'):
optimizer_name = getattr(tf.train, optimizer_name + 'Optimizer')
else:
raise ValueError("Unknown optimizer", optimizer_name)
decay_name, decay_args = self._make_decay(config)
if decay_name is not None:
optimizer_args['learning_rate'] = decay_name(**decay_args, global_step=self.global_step)
if optimizer_name:
optimizer = optimizer_name(**optimizer_args)
else:
optimizer = None
return optimizer
def get_number_of_trainable_vars(self):
""" Return the number of trainable variable in the model graph """
arr = np.asarray([np.prod(v.get_shape().as_list()) for v in self.graph.get_collection('trainable_variables')])
return np.sum(arr)
def get_tensor_config(self, tensor, **kwargs):
""" Return tensor configuration
Parameters
----------
tensor : str or tf.Tensor
Returns
-------
dict
tensor config (see :meth:`._make_inputs`)
Raises
------
        ValueError if the shape in the tensor configuration isn't an int, tuple or list
"""
if isinstance(tensor, tf.Tensor):
names = [n for n, i in self._inputs.items() if tensor in [i['placeholder'], i['tensor']]]
if len(names) > 0:
input_name = names[0]
else:
input_name = tensor.name
elif isinstance(tensor, str):
if tensor in self._inputs:
input_name = tensor
else:
input_name = self._map_name(tensor)
else:
raise TypeError("Tensor can be tf.Tensor or string, but given %s" % type(tensor))
if input_name in self._inputs:
config = self._inputs[input_name]['config']
shape = config.get('shape')
if isinstance(shape, int):
shape = (shape,)
if shape:
kwargs['shape'] = shape
elif isinstance(input_name, str):
try:
tensor = self.graph.get_tensor_by_name(input_name)
except KeyError:
config = {}
else:
shape = tensor.get_shape().as_list()[1:]
config = dict(dtype=tensor.dtype, shape=shape, name=tensor.name, data_format='channels_last')
else:
config = {}
config = {**config, **kwargs}
return config
def _map_name(self, name):
if isinstance(name, str):
if hasattr(self, name):
return getattr(self, name)
elif ':' in name:
return name
else:
tensors = tf.get_collection(name)
if len(tensors) != 0:
return tensors
return name + ':0'
return name
def _fill_feed_dict(self, feed_dict=None, is_training=True):
feed_dict = feed_dict or {}
_feed_dict = {}
for placeholder, value in feed_dict.items():
if self.has_classes(placeholder):
classes = self.classes(placeholder)
get_indices = np.vectorize(lambda c, arr=classes: np.where(c == arr)[0])
value = get_indices(value)
placeholder = self._map_name(placeholder)
value = self._map_name(value)
_feed_dict.update({placeholder: value})
if self.is_training not in _feed_dict:
_feed_dict.update({self.is_training: is_training})
return _feed_dict
def _fill_fetches(self, fetches=None, default=None):
fetches = fetches or default
if isinstance(fetches, str):
_fetches = self._map_name(fetches)
elif isinstance(fetches, (tuple, list)):
_fetches = []
for fetch in fetches:
_fetches.append(self._map_name(fetch))
elif isinstance(fetches, dict):
_fetches = dict()
for key, fetch in fetches.items():
_fetches.update({key: self._map_name(fetch)})
else:
_fetches = fetches
return _fetches
def _fill_output(self, output, fetches):
def _recast_output(out, ix=None):
if isinstance(out, np.ndarray):
fetch = fetches[ix] if ix is not None else fetches
if isinstance(fetch, str):
fetch = self.graph.get_tensor_by_name(fetch)
if fetch in self._to_classes:
return self.classes(self._to_classes[fetch])[out]
return out
if isinstance(output, (tuple, list)):
_output = []
for i, o in enumerate(output):
_output.append(_recast_output(o, i))
output = type(output)(_output)
elif isinstance(output, dict):
_output = type(output)()
for k, v in output.items():
_output.update({k: _recast_output(v, k)})
else:
output = _recast_output(output)
return output
def train(self, fetches=None, feed_dict=None, use_lock=False): # pylint: disable=arguments-differ
""" Train the model with the data provided
Parameters
----------
fetches : tuple, list
a sequence of `tf.Operation` and/or `tf.Tensor` to calculate
feed_dict : dict
input data, where key is a placeholder name and value is a numpy value
use_lock : bool
            if True, the whole train step is wrapped in a lock, making it safe to call from multiple threads.
Returns
-------
Calculated values of tensors in `fetches` in the same structure
See also
--------
`Tensorflow Session run <https://www.tensorflow.org/api_docs/python/tf/Session#run>`_
"""
with self.graph.as_default():
_feed_dict = self._fill_feed_dict(feed_dict, is_training=True)
if fetches is None:
_fetches = tuple()
else:
_fetches = self._fill_fetches(fetches, default=None)
if use_lock:
self._train_lock.acquire()
_all_fetches = []
if self.train_step:
_all_fetches += [self.train_step]
if _fetches is not None:
_all_fetches += [_fetches]
if len(_all_fetches) > 0:
_, output = self.session.run(_all_fetches, feed_dict=_feed_dict)
else:
output = None
if use_lock:
self._train_lock.release()
return self._fill_output(output, _fetches)
def predict(self, fetches=None, feed_dict=None): # pylint: disable=arguments-differ
""" Get predictions on the data provided
Parameters
----------
fetches : tuple, list
a sequence of `tf.Operation` and/or `tf.Tensor` to calculate
feed_dict : dict
input data, where key is a placeholder name and value is a numpy value
Returns
-------
Calculated values of tensors in `fetches` in the same structure
Notes
-----
The only difference between `predict` and `train` is that `train` also executes a `train_step` operation
        which involves calculating and applying gradients and thus changing the model weights.
See also
--------
`Tensorflow Session run <https://www.tensorflow.org/api_docs/python/tf/Session#run>`_
"""
with self.graph.as_default():
_feed_dict = self._fill_feed_dict(feed_dict, is_training=False)
_fetches = self._fill_fetches(fetches, default='predictions')
output = self.session.run(_fetches, _feed_dict)
return self._fill_output(output, _fetches)
def save(self, path, *args, **kwargs):
""" Save tensorflow model.
Parameters
----------
path : str
a path to a directory where all model files will be stored
Examples
--------
>>> tf_model = ResNet34()
Now save the model
>>> tf_model.save('/path/to/models/resnet34')
The model will be saved to /path/to/models/resnet34
"""
with self.graph.as_default():
if not os.path.exists(path):
os.makedirs(path)
saver = tf.train.Saver()
saver.save(self.session, os.path.join(path, 'model'), *args, global_step=self.global_step, **kwargs)
with open(os.path.join(path, 'attrs.json'), 'w') as f:
json.dump(self._attrs, f)
def load(self, path, graph=None, checkpoint=None, *args, **kwargs):
""" Load a TensorFlow model from files
Parameters
----------
path : str
a directory where a model is stored
graph : str
a filename for a metagraph file
checkpoint : str
a checkpoint file name or None to load the latest checkpoint
Examples
--------
>>> resnet = ResNet34(load=dict(path='/path/to/models/resnet34'))
>>> tf_model.load(path='/path/to/models/resnet34')
"""
_ = args, kwargs
self.graph = tf.Graph()
with self.graph.as_default():
if graph is None:
graph_files = glob.glob(os.path.join(path, '*.meta'))
graph_files = [os.path.splitext(os.path.basename(graph))[0] for graph in graph_files]
all_steps = []
for graph in graph_files:
try:
step = int(graph.split('-')[-1])
except ValueError:
pass
else:
all_steps.append(step)
graph = '-'.join(['model', str(max(all_steps))]) + '.meta'
graph_path = os.path.join(path, graph)
saver = tf.train.import_meta_graph(graph_path)
if checkpoint is None:
checkpoint_path = tf.train.latest_checkpoint(path)
else:
checkpoint_path = os.path.join(path, checkpoint)
self.create_session()
saver.restore(self.session, checkpoint_path)
with open(os.path.join(path, 'attrs.json'), 'r') as json_file:
self._attrs = json.load(json_file)
with self.graph.as_default():
for attr, graph_item in zip(self._attrs, tf.get_collection('attrs')):
setattr(self, attr, graph_item)
def store_to_attr(self, attr, graph_item):
""" Make a graph item (variable or operation) accessible as a model attribute """
with self.graph.as_default():
setattr(self, attr, graph_item)
self._attrs.append(attr)
tf.get_collection_ref('attrs').append(graph_item)
@classmethod
def crop(cls, inputs, shape_images, data_format='channels_last'):
""" Crop input tensor to a shape of a given image.
        If ``shape_images`` does not have a fully defined shape (its ``get_shape()`` contains at least one None),
        the returned tf.Tensor will have an unknown shape except for the number of channels.
Parameters
----------
inputs : tf.Tensor
input tensor
shape_images : tf.Tensor
            an image tensor whose spatial shape the inputs are cropped to
data_format : str {'channels_last', 'channels_first'}
data format
"""
static_shape = cls.spatial_shape(shape_images, data_format, False)
dynamic_shape = cls.spatial_shape(shape_images, data_format, True)
if None in cls.shape(inputs) + static_shape:
return cls._dynamic_crop(inputs, static_shape, dynamic_shape, data_format)
else:
return cls._static_crop(inputs, static_shape, data_format)
@classmethod
def _static_crop(cls, inputs, shape, data_format='channels_last'):
input_shape = np.array(cls.spatial_shape(inputs, data_format))
if np.abs(input_shape - shape).sum() > 0:
begin = [0] * inputs.shape.ndims
if data_format == "channels_last":
size = [-1] + shape + [-1]
else:
size = [-1, -1] + shape
x = tf.slice(inputs, begin=begin, size=size)
else:
x = inputs
return x
@classmethod
def _dynamic_crop(cls, inputs, static_shape, dynamic_shape, data_format='channels_last'):
input_shape = cls.spatial_shape(inputs, data_format, True)
n_channels = cls.num_channels(inputs, data_format)
if data_format == 'channels_last':
slice_size = [(-1,), dynamic_shape, (n_channels,)]
output_shape = [None] * (len(static_shape) + 1) + [n_channels]
else:
slice_size = [(-1, n_channels), dynamic_shape]
output_shape = [None, n_channels] + [None] * len(static_shape)
begin = [0] * len(inputs.get_shape().as_list())
size = tf.concat(slice_size, axis=0)
cond = tf.reduce_sum(tf.abs(input_shape - dynamic_shape)) > 0
x = tf.cond(cond, lambda: tf.slice(inputs, begin=begin, size=size), lambda: inputs)
x.set_shape(output_shape)
return x
@classmethod
def input_block(cls, inputs, name='input_block', **kwargs):
""" Transform inputs with a convolution block
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Notes
-----
For other parameters see :func:`.conv_block`.
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('input_block', **kwargs)
if kwargs.get('layout'):
return conv_block(inputs, name=name, **kwargs)
return inputs
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" Base layers which produce a network embedding
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Notes
-----
For other parameters see :func:`.conv_block`.
Returns
-------
tf.Tensor
Examples
--------
::
            MyModel.body(inputs, layout='ca ca ca', filters=[128, 256, 512], kernel_size=3)
"""
kwargs = cls.fill_params('body', **kwargs)
if kwargs.get('layout'):
return conv_block(inputs, name=name, **kwargs)
return inputs
@classmethod
def head(cls, inputs, name='head', **kwargs):
""" Last network layers which produce output from the network embedding
Parameters
----------
inputs : tf.Tensor
input tensor
name : str
scope name
Notes
-----
For other parameters see :func:`.conv_block`.
Returns
-------
tf.Tensor
Examples
--------
        A fully convolutional head with 3x3 and 1x1 convolutions and global max pooling::
            MyModel.head(network_embedding, layout='cacaP', filters=[128, num_classes], kernel_size=[3, 1])
        A fully connected head with dropouts, a dense layer with 1000 units and a final dense layer with class logits::
            MyModel.head(network_embedding, layout='dfadf', units=[1000, num_classes], dropout_rate=.15)
"""
kwargs = cls.fill_params('head', **kwargs)
if kwargs.get('layout'):
return conv_block(inputs, name=name, **kwargs)
return inputs
def output(self, inputs, ops=None, prefix=None, **kwargs):
""" Add output operations to a model graph, like predictions, quality metrics, etc.
Parameters
----------
inputs : tf.Tensor or a sequence of tf.Tensors
input tensors
ops : a sequence of str or callable
operation names::
- 'sigmoid' - add ``sigmoid(inputs)``
- 'proba' - add ``softmax(inputs)``
- 'labels' - add ``argmax(inputs)``
- 'accuracy' - add ``mean(predicted_labels == true_labels)``
- callable - add an arbitrary operation
prefix : a sequence of str
a prefix for each input if there are multiple inputs
Raises
------
        ValueError if the number of outputs does not equal the number of prefixes
TypeError if inputs is not a Tensor or a sequence of Tensors
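        Examples
        --------
        A typical call made at the end of ``_build`` (the tensor ``x`` is illustrative)::
            self.output(x, ops=('proba', 'labels'), data_format='channels_last')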
"""
kwargs = self.fill_params('output', **kwargs)
predictions_op = self.pop('predictions', kwargs, default=None)
if ops is None:
ops = []
elif not isinstance(ops, (list, tuple)):
ops = [ops]
if not isinstance(inputs, (tuple, list)):
inputs = [inputs]
        prefix = prefix or 'output'
        if not isinstance(prefix, (list, tuple)):
            prefix = [prefix]
if len(inputs) != len(prefix):
raise ValueError('Each output in multiple output models should have its own prefix')
for i, tensor in enumerate(inputs):
if not isinstance(tensor, tf.Tensor):
raise TypeError("Network output is expected to be a Tensor, but given {}".format(type(inputs)))
current_prefix = prefix[i]
if current_prefix:
ctx = tf.variable_scope(current_prefix)
ctx.__enter__()
else:
ctx = None
attr_prefix = current_prefix + '_' if current_prefix else ''
pred_prefix = '' if len(inputs) == 1 else attr_prefix
self._add_output_op(tensor, predictions_op, 'predictions', pred_prefix, **kwargs)
for oper in ops:
self._add_output_op(tensor, oper, oper, attr_prefix, **kwargs)
if ctx:
ctx.__exit__(None, None, None)
def _add_output_op(self, inputs, oper, name, attr_prefix, **kwargs):
if oper is None:
self._add_output_identity(inputs, name, attr_prefix, **kwargs)
elif oper == 'sigmoid':
self._add_output_sigmoid(inputs, name, attr_prefix, **kwargs)
elif oper == 'proba':
self._add_output_proba(inputs, name, attr_prefix, **kwargs)
elif oper == 'labels':
self._add_output_labels(inputs, name, attr_prefix, **kwargs)
elif oper == 'accuracy':
self._add_output_accuracy(inputs, name, attr_prefix, **kwargs)
elif callable(oper):
self._add_output_callable(inputs, oper, None, attr_prefix, **kwargs)
def _add_output_identity(self, inputs, name, attr_prefix, **kwargs):
_ = kwargs
x = tf.identity(inputs, name=name)
self.store_to_attr(attr_prefix + name, x)
return x
def _add_output_sigmoid(self, inputs, name, attr_prefix, **kwargs):
_ = kwargs
proba = tf.sigmoid(inputs, name=name)
self.store_to_attr(attr_prefix + name, proba)
def _add_output_proba(self, inputs, name, attr_prefix, **kwargs):
axis = self.channels_axis(kwargs['data_format'])
proba = tf.nn.softmax(inputs, name=name, dim=axis)
self.store_to_attr(attr_prefix + name, proba)
def _add_output_labels(self, inputs, name, attr_prefix, **kwargs):
channels_axis = self.channels_axis(kwargs.get('data_format'))
predicted_labels = tf.argmax(inputs, axis=channels_axis, name=name)
self.store_to_attr(attr_prefix + name, predicted_labels)
def _add_output_accuracy(self, inputs, name, attr_prefix, **kwargs):
channels_axis = self.channels_axis(kwargs.get('data_format'))
true_labels = tf.argmax(self.targets, axis=channels_axis)
if not hasattr(self, attr_prefix + 'labels'):
self._add_output_labels(inputs, 'labels', attr_prefix, **kwargs)
x = getattr(self, attr_prefix + 'labels')
x = tf.cast(x, true_labels.dtype)
x = tf.cast(tf.equal(true_labels, x), 'float')
accuracy = tf.reduce_mean(x, axis=channels_axis, name=name)
self.store_to_attr(attr_prefix + name, accuracy)
def _add_output_callable(self, inputs, oper, name, attr_prefix, **kwargs):
_ = kwargs
x = oper(inputs)
name = name or oper.__name__
self.store_to_attr(attr_prefix + name, x)
return x
@classmethod
def default_config(cls):
""" Define model defaults
You need to override this method if you expect your model or its blocks to serve as a base for other models
(e.g. VGG for FCN, ResNet for LinkNet, etc).
Put here all constants (like the number of filters, kernel sizes, block layouts, strides, etc)
specific to the model, but independent of anything else (like image shapes, number of classes, etc).
These defaults can be changed in `.build_config` or when calling `Pipeline.init_model`.
Usually, it looks like::
@classmethod
def default_config(cls):
config = TFModel.default_config()
config['input_block'].update(dict(layout='cnap', filters=16, kernel_size=7, strides=2,
pool_size=3, pool_strides=2))
config['body']['filters'] = 32
config['head'].update(dict(layout='cnadV', dropout_rate=.2))
return config
"""
config = {}
config['inputs'] = {}
config['common'] = {}
config['input_block'] = {}
config['body'] = {}
config['head'] = {}
config['output'] = {}
config['optimizer'] = ('Adam', dict())
if is_best_practice():
config['common'] = {'batch_norm': {'momentum': .1}}
config['optimizer'][1].update({'use_locking': True})
return Config(config)
@classmethod
def fill_params(cls, _name, **kwargs):
""" Fill block params from default config and kwargs """
config = cls.default_config()
_config = config.get(_name)
config = {**config['common'], **_config, **kwargs}
return config
def build_config(self, names=None):
""" Define a model architecture configuration
        It takes just a few steps:
#. Define names for all placeholders and make input tensors by calling ``super().build_config(names)``.
If the model config does not contain any name from ``names``, :exc:`KeyError` is raised.
See :meth:`._make_inputs` for details.
#. Define parameters for :meth:`.TFModel.input_block`, :meth:`.TFModel.body`, :meth:`.TFModel.head`
which depend on inputs.
#. Don't forget to return ``config``.
Typically it looks like this::
def build_config(self, names=None):
names = names or ['images', 'labels']
config = super().build_config(names)
config['head']['num_classes'] = self.num_classes('targets')
return config
"""
config = self.default_config()
config = config + self.config
if config.get('inputs'):
with tf.variable_scope('inputs'):
self._make_inputs(names, config)
inputs = self.get('input_block/inputs', config)
if isinstance(inputs, str):
config['common/data_format'] = self.data_format(inputs)
config['input_block/inputs'] = self.inputs[inputs]
elif isinstance(inputs, list):
config['input_block/inputs'] = [self.inputs[name] for name in inputs]
else:
raise ValueError('input_block/inputs should be specified with a name or a list of names.')
return config
def _build(self, config=None):
defaults = {'is_training': self.is_training, **config['common']}
config['input_block'] = {**defaults, **config['input_block']}
config['body'] = {**defaults, **config['body']}
config['head'] = {**defaults, **config['head']}
config['output'] = {**defaults, **config['output']}
x = self.input_block(**config['input_block'])
x = self.body(inputs=x, **config['body'])
output = self.head(inputs=x, **config['head'])
self.output(output, **config['output'])
@classmethod
def channels_axis(cls, data_format):
""" Return the channels axis for the tensor
Parameters
----------
data_format : str {'channels_last', 'channels_first'}
Returns
-------
        index of the channels axis : int
"""
return 1 if data_format == "channels_first" or data_format.startswith("NC") else -1
def data_format(self, tensor, **kwargs):
""" Return the tensor data format (channels_last or channels_first)
Parameters
----------
tensor : str or tf.Tensor
Returns
-------
data_format : str
"""
config = self.get_tensor_config(tensor, **kwargs)
return config.get('data_format')
def has_classes(self, tensor):
""" Check if a tensor has classes defined in the config """
config = self.get_tensor_config(tensor)
has = config.get('classes') is not None
return has
def classes(self, tensor):
""" Return the number of classes """
config = self.get_tensor_config(tensor)
classes = config.get('classes')
if isinstance(classes, int):
return np.arange(classes)
return np.asarray(classes)
def num_classes(self, tensor):
""" Return the number of classes """
if self.has_classes(tensor):
classes = self.classes(tensor)
return classes if isinstance(classes, int) else len(classes)
return self.get_num_channels(tensor)
def get_num_channels(self, tensor, **kwargs):
""" Return the number of channels in the tensor
Parameters
----------
tensor : str or tf.Tensor
Returns
-------
number of channels : int
"""
config = self.get_tensor_config(tensor, **kwargs)
        shape = (None,) + tuple(config.get('shape'))
        channels_axis = self.channels_axis(config.get('data_format'))
return shape[channels_axis] if shape else None
@classmethod
def num_channels(cls, tensor, data_format='channels_last'):
""" Return number of channels in the input tensor
Parameters
----------
tensor : tf.Tensor
Returns
-------
        number of channels : int
"""
shape = tensor.get_shape().as_list()
axis = cls.channels_axis(data_format)
return shape[axis]
def get_shape(self, tensor, **kwargs):
""" Return the tensor shape without batch dimension
Parameters
----------
tensor : str or tf.Tensor
Returns
-------
shape : tuple
"""
config = self.get_tensor_config(tensor, **kwargs)
return config.get('shape')
@classmethod
def shape(cls, tensor, dynamic=False):
""" Return shape of the input tensor without batch size
Parameters
----------
tensor : tf.Tensor
dynamic : bool
if True, returns tensor which represents shape. If False, returns list of ints and/or Nones
Returns
-------
shape : tf.Tensor or list
"""
if dynamic:
shape = tf.shape(tensor)
else:
shape = tensor.get_shape().as_list()
return shape[1:]
def get_spatial_dim(self, tensor, **kwargs):
""" Return the tensor spatial dimensionality (without batch and channels dimensions)
Parameters
----------
tensor : str or tf.Tensor
Returns
-------
number of spatial dimensions : int
"""
config = self.get_tensor_config(tensor, **kwargs)
return len(config.get('shape')) - 1
@classmethod
def spatial_dim(cls, tensor):
""" Return spatial dim of the input tensor (without channels and batch dimension)
Parameters
----------
tensor : tf.Tensor
Returns
-------
dim : int
"""
return len(tensor.get_shape().as_list()) - 2
def get_spatial_shape(self, tensor, **kwargs):
""" Return the tensor spatial shape (without batch and channels dimensions)
Parameters
----------
tensor : str or tf.Tensor
Returns
-------
spatial shape : tuple
"""
config = self.get_tensor_config(tensor, **kwargs)
data_format = config.get('data_format')
shape = config.get('shape')[:-1] if data_format == 'channels_last' else config.get('shape')[1:]
return shape
@classmethod
def spatial_shape(cls, tensor, data_format='channels_last', dynamic=False):
""" Return spatial shape of the input tensor
Parameters
----------
tensor : tf.Tensor
dynamic : bool
if True, returns tensor which represents shape. If False, returns list of ints and/or Nones
Returns
-------
shape : tf.Tensor or list
"""
if dynamic:
shape = tf.shape(tensor)
else:
shape = tensor.get_shape().as_list()
axis = slice(1, -1) if data_format == "channels_last" else slice(2, None)
return shape[axis]
def get_batch_size(self, tensor):
""" Return batch size (the length of the first dimension) of the input tensor
Parameters
----------
tensor : str or tf.Tensor
Returns
-------
batch size : int or None
"""
if isinstance(tensor, tf.Tensor):
pass
elif isinstance(tensor, str):
if tensor in self._inputs:
tensor = self._inputs[tensor]['placeholder']
else:
input_name = self._map_name(tensor)
if input_name in self._inputs:
tensor = self._inputs[input_name]
else:
tensor = self.graph.get_tensor_by_name(input_name)
else:
raise TypeError("Tensor can be tf.Tensor or string, but given %s" % type(tensor))
return tensor.get_shape().as_list()[0]
@classmethod
def batch_size(cls, tensor):
""" Return batch size (the length of the first dimension) of the input tensor
Parameters
----------
tensor : tf.Tensor
Returns
-------
batch size : int or None
"""
return tensor.get_shape().as_list()[0]
@classmethod
def se_block(cls, inputs, ratio, name='se', **kwargs):
""" Squeeze and excitation block
Hu J. et al. "`Squeeze-and-Excitation Networks <https://arxiv.org/abs/1709.01507>`_"
Parameters
----------
inputs : tf.Tensor
input tensor
ratio : int
squeeze ratio for the number of filters
Returns
-------
tf.Tensor
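        Examples
        --------
        A possible call inside ``body`` (the squeeze ratio of 16 is just a common choice)::
            x = cls.se_block(x, ratio=16, data_format='channels_last')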
"""
with tf.variable_scope(name):
data_format = kwargs.get('data_format')
in_filters = cls.num_channels(inputs, data_format)
x = conv_block(inputs,
**{**kwargs, 'layout': 'Vfafa', 'units': [in_filters//ratio, in_filters],
'name': 'se', 'activation': [tf.nn.relu, tf.nn.sigmoid]})
shape = [-1] + [1] * (cls.spatial_dim(inputs) + 1)
axis = cls.channels_axis(data_format)
shape[axis] = in_filters
scale = tf.reshape(x, shape)
x = inputs * scale
return x
@classmethod
def upsample(cls, inputs, factor=None, layout='b', name='upsample', **kwargs):
""" Upsample input tensor
Parameters
----------
inputs : tf.Tensor or tuple of two tf.Tensor
a tensor to resize and a tensor which size to resize to
factor : int
            an upsampling scale
layout : str
resizing technique, a sequence of:
- R - use residual connection with bilinear additive upsampling (must be the first symbol)
- b - bilinear resize
- B - bilinear additive upsampling
- N - nearest neighbor resize
- t - transposed convolution
- X - subpixel convolution
Returns
-------
tf.Tensor
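        Examples
        --------
        Upsample a feature map twofold with a transposed convolution and crop it to the
        size of a skip connection (both tensor names are illustrative)::
            x = cls.upsample((x, skip), factor=2, layout='t', data_format='channels_last')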
"""
if np.all(factor == 1):
return inputs
resize_to = None
if isinstance(inputs, (list, tuple)):
x, resize_to = inputs
else:
x = inputs
inputs = None
if kwargs.get('filters') is None:
kwargs['filters'] = cls.num_channels(x, kwargs['data_format'])
x = upsample(x, factor=factor, layout=layout, name=name, **kwargs)
if resize_to is not None:
x = cls.crop(x, resize_to, kwargs['data_format'])
return x
| 36.638961
| 118
| 0.578708
|
e8fae19c6e76fad7ae0b826725845b45908db8e2
| 2,069
|
py
|
Python
|
sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2022_04_01/models/__init__.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2022_04_01/models/__init__.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2022_04_01/models/__init__.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import APIServerProfile
from ._models_py3 import CloudErrorBody
from ._models_py3 import ClusterProfile
from ._models_py3 import ConsoleProfile
from ._models_py3 import Display
from ._models_py3 import IngressProfile
from ._models_py3 import MasterProfile
from ._models_py3 import NetworkProfile
from ._models_py3 import OpenShiftCluster
from ._models_py3 import OpenShiftClusterAdminKubeconfig
from ._models_py3 import OpenShiftClusterCredentials
from ._models_py3 import OpenShiftClusterList
from ._models_py3 import OpenShiftClusterUpdate
from ._models_py3 import Operation
from ._models_py3 import OperationList
from ._models_py3 import Resource
from ._models_py3 import ServicePrincipalProfile
from ._models_py3 import SystemData
from ._models_py3 import TrackedResource
from ._models_py3 import WorkerProfile
from ._azure_red_hat_open_shift_client_enums import (
CreatedByType,
EncryptionAtHost,
FipsValidatedModules,
ProvisioningState,
Visibility,
)
__all__ = [
'APIServerProfile',
'CloudErrorBody',
'ClusterProfile',
'ConsoleProfile',
'Display',
'IngressProfile',
'MasterProfile',
'NetworkProfile',
'OpenShiftCluster',
'OpenShiftClusterAdminKubeconfig',
'OpenShiftClusterCredentials',
'OpenShiftClusterList',
'OpenShiftClusterUpdate',
'Operation',
'OperationList',
'Resource',
'ServicePrincipalProfile',
'SystemData',
'TrackedResource',
'WorkerProfile',
'CreatedByType',
'EncryptionAtHost',
'FipsValidatedModules',
'ProvisioningState',
'Visibility',
]
| 31.348485
| 94
| 0.724505
|
b9cea0e13ee553f4234ff6bcdae860f394aaa700
| 1,067
|
py
|
Python
|
example.py
|
HW-MLCS/Python-RVO2
|
502eed2e6656772728b7f9bdacf9423cf84a6801
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
HW-MLCS/Python-RVO2
|
502eed2e6656772728b7f9bdacf9423cf84a6801
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
HW-MLCS/Python-RVO2
|
502eed2e6656772728b7f9bdacf9423cf84a6801
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import rvo2
sim = rvo2.PyRVOSimulator(1/60., 1.5, 5, 1.5, 2, 0.4, 2)
# Pass either just the position (the other parameters then use
# the default values passed to the PyRVOSimulator constructor),
# or pass all available parameters.
a0 = sim.addAgent((0, 0))
a1 = sim.addAgent((1, 0))
a2 = sim.addAgent((1, 1))
a3 = sim.addAgent((0, 1), 1.5, 5, 1.5, 2, 0.4, 2, (0, 0))
# Obstacles are also supported.
o1 = sim.addObstacle([(0.1, 0.1), (-0.1, 0.1), (-0.1, -0.1)])
sim.processObstacles()
sim.setAgentPrefVelocity(a0, (1, 1))
sim.setAgentPrefVelocity(a1, (-1, 1))
sim.setAgentPrefVelocity(a2, (-1, -1))
sim.setAgentPrefVelocity(a3, (1, -1))
print('Simulation has %i agents and %i obstacle vertices in it.' %
(sim.getNumAgents(), sim.getNumObstacleVertices()))
print('Running simulation')
for step in range(100):
sim.doStep()
positions = ['(%5.3f, %5.3f)' % sim.getAgentPosition(agent_no)
for agent_no in (a0, a1, a2, a3)]
print('step=%2i t=%.3f %s' % (step, sim.getGlobalTime(), ' '.join(positions)))
| 29.638889
| 85
| 0.643861
|
edd6c317e0bd4bab3340dc468d458ccf83773036
| 10,782
|
py
|
Python
|
utils.py
|
Yixin-Liu-323/SUBLIME
|
f8ac29a124b60e80557a9e405e9c3d784e52279a
|
[
"MIT"
] | null | null | null |
utils.py
|
Yixin-Liu-323/SUBLIME
|
f8ac29a124b60e80557a9e405e9c3d784e52279a
|
[
"MIT"
] | null | null | null |
utils.py
|
Yixin-Liu-323/SUBLIME
|
f8ac29a124b60e80557a9e405e9c3d784e52279a
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
from sklearn.neighbors import kneighbors_graph
import dgl
from sklearn import metrics
from munkres import Munkres
EOS = 1e-10
def apply_non_linearity(tensor, non_linearity, i):
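    """Apply the configured non-linearity ('elu', 'relu' or 'none') to a tensor.
    For 'elu' the input is rescaled and shifted by the factor i before the ELU,
    and 1 is added so the output stays non-negative.
    """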
if non_linearity == 'elu':
return F.elu(tensor * i - i) + 1
elif non_linearity == 'relu':
return F.relu(tensor)
elif non_linearity == 'none':
return tensor
else:
        raise NameError("We don't support this non-linearity yet")
def split_batch(init_list, batch_size):
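    """Split a list into consecutive chunks of batch_size; a smaller remainder chunk is kept."""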
groups = zip(*(iter(init_list),) * batch_size)
end_list = [list(i) for i in groups]
count = len(init_list) % batch_size
end_list.append(init_list[-count:]) if count != 0 else end_list
return end_list
def edge_deletion(adj, drop_r):
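    """Randomly zero out a fraction drop_r of the undirected edges of a dense adjacency matrix."""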
edge_index = np.array(np.nonzero(adj))
half_edge_index = edge_index[:, edge_index[0,:] < edge_index[1,:]]
num_edge = half_edge_index.shape[1]
samples = np.random.choice(num_edge, size=int(drop_r * num_edge), replace=False)
dropped_edge_index = half_edge_index[:, samples].T
adj[dropped_edge_index[:,0],dropped_edge_index[:,1]] = 0.
adj[dropped_edge_index[:,1],dropped_edge_index[:,0]] = 0.
return adj
def edge_addition(adj, add_r):
edge_index = np.array(np.nonzero(adj))
half_edge_index = edge_index[:, edge_index[0,:] < edge_index[1,:]]
num_edge = half_edge_index.shape[1]
num_node = adj.shape[0]
added_edge_index_in = np.random.choice(num_node, size=int(add_r * num_edge), replace=True)
added_edge_index_out = np.random.choice(num_node, size=int(add_r * num_edge), replace=True)
adj[added_edge_index_in,added_edge_index_out] = 1.
adj[added_edge_index_out,added_edge_index_in] = 1.
return adj
def get_random_mask(features, r, nr):
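    """Sample a Bernoulli feature mask: positive entries are masked with probability 1/r,
    zero entries with a probability chosen so that the expected number of masked zeros
    is nr times the expected number of masked positives.
    """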
nones = torch.sum(features > 0.0).float()
nzeros = features.shape[0] * features.shape[1] - nones
pzeros = nones / nzeros / r * nr
probs = torch.zeros(features.shape).cuda()
probs[features == 0.0] = pzeros
probs[features > 0.0] = 1 / r
mask = torch.bernoulli(probs)
return mask
def get_node_mask(features, mask_rate):
num_node = features.shape[0]
mask = torch.zeros(features.shape)
samples = np.random.choice(num_node, size=int(num_node * mask_rate), replace=False)
mask[samples, :] = 1
return mask.cuda(), samples
def get_feat_mask(features, mask_rate):
feat_node = features.shape[1]
mask = torch.zeros(features.shape)
samples = np.random.choice(feat_node, size=int(feat_node * mask_rate), replace=False)
mask[:, samples] = 1
return mask.cuda(), samples
def get_random_mask_ogb(features, r):
probs = torch.full(features.shape, 1 / r)
mask = torch.bernoulli(probs)
return mask
def accuracy(preds, labels):
pred_class = torch.max(preds, 1)[1]
return torch.sum(torch.eq(pred_class, labels)).float() / labels.shape[0]
def nearest_neighbors(X, k, metric):
adj = kneighbors_graph(X, k, metric=metric)
adj = np.array(adj.todense(), dtype=np.float32)
adj += np.eye(adj.shape[0])
return adj
def nearest_neighbors_sparse(X, k, metric):
adj = kneighbors_graph(X, k, metric=metric)
loop = np.arange(X.shape[0])
[s_, d_, val] = sp.find(adj)
s = np.concatenate((s_, loop))
d = np.concatenate((d_, loop))
return s, d
def nearest_neighbors_pre_exp(X, k, metric, i):
adj = kneighbors_graph(X, k, metric=metric)
adj = np.array(adj.todense(), dtype=np.float32)
adj += np.eye(adj.shape[0])
adj = adj * i - i
return adj
def nearest_neighbors_pre_elu(X, k, metric, i):
adj = kneighbors_graph(X, k, metric=metric)
adj = np.array(adj.todense(), dtype=np.float32)
adj += np.eye(adj.shape[0])
adj = adj * i - i
return adj
def normalize(adj, mode, sparse=False):
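    """Symmetrically ('sym') or row- ('row') normalize a dense or sparse adjacency matrix."""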
if not sparse:
if mode == "sym":
inv_sqrt_degree = 1. / (torch.sqrt(adj.sum(dim=1, keepdim=False)) + EOS)
return inv_sqrt_degree[:, None] * adj * inv_sqrt_degree[None, :]
elif mode == "row":
inv_degree = 1. / (adj.sum(dim=1, keepdim=False) + EOS)
return inv_degree[:, None] * adj
else:
exit("wrong norm mode")
else:
adj = adj.coalesce()
if mode == "sym":
inv_sqrt_degree = 1. / (torch.sqrt(torch.sparse.sum(adj, dim=1).values()))
D_value = inv_sqrt_degree[adj.indices()[0]] * inv_sqrt_degree[adj.indices()[1]]
elif mode == "row":
aa = torch.sparse.sum(adj, dim=1)
bb = aa.values()
inv_degree = 1. / (torch.sparse.sum(adj, dim=1).values() + EOS)
D_value = inv_degree[adj.indices()[0]]
else:
exit("wrong norm mode")
new_values = adj.values() * D_value
return torch.sparse.FloatTensor(adj.indices(), new_values, adj.size())
def symmetrize(adj): # only for non-sparse
return (adj + adj.T) / 2
def cal_similarity_graph(node_embeddings):
similarity_graph = torch.mm(node_embeddings, node_embeddings.t())
return similarity_graph
def top_k(raw_graph, K):
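    """Keep the K largest entries in every row of raw_graph and zero out the rest."""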
values, indices = raw_graph.topk(k=int(K), dim=-1)
assert torch.max(indices) < raw_graph.shape[1]
mask = torch.zeros(raw_graph.shape).cuda()
mask[torch.arange(raw_graph.shape[0]).view(-1, 1), indices] = 1.
mask.requires_grad = False
sparse_graph = raw_graph * mask
return sparse_graph
def knn_fast(X, k, b):
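    """Build a kNN graph over the rows of X in batches of b rows using cosine similarity.
    Returns (rows, cols, values) of a symmetrically normalized sparse adjacency.
    """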
X = F.normalize(X, dim=1, p=2)
index = 0
values = torch.zeros(X.shape[0] * (k + 1)).cuda()
rows = torch.zeros(X.shape[0] * (k + 1)).cuda()
cols = torch.zeros(X.shape[0] * (k + 1)).cuda()
norm_row = torch.zeros(X.shape[0]).cuda()
norm_col = torch.zeros(X.shape[0]).cuda()
while index < X.shape[0]:
if (index + b) > (X.shape[0]):
end = X.shape[0]
else:
end = index + b
sub_tensor = X[index:index + b]
similarities = torch.mm(sub_tensor, X.t())
vals, inds = similarities.topk(k=k + 1, dim=-1)
values[index * (k + 1):(end) * (k + 1)] = vals.view(-1)
cols[index * (k + 1):(end) * (k + 1)] = inds.view(-1)
rows[index * (k + 1):(end) * (k + 1)] = torch.arange(index, end).view(-1, 1).repeat(1, k + 1).view(-1)
norm_row[index: end] = torch.sum(vals, dim=1)
norm_col.index_add_(-1, inds.view(-1), vals.view(-1))
index += b
norm = norm_row + norm_col
rows = rows.long()
cols = cols.long()
values *= (torch.pow(norm[rows], -0.5) * torch.pow(norm[cols], -0.5))
return rows, cols, values
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def torch_sparse_to_dgl_graph(torch_sparse_mx):
torch_sparse_mx = torch_sparse_mx.coalesce()
indices = torch_sparse_mx.indices()
values = torch_sparse_mx.values()
rows_, cols_ = indices[0,:], indices[1,:]
dgl_graph = dgl.graph((rows_, cols_), num_nodes=torch_sparse_mx.shape[0], device='cuda')
dgl_graph.edata['w'] = values.detach().cuda()
return dgl_graph
def dgl_graph_to_torch_sparse(dgl_graph):
values = dgl_graph.edata['w'].cpu().detach()
rows_, cols_ = dgl_graph.edges()
indices = torch.cat((torch.unsqueeze(rows_, 0), torch.unsqueeze(cols_, 0)), 0).cpu()
torch_sparse_mx = torch.sparse.FloatTensor(indices, values)
return torch_sparse_mx
def torch_sparse_eye(num_nodes):
indices = torch.arange(num_nodes).repeat(2, 1)
values = torch.ones(num_nodes)
return torch.sparse.FloatTensor(indices, values)
class clustering_metrics():
def __init__(self, true_label, predict_label):
self.true_label = true_label
self.pred_label = predict_label
def clusteringAcc(self):
        # find the best mapping between true labels and predicted labels
l1 = list(set(self.true_label))
numclass1 = len(l1)
l2 = list(set(self.pred_label))
numclass2 = len(l2)
if numclass1 != numclass2:
print('Class Not equal, Error!!!!')
return 0, 0, 0, 0, 0, 0, 0
cost = np.zeros((numclass1, numclass2), dtype=int)
for i, c1 in enumerate(l1):
mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1]
for j, c2 in enumerate(l2):
mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2]
cost[i][j] = len(mps_d)
# match two clustering results by Munkres algorithm
m = Munkres()
cost = cost.__neg__().tolist()
indexes = m.compute(cost)
# get the match results
new_predict = np.zeros(len(self.pred_label))
for i, c in enumerate(l1):
            # corresponding label in l2
c2 = l2[indexes[i][1]]
# ai is the index with label==c2 in the pred_label list
ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2]
new_predict[ai] = c
acc = metrics.accuracy_score(self.true_label, new_predict)
f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro')
precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro')
recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro')
f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro')
precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro')
recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro')
return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro
def evaluationClusterModelFromLabel(self, print_results=True):
nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label)
adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label)
acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc()
if print_results:
print('ACC={:.4f}, f1_macro={:.4f}, precision_macro={:.4f}, recall_macro={:.4f}, f1_micro={:.4f}, '
.format(acc, f1_macro, precision_macro, recall_macro, f1_micro) +
'precision_micro={:.4f}, recall_micro={:.4f}, NMI={:.4f}, ADJ_RAND_SCORE={:.4f}'
.format(precision_micro, recall_micro, nmi, adjscore))
return acc, nmi, f1_macro, adjscore
| 36.924658
| 116
| 0.643294
|
e4a026323ce58c59a5b943db8ff54bdfed23e955
| 17,621
|
py
|
Python
|
bdgym/envs/driver_assistant/action.py
|
RDLLab/benevolent-deception-gym
|
4d04e097609097e0f07c661aac221184ebdec2fe
|
[
"MIT"
] | null | null | null |
bdgym/envs/driver_assistant/action.py
|
RDLLab/benevolent-deception-gym
|
4d04e097609097e0f07c661aac221184ebdec2fe
|
[
"MIT"
] | null | null | null |
bdgym/envs/driver_assistant/action.py
|
RDLLab/benevolent-deception-gym
|
4d04e097609097e0f07c661aac221184ebdec2fe
|
[
"MIT"
] | null | null | null |
"""Action class for BDGym Highway Autopilot environment """
from typing import TYPE_CHECKING, Callable, Tuple, List, Dict
import numpy as np
from gym import spaces
from highway_env import utils
from highway_env.envs.common.abstract import Observation
from highway_env.envs.common.action import \
ContinuousAction, Action, action_factory, ActionType
from bdgym.envs.driver_assistant.vehicle import DriverAssistantVehicle
if TYPE_CHECKING:
from bdgym.envs.driver_assistant.env import DriverAssistantEnv
class DriverAssistantAction(ActionType):
"""Joint Driver and Assistant Action Type
Essentially wraps:
0. AssistantContinuousAction, AssistantContinuousOffsetAction or
AssistantDiscreteActionSpace for the Assistant, and
1. DriverDiscreteAction or DriverContinuousAction for the Driver
"""
def __init__(self,
env: 'DriverAssistantEnv',
action_config: dict) -> None:
self.env = env
self.action_config = action_config
self.driver_action_type = driver_action_factory(
env, action_config.get("driver", {})
)
self.assistant_action_type = assistant_action_factory(
env, action_config["assistant"]
)
def space(self) -> spaces.Space:
return spaces.Tuple([self.driver_space(), self.assistant_space()])
def reset(self):
"""Reset the action space following an episode """
self.assistant_action_type.reset()
self.driver_action_type.reset()
@property
def vehicle_class(self) -> Callable:
return DriverAssistantVehicle
@property
def last_assistant_action(self) -> Action:
"""The last action performed by the Assistant """
return self.assistant_action_type.last_action
@property
def last_driver_action(self) -> Action:
"""The last action performed by the Driver """
return self.driver_action_type.last_action
def driver_space(self) -> spaces.Space:
"""Get the driver action space """
return self.driver_action_type.space()
def assistant_space(self) -> spaces.Space:
"""Get the assistant action space """
return self.assistant_action_type.space()
def act(self, action: Action) -> None:
if self.env.next_agent == self.env.ASSISTANT_IDX:
return self.assistant_act(action)
return self.driver_act(action)
def driver_act(self, action: Action) -> None:
"""Execute driver action """
self.driver_action_type.act(action)
def assistant_act(self, action: Action) -> None:
"""Execute assistant action """
self.assistant_action_type.act(action)
class DiscreteActionType(ActionType):
"""Base class for discrete action spaces """
STEP_SIZE_MAP = {
'x': 0.001,
'y': 0.05,
'vx': 0.05,
'vy': 0.05,
# 'acceleration': 0.05,
'acceleration': 1,
# 'steering': 0.025
'steering': 0.1
}
"""This step size of each action
This is the proportion of the max range of a variable, so the actual
step size will vary depending on the variable being affected.
The proportion is set per variable since the range of each variable
can be significantly different.
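    For example, with STEP_SIZE_MAP['steering'] = 0.1 and a (hypothetical) configured
    steering range of [-0.5, 0.5], the absolute step would be 0.1 * (0.5 - (-0.5)) = 0.1.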
"""
NOOP = 0
UP = 1
DOWN = 2
"""Integer values of each discrete action """
def __init__(self,
env: 'DriverAssistantEnv',
features_range: Dict[str, Tuple[float, float]],
feature_indices: Dict[str, int],
action_space_shape: List[int]):
self.env = env
self.features_range = features_range
self.feature_indices = feature_indices
self.space_shape = action_space_shape
# This is the last action as unnormalized, continuous assistant action
self.last_action = np.full(
len(self.space_shape),
self.NOOP,
dtype=np.float32
)
self.feature_step_size = {}
for feature, step_proportion in self.STEP_SIZE_MAP.items():
if feature not in self.features_range:
continue
feat_range = self.features_range[feature]
max_range = feat_range[1] - feat_range[0]
self.feature_step_size[feature] = step_proportion*max_range
def space(self) -> spaces.MultiDiscrete:
""" Implements ActionType.space() """
return spaces.MultiDiscrete(self.space_shape)
def reset(self) -> None:
"""Reset the action space following an episode """
self.last_action = np.full(
len(self.space_shape),
self.NOOP,
dtype=np.float32
)
@property
def vehicle_class(self) -> Callable:
return DriverAssistantVehicle
def act(self, action: Action) -> None:
raise NotImplementedError
def get_controls(self, action: Action) -> Tuple[float, float]:
"""Get the continuous controls from discrete actions """
controls = []
acc_action = action[self.feature_indices['acceleration']]
steering_action = action[self.feature_indices['steering']]
acc_step = self.feature_step_size['acceleration']
steering_step = self.feature_step_size['steering']
for f_action, f_step in zip(
[acc_action, steering_action],
[acc_step, steering_step]
):
if f_action == self.UP:
delta = f_step
elif f_action == self.DOWN:
delta = -1 * f_step
else:
delta = 0
controls.append(delta)
return controls[0], controls[1]
def normalize_action(self, action: Action) -> Action:
"""Convert action from absolute to proportional """
assert len(action) == len(self.feature_indices)
norm_interval = [-1.0, 1.0]
absolute_action = np.ones_like(action, dtype=np.float32)
for feature, f_idx in self.feature_indices.items():
frange = self.features_range[feature]
absolute_action[f_idx] = utils.lmap(
action[f_idx], frange, norm_interval
)
return absolute_action
class DriverDiscreteAction(DiscreteActionType):
"""Discrete Action space for the Driver.
Note, this is different to the HighwayEnv.DiscreteMetaAction action space.
This action space essentially discretizes the continuous 'acceleration'
and 'steering' driver actions.
Type: MultiDiscrete([3, 3])
Num Action Space Actions
0 acceleration NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
1 steering NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
The 'acceleration' and 'steering' action space actions have the effect
of recommending to the driver to steer and/or accelerate up, down, or
no change for the step the action is applied.
"""
DRIVER_DISCRETE_ACTION_SPACE_SIZE = 2
DRIVER_DISCRETE_ACTION_SPACE_SHAPE = [3, 3]
DRIVER_DISCRETE_ACTION_INDICES = {
'acceleration': 0,
'steering': 1
}
def __init__(self,
env: 'DriverAssistantEnv',
features_range: dict,
**kwargs) -> None:
super().__init__(
env,
features_range,
self.DRIVER_DISCRETE_ACTION_INDICES,
self.DRIVER_DISCRETE_ACTION_SPACE_SHAPE
)
def act(self, action: Action) -> None:
""" Overrides parent
Assumes action is normalized
"""
controls = self.get_controls(action)
self.last_action = np.array(controls)
self.env.vehicle.act({
"acceleration": action[0], "steering": action[1]
})
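# Illustrative sketch (not part of the original module): how a MultiDiscrete([3, 3])
# driver action maps onto control deltas via STEP_SIZE_MAP. The feature ranges below
# are assumptions made for the arithmetic only, not values taken from the env config.
#
#   features_range = {"acceleration": (-5.0, 5.0), "steering": (-0.4, 0.4)}
#   # step sizes: acceleration = 1 * 10.0 = 10.0, steering = 0.1 * 0.8 = 0.08
#   action = [UP, DOWN]          # i.e. [1, 2]
#   # get_controls(action) would then return (+10.0, -0.08)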
class DriverContinuousAction(ContinuousAction):
"""Continuous action space for driver.
This is simply the HighwayEnv.ContinuousAction class with some extra
functions implemented so it is compatible with BDGym
"""
def reset(self):
"""Reset the action space following an episode """
self.last_action = np.zeros(self.space().shape, dtype=np.float32)
def space(self) -> spaces.Box:
size = 2 if self.lateral and self.longitudinal else 1
return spaces.Box(
np.float32(-1), np.float32(1), shape=(size,), dtype=np.float32
)
class AssistantContinuousAction(ContinuousAction):
"""Continuous action space for assistant.
This includes continuous actions for the assistant signal sent
to the driver which includes: ['x', 'y', 'vx', 'vy'] of the vehicle.
It also includes assistant recommendation for the drivers next action in
terms of throttle and steering.
The space intervals are always [-1, 1], but mapped to the proper values in
the environment step function, as needed.
"""
ASSISTANT_ACTION_SPACE_SIZE = 6
ASSISTANT_ACTION_INDICES = {
'x': 0,
'y': 1,
'vx': 2,
'vy': 3,
'acceleration': 4,
'steering': 5
}
def __init__(self,
env: 'DriverAssistantEnv',
features_range: dict,
**config) -> None:
super().__init__(env, **config)
self.features_range = features_range
def space(self) -> spaces.Box:
""" Overrides ContinousAction.space() """
shape = (self.ASSISTANT_ACTION_SPACE_SIZE,)
return spaces.Box(
np.float32(-1), np.float32(1), shape=shape, dtype=np.float32
)
def reset(self):
"""Reset the action space following an episode """
self.last_action = np.zeros(
self.ASSISTANT_ACTION_SPACE_SIZE, dtype=np.float32
)
def act(self, action: Action) -> None:
""" Overrides parent """
self.last_action = self.unnormalize_action(action)
def normalize_action(self, action: Action) -> Action:
"""Convert action from absolute to proportional """
norm_interval = [-1.0, 1.0]
absolute_action = np.ones_like(action, dtype=np.float32)
for i, frange in enumerate(self.features_range.values()):
absolute_action[i] = utils.lmap(
action[i], frange, norm_interval
)
return absolute_action
def unnormalize_action(self, action: Action) -> Action:
"""Convert action from proportional to absolute """
norm_interval = [-1.0, 1.0]
absolute_action = np.ones_like(action, dtype=np.float32)
for i, frange in enumerate(self.features_range.values()):
absolute_action[i] = utils.lmap(
action[i], norm_interval, frange
)
return absolute_action
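# Worked example (illustrative assumption, not from the original code): with a feature
# range of (0.0, 100.0), unnormalize_action maps a proportional value of 0.5 to
# utils.lmap(0.5, [-1.0, 1.0], (0.0, 100.0)) = 75.0, and normalize_action maps 75.0 back to 0.5.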
class AssistantContinuousOffsetAction(AssistantContinuousAction):
"""Continuous action space for Assistant.
Same as AssistantContinuousAction except this time the action specifies
how much to offset each signal from the value observed by the assistant,
and how much to offset from 0.0 for the acceleration and steering
recommendation.
For the recommendation this is essentially unchanged from
AssistantContinuousAction.
"""
def get_last_ego_obs(self) -> Observation:
"""Get the last assistant observation for ego vehicle """
last_obs = self.env.observation_type.get_last_assistant_frame()
# include only first row which is the observation of controlled vehicle
        # also exclude the first column, which indicates 'presence'
return last_obs[0, 1:]
def act(self, action: Action) -> None:
""" Overrides parent """
abs_offset_action = self.unnormalize_action(action)
ego_obs = self.get_last_ego_obs()
abs_signal = ego_obs + abs_offset_action[:4]
abs_action = np.concatenate([abs_signal, abs_offset_action[4:]])
self.last_action = abs_action
class AssistantDiscreteAction(DiscreteActionType):
"""Discrete Action space for Assistant.
This is a MultiDiscrete Action space, where each action is a combination
of 6 sub actions, similar to the continuous Assistant actions:
Type: MultiDiscrete([3, 3, 3, 3, 3, 3])
Num Action Space Actions
0 x NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
1 y NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
2 vx NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
3 vy NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
4 acceleration NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
5 steering NOOP[0], UP[1], DOWN[2] - params: min: 0, max: 2
For the ['x', 'y', 'vx', 'vy'] action spaces the actions have the
effect of shifting the current offset/distortion being applied to the
observation by a fixed amount depending on the feature
(see the AssistantDiscreteActionSpace.STEP_SIZE_MAP for exact values).
The 'acceleration' and 'steering' action space actions have the effect
of recommending to the driver to steer and/or accelerate up, down, or
no change for the step the action is applied.
NOTE: Using this action space affects the observation space of the
assistant. Specifically, it adds an extra row at the top of the
observation matrix which is the current offset.
"""
ASSISTANT_DISCRETE_ACTION_SPACE_SIZE = 6
ASSISTANT_DISCRETE_ACTION_SPACE_SHAPE = [3, 3, 3, 3, 3, 3]
ASSISTANT_DISCRETE_ACTION_INDICES = {
'x': 0,
'y': 1,
'vx': 2,
'vy': 3,
'acceleration': 4,
'steering': 5
}
OFFSET_FEATURES = ['x', 'y', 'vx', 'vy']
"""List of feature that offset is applied too """
def __init__(self,
env: 'DriverAssistantEnv',
features_range: dict,
**kwargs) -> None:
super().__init__(
env,
features_range,
self.ASSISTANT_DISCRETE_ACTION_INDICES,
self.ASSISTANT_DISCRETE_ACTION_SPACE_SHAPE
)
# The current unnormalized offset
self.current_offset = np.zeros(len(self.OFFSET_FEATURES))
def reset(self):
"""Reset the action space following an episode """
super().reset()
self.current_offset = np.zeros(len(self.OFFSET_FEATURES))
def act(self, action: Action) -> None:
""" Overrides parent
Assumes action is normalized
"""
if action is not None:
self._update_current_offset(action)
recommendation = self.get_controls(action)
else:
recommendation = self.last_action[len(self.OFFSET_FEATURES):]
last_obs = self.get_last_ego_obs()
abs_action = np.zeros(len(self.ASSISTANT_DISCRETE_ACTION_INDICES))
abs_action[:len(self.OFFSET_FEATURES)] = self.current_offset + last_obs
abs_action[len(self.OFFSET_FEATURES):] = recommendation
self.last_action = abs_action
def _update_current_offset(self, action: Action) -> None:
for feature in self.OFFSET_FEATURES:
f_idx = self.ASSISTANT_DISCRETE_ACTION_INDICES[feature]
f_action = action[f_idx]
if f_action == self.UP:
delta = self.feature_step_size[feature]
elif f_action == self.DOWN:
delta = -1 * self.feature_step_size[feature]
else:
delta = 0
self.current_offset[f_idx] += delta
self.current_offset[f_idx] = np.clip(
self.current_offset[f_idx],
self.features_range[feature][0],
self.features_range[feature][1]
)
def get_last_ego_obs(self) -> Observation:
"""Get the last assistant observation for ego vehicle """
last_obs = self.env.last_assistant_obs
# include only first row which is the observation of controlled vehicle
        # also exclude the first column, which indicates 'presence'
return last_obs[self.env.observation_type.ASSISTANT_EGO_ROW, 1:]
def get_normalized_offset(self) -> np.ndarray:
"""Get the current offset in normalized form (values in [-1.0, 1.0])
Returns
-------
np.ndarray
Current offset with normalized values
"""
norm_interval = [-1.0, 1.0]
norm_offset = np.zeros(len(self.OFFSET_FEATURES))
for feature in self.OFFSET_FEATURES:
f_idx = self.ASSISTANT_DISCRETE_ACTION_INDICES[feature]
f_range = self.features_range[feature]
norm_offset[f_idx] = utils.lmap(
self.current_offset[f_idx], f_range, norm_interval
)
return norm_offset
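# Illustrative sketch (not part of the original module): reading a MultiDiscrete assistant
# action with ASSISTANT_DISCRETE_ACTION_INDICES. The action
#   [UP, NOOP, NOOP, DOWN, NOOP, UP]   i.e. [1, 0, 0, 2, 0, 1]
# nudges the 'x' offset up one step, nudges the 'vy' offset down one step, and recommends
# a one-step steering increase, while 'y', 'vx' and the acceleration recommendation are unchanged.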
def assistant_action_factory(env: 'DriverAssistantEnv',
config: dict) -> ActionType:
"""Factory for assistant action type """
if config["type"] == "AssistantContinuousAction":
return AssistantContinuousAction(env, **config)
if config["type"] == "AssistantContinuousOffsetAction":
return AssistantContinuousOffsetAction(env, **config)
if config["type"] == "AssistantDiscreteAction":
return AssistantDiscreteAction(env, **config)
return action_factory(env, config)
def driver_action_factory(env: 'DriverAssistantEnv',
config: dict) -> ActionType:
"""Factory for driver action type """
if config['type'] == "DriverDiscreteAction":
return DriverDiscreteAction(env, **config)
if config['type'] == "DriverContinuousAction":
return DriverContinuousAction(env, **config)
return action_factory(env, config)
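# Illustrative usage sketch (assumptions: `env` is an already-constructed DriverAssistantEnv
# and the features_range values are placeholders, not real configuration):
#
#   driver_action = driver_action_factory(
#       env, {"type": "DriverDiscreteAction", "features_range": {...}})
#   assistant_action = assistant_action_factory(
#       env, {"type": "AssistantContinuousAction", "features_range": {...}})
#
# Any other "type" value falls through to the generic action_factory (presumably the
# one provided by highway-env).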
| 35.887984
| 79
| 0.631462
|
a00c561a3a36993eff149ab2ff136225c76ae706
| 197
|
py
|
Python
|
hackday/assets/admin.py
|
mpirnat/hackday
|
82a13665e2d93c80d6dfb1a1d5602495d01a243e
|
[
"MIT"
] | 4
|
2015-05-21T19:44:09.000Z
|
2019-12-03T11:07:03.000Z
|
hackday/assets/admin.py
|
mpirnat/hackday
|
82a13665e2d93c80d6dfb1a1d5602495d01a243e
|
[
"MIT"
] | 3
|
2020-02-11T21:47:49.000Z
|
2021-06-10T17:28:44.000Z
|
hackday/assets/admin.py
|
mpirnat/hackday
|
82a13665e2d93c80d6dfb1a1d5602495d01a243e
|
[
"MIT"
] | 2
|
2016-01-05T13:39:09.000Z
|
2017-02-19T09:31:58.000Z
|
from hackday.assets.models import Attachment, ImageAttachment, Link
from django.contrib import admin
admin.site.register(Attachment)
admin.site.register(ImageAttachment)
admin.site.register(Link)
| 28.142857
| 67
| 0.84264
|
0ebb41f8b3ffa9546507ac3f620bb1b1fe1b0956
| 2,937
|
py
|
Python
|
main.py
|
vishalpolley/Handwritten-Digit-Recognition-using-Neural-Networks
|
871722b747a4b45c324a706a46a0294a8034a702
|
[
"MIT"
] | null | null | null |
main.py
|
vishalpolley/Handwritten-Digit-Recognition-using-Neural-Networks
|
871722b747a4b45c324a706a46a0294a8034a702
|
[
"MIT"
] | null | null | null |
main.py
|
vishalpolley/Handwritten-Digit-Recognition-using-Neural-Networks
|
871722b747a4b45c324a706a46a0294a8034a702
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
n_train = mnist.train.num_examples  # Training data
n_validation = mnist.validation.num_examples # Validation data
n_test = mnist.test.num_examples # Testing data
n_input = 784 # Input layer (28X28 pixels)
n_hidden1 = 512 # 1st hidden layer
n_hidden2 = 256 # 2nd hidden layer
n_hidden3 = 128 # 3rd hidden layer
n_output = 10 # Output layer (0-9 digits)
learning_rate = 1e-4
n_iterations = 1000
batch_size = 128
dropout = 0.5
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_output])
keep_prob = tf.placeholder(tf.float32) # Control dropout rate
weights = {
'w1': tf.Variable(tf.truncated_normal([n_input, n_hidden1], stddev=0.1)),
'w2': tf.Variable(tf.truncated_normal([n_hidden1, n_hidden2], stddev=0.1)),
'w3': tf.Variable(tf.truncated_normal([n_hidden2, n_hidden3], stddev=0.1)),
'out': tf.Variable(tf.truncated_normal([n_hidden3, n_output], stddev=0.1)),
}
biases = {
'b1': tf.Variable(tf.constant(0.1, shape=[n_hidden1])),
'b2': tf.Variable(tf.constant(0.1, shape=[n_hidden2])),
'b3': tf.Variable(tf.constant(0.1, shape=[n_hidden3])),
'out': tf.Variable(tf.constant(0.1, shape=[n_output]))
}
layer_1 = tf.add(tf.matmul(X, weights['w1']), biases['b1'])
layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
layer_3 = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
layer_drop = tf.nn.dropout(layer_3, keep_prob)
output_layer = tf.matmul(layer_drop, weights['out']) + biases['out']  # use the dropout-regularized layer
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=output_layer)) # Loss function
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy) # Gradient descent optimization
correct_pred = tf.equal(tf.argmax(output_layer, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Training on mini batches
for i in range(n_iterations):
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(train_step, feed_dict={X: batch_x, Y: batch_y, keep_prob:dropout})
# Print loss and accuracy (per minibatch)
if i%100 == 0:
minibatch_loss, minibatch_accuracy = sess.run([cross_entropy, accuracy], feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.0})
print("Iteration", str(i), "\t| Loss =", str(minibatch_loss), "\t| Accuracy =", str(minibatch_accuracy))
test_accuracy = sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1.0})
print("\nAccuracy on test set:", test_accuracy)
# Testing over sample image
img = np.invert(Image.open("test_img.png").convert('L')).ravel()
prediction = sess.run(tf.argmax(output_layer, 1), feed_dict={X: [img]})
print("Prediction for test image:", np.squeeze(prediction))
| 39.689189
| 126
| 0.735785
|
02f4dc23c5a817d0277273e1386fe72814a5e744
| 9,902
|
py
|
Python
|
old/FastFourierTransform/Rotate_by_complex.py
|
Tony031218/manim-projects
|
b243dec0f0a007649a92938e90d60eccb4c7dd15
|
[
"Apache-2.0"
] | 45
|
2019-10-08T23:58:20.000Z
|
2020-05-20T03:49:15.000Z
|
old/FastFourierTransform/Rotate_by_complex.py
|
Tony031218/manim-projects
|
b243dec0f0a007649a92938e90d60eccb4c7dd15
|
[
"Apache-2.0"
] | null | null | null |
old/FastFourierTransform/Rotate_by_complex.py
|
Tony031218/manim-projects
|
b243dec0f0a007649a92938e90d60eccb4c7dd15
|
[
"Apache-2.0"
] | 12
|
2019-08-15T08:07:22.000Z
|
2020-05-09T12:34:14.000Z
|
from manimlib.imports import *
class Angle(VGroup):
CONFIG = {
'radius': 1,
'color': RED,
'opacity': 0.4,
'stroke_width': 10,
# 'below_180': True,
}
def __init__(self, A, O, B, **kwargs):
VMobject.__init__(self, **kwargs)
OA, OB = A-O, B-O
theta = np.angle(complex(*OA[:2])/complex(*OB[:2])) # angle of OB to OA
self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta, radius=self.radius/2,
stroke_width=100 * self.radius, color=self.color).set_stroke(opacity=self.opacity).move_arc_center_to(O))
self.add(Arc(start_angle=Line(O, B).get_angle(), angle=theta, radius=self.radius,
stroke_width=self.stroke_width, color=self.color).move_arc_center_to(O))
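# Worked example (illustrative, not in the original file): with O at the origin,
# A = (0, 1, 0) and B = (1, 0, 0), OA and OB as complex numbers are 1j and 1, so
# np.angle(1j / 1) = +pi/2 and the Angle mobject sweeps a quarter turn from OB to OA.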
class Unit_root(Scene):
def construct(self):
## Create ComplexPlane ##
cp_scale = 1.75
cp = ComplexPlane().scale(cp_scale)
cp.add_coordinates(0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -2, -3, -4, -5)
cp.add_coordinates(1j, 2j, 3j, -1j, -2j, -3j)
### about z^n ###
color_dict = {'z': PINK, 'x': BLUE, 'y': YELLOW, 'i': RED, '\\cos': BLUE, '\\sin': YELLOW, '\\theta}': BLUE,
'r': PINK, 'e': GREEN, 'n': YELLOW, 'k': YELLOW, '\\omega': PINK, '\\pi': BLUE}
complex_z = 0.9+0.6j
vect_z = Arrow(cp.n2p(0), cp.n2p(complex_z), buff=0, color=ORANGE)
dot_z = Dot(cp.n2p(complex_z), color=PINK)
angle_z = Angle(cp.n2p(1), cp.n2p(0), cp.n2p(complex_z), radius=0.6, color=BLUE)
## 3 forms of complex num
xy_form = TexMobject('z', '=', 'x', '+', 'y', 'i').set_color_by_tex_to_color_map(color_dict)
cs_form = TexMobject('z', '=', 'r', '(', '\\cos{', '\\theta}', '+', 'i', '\\sin{', '\\theta}', ')').set_color_by_tex_to_color_map(color_dict)
exp_form = TexMobject('z', '=', 'r', 'e^{', 'i', '\\theta}', color=WHITE).set_color_by_tex_to_color_map(color_dict).scale(1.2)
exp_form[-1].set_color(BLUE)
xy_form.next_to(dot_z, RIGHT * 0.6)
cs_form.next_to(dot_z, RIGHT * 0.6)
exp_form.next_to(dot_z, RIGHT * 0.6).shift(UP * 0.25)
## vgroup for z_i
vect_group = VGroup(vect_z)
dot_group = VGroup(dot_z)
text_group = VGroup(exp_form)
angle_group = VGroup(angle_z)
line_group = VGroup(Line(cp.n2p(1), cp.n2p(complex_z), color=PINK))
n = 10
for i in range(n-1):
zn_1 = complex_z ** (i+2-1)
zn = complex_z ** (i+2)
dot_i = Dot(cp.n2p(zn), color=PINK)
vect_i = Arrow(cp.n2p(0), cp.n2p(zn), buff=0, color=ORANGE)
text_i = TexMobject('z^{', '%d}' % (i+2), color=PINK).shift(cp.n2p(zn)/abs(zn) * (abs(zn) + 0.25))
angle_i = Angle(cp.n2p(zn_1), cp.n2p(0), cp.n2p(zn), radius=0.6, color=BLUE)
vect_group.add(vect_i)
dot_group.add(dot_i)
text_group.add(text_i)
angle_group.add(angle_i)
line_group.add(VGroup(Line(cp.n2p(zn_1), cp.n2p(zn), color=PINK)))
### conclusions from z^n =1 ###
text_zn = TexMobject('z^', 'n', '=', 'r^', 'n', 'e^{', 'n', '\\theta', 'i}', '=', '1').set_color_by_tex_to_color_map(color_dict)
text_zn[7].set_color(BLUE)
text_zn.scale(1.2).to_corner(RIGHT * 3.25 + UP * 1.2)
right_arrow = TexMobject('\\Rightarrow').next_to(text_zn, DOWN * 3.75).align_to(text_zn, LEFT)
text_01 = TexMobject('r', '=', '1').set_color_by_tex_to_color_map(color_dict).next_to(right_arrow, RIGHT * 2.4).shift(UP * 0.5)
text_02 = TexMobject('n', '\\theta', '=', '2', 'k', '\\pi').set_color_by_tex_to_color_map(color_dict).next_to(right_arrow, RIGHT * 2.4).shift(DOWN * 0.5)
text_12 = VGroup(text_01, text_02)
brace = Brace(text_12, LEFT)
text_03 = TexMobject('\\therefore', '\\omega^', 'n', '=', '1', '\\text{的}', 'n', '\\text{个根为:}',)\
.set_color_by_tex_to_color_map(color_dict).next_to(text_02, DOWN * 1.4).align_to(text_zn, LEFT)
text_wi_01 = TexMobject('\\omega', '_k', '=', 'e^{', 'i', '{2', 'k', '\\pi', '\\over', 'n}}',
).set_color_by_tex_to_color_map(color_dict)
text_wi_01.next_to(text_03, DOWN * 1.5).align_to(text_zn, LEFT)
text_wi_02 = TexMobject('=', '\\cos{', '2', 'k', '\\pi', '\\over', 'n}', '+', 'i', '\\sin{',
'2', 'k', '\\pi', '\\over', 'n}').set_color_by_tex_to_color_map(color_dict)
text_wi_02.next_to(text_wi_01, DOWN * 1.5).align_to(text_zn, LEFT)
text_wi_02[1:].scale(0.9)
text_k = TexMobject('(', 'k', '=', '0', ',', '1', ',', '2', ',','\\cdots', ',', 'n-1', ')').set_color_by_tex_to_color_map(color_dict)
text_k.scale(0.75).next_to(text_wi_02, DOWN * 1.5).align_to(text_zn, LEFT)
### display w_i in unit circle ###
# moved to animation part 3 #
### animation part 1 ###
self.play(ShowCreation(cp))
self.wait(1)
self.play(ShowCreation(vect_z))
self.wait(0.5)
self.play(ShowCreation(dot_z))
self.play(Write(xy_form))
self.wait(1)
self.play(ReplacementTransform(xy_form, cs_form))
self.wait(1)
self.play(ReplacementTransform(cs_form, exp_form))
self.wait()
self.play(ShowCreation(angle_z))
# self.add(vect_group, text_group, dot_group, angle_group, line_group)
for i in range(1, n):
self.play(ShowCreation(vect_group[i]), run_time=0.8)
self.play(ShowCreation(dot_group[i]), run_time=0.4)
self.play(Write(text_group[i]), run_time=0.6)
self.wait(0.2)
self.play(ShowCreation(angle_group[i]), run_time=0.6)
self.wait(0.4)
self.wait()
for i in range(0, n):
self.play(ShowCreation(line_group[i]), run_time=0.4)
self.wait(0.1)
self.wait()
all_exist = VGroup(cp, vect_group, text_group, dot_group, angle_group, line_group)
self.play(all_exist.shift, cp.n2p(-2), run_time=1.5)
self.wait()
### part 2 ###
text_bg = Polygon(cp.n2p(2.6+2.2j), cp.n2p(5.8+2.2j), cp.n2p(5.8-2.2j), cp.n2p(2.6-2.2j),
stroke_width=0, fill_color=BLACK, fill_opacity=0.75)
self.play(FadeIn(text_bg), run_time=1.2)
self.wait(0.5)
self.play(TransformFromCopy(text_group, text_zn[0:9]), run_time=1.2)
self.wait()
self.play(Write(text_zn[9:11]))
self.wait()
self.play(Write(right_arrow))
self.play(ShowCreation(brace))
self.play(TransformFromCopy(text_zn[3:5], text_01))
self.wait()
self.play(TransformFromCopy(text_zn[6:8], text_02[0:2]))
self.play(Write(text_02[2:6]))
self.wait()
self.play(Write(text_03), run_time=2)
self.wait(0.5)
self.play(Write(text_wi_01), run_time=2)
self.wait()
self.play(Write(text_wi_02), run_time=3)
self.wait()
self.play(Write(text_k), run_time=2)
self.wait(2)
### part 3 ###
unit_circle = Circle(radius=cp.n2p(1)[0], color=BLUE_B).move_to(cp.n2p(0))
self.play(ShowCreation(unit_circle))
self.wait(0.5)
z_new = np.exp(1j * TAU/11)
w_1 = TexMobject('\\omega', '_1', '=', 'e^{', 'i', '{2', '\\pi', '\\over', 'n}}',).scale(0.9)\
.set_color_by_tex_to_color_map(color_dict).move_to(cp.n2p(0)).shift((cp.n2p(z_new)-cp.n2p(0))*1.2+RIGHT*1.2)
dot_1 = Dot(cp.n2p(z_new), color=PINK)
vect_1 = Arrow(cp.n2p(0), cp.n2p(z_new), buff=0, color=ORANGE)
line_1 = Line(cp.n2p(1), cp.n2p(z_new), color=PINK)
dot_0 = Dot(cp.n2p(1), color=PINK)
vect_0 = Arrow(cp.n2p(0), cp.n2p(1), buff=0, color=ORANGE)
w_0 = TexMobject('\\omega', '_0', color=PINK).scale(0.8).move_to(cp.n2p(1.2))
self.play(ShowCreation(vect_0))
self.play(ShowCreation(dot_0), Write(w_0))
self.play(ReplacementTransform(vect_group[0], vect_1), run_time=0.3)
self.play(ReplacementTransform(dot_group[0], dot_1), run_time=0.3)
self.play(ReplacementTransform(text_group[0], w_1), run_time=0.3)
self.play(ReplacementTransform(line_group[0], line_1), run_time=0.3)
vect_new, dot_new, line_new, text_new = VGroup(vect_1), VGroup(dot_1), VGroup(line_1), VGroup(w_1)
for i in range(1, n):
zn_1 = z_new ** (i+1-1)
zn = z_new ** (i+1)
dot_i = Dot(cp.n2p(zn), color=PINK)
vect_i = Arrow(cp.n2p(0), cp.n2p(zn), buff=0, color=ORANGE)
text_i = TexMobject('\\omega_{', '%d}' % (i+1), color=PINK).scale(0.8).move_to(cp.n2p(0)).shift((cp.n2p(zn)-cp.n2p(0))/abs(zn) * (abs(zn) + 0.2))
line_i = Line(cp.n2p(zn_1), cp.n2p(zn), color=PINK)
angle_i = Angle(cp.n2p(zn_1), cp.n2p(0), cp.n2p(zn), radius=0.6, color=BLUE)
vect_new.add(vect_i), dot_new.add(dot_i), line_new.add(line_i), text_new.add(text_i)
# vect_group[i].become(vect_i)
# self.wait(dt)
self.play(ReplacementTransform(vect_group[i], vect_i), run_time=0.32-0.08*np.sqrt(i))
self.play(ReplacementTransform(angle_group[i], angle_i), run_time=0.32-0.08*np.sqrt(i))
self.play(ReplacementTransform(dot_group[i], dot_i), run_time=0.32-0.08*np.sqrt(i))
self.play(ReplacementTransform(text_group[i], text_i), run_time=0.32-0.08*np.sqrt(i))
self.play(ReplacementTransform(line_group[i], line_i), run_time=0.32-0.08*np.sqrt(i))
angle_11 = Angle(cp.n2p(1), cp.n2p(0), cp.n2p(np.exp(-1j * TAU/11)), radius=0.6, color=BLUE)
line_11 = Line(cp.n2p(np.exp(-1j * TAU/11)), cp.n2p(1), color=PINK)
self.play(ShowCreation(angle_11))
self.play(ShowCreation(line_11))
self.wait(5)
| 47.37799
| 161
| 0.572309
|
1feb0ff9d9988823f3205f7e840e590a3898c651
| 22,484
|
py
|
Python
|
addons/io_scene_gltf_ksons/material/__init__.py
|
Andgonag/gltf-blender-importer
|
a4c719dc155ff0726bc160057e411fa9f956db9f
|
[
"MIT"
] | 199
|
2017-03-14T18:41:55.000Z
|
2022-02-23T09:43:42.000Z
|
addons/io_scene_gltf_ksons/material/__init__.py
|
Andgonag/gltf-blender-importer
|
a4c719dc155ff0726bc160057e411fa9f956db9f
|
[
"MIT"
] | 38
|
2017-08-01T15:54:55.000Z
|
2019-08-15T07:59:55.000Z
|
addons/io_scene_gltf_ksons/material/__init__.py
|
Andgonag/gltf-blender-importer
|
a4c719dc155ff0726bc160057e411fa9f956db9f
|
[
"MIT"
] | 33
|
2017-06-27T20:29:16.000Z
|
2022-01-19T13:51:07.000Z
|
import json
import bpy
from .block import Block
from .texture import create_texture_block
from . import image, node_groups, precompute
# Re-exports
create_image = image.create_image
create_group = node_groups.create_group
material_precomputation = precompute.material_procomputation
def create_material(op, idx):
"""
Create a Blender material for the glTF materials[idx]. If idx is the
special value 'default_material', create a Blender material for the default
glTF material instead.
"""
mc = MaterialCreator()
mc.op = op
mc.idx = idx
mc.liveness = op.material_infos[idx].liveness
if idx == 'default_material':
mc.material = {}
material_name = 'glTF Default Material'
else:
mc.material = op.gltf['materials'][idx]
material_name = mc.material.get('name', 'materials[%d]' % idx)
if 'KHR_materials_unlit' in mc.material.get('extensions', {}):
mc.pbr = mc.material.get('pbrMetallicRoughness', {})
mc.type = 'unlit'
elif 'KHR_materials_pbrSpecularGlossiness' in mc.material.get('extensions', {}):
mc.pbr = mc.material['extensions']['KHR_materials_pbrSpecularGlossiness']
mc.type = 'specGloss'
else:
mc.pbr = mc.material.get('pbrMetallicRoughness', {})
mc.type = 'metalRough'
# Create a new Blender node-tree material and empty it
bl_material = bpy.data.materials.new(material_name)
bl_material.use_nodes = True
mc.tree = bl_material.node_tree
mc.links = mc.tree.links
while mc.tree.nodes:
mc.tree.nodes.remove(mc.tree.nodes[0])
create_node_tree(mc)
# Set the viewport alpha mode
alpha_mode = mc.material.get('alphaMode', 'OPAQUE')
double_sided = mc.material.get('doubleSided', False) or mc.op.options['always_doublesided']
if not double_sided and alpha_mode == 'OPAQUE':
# Since we use alpha to simulate backface culling
alpha_mode = 'MASK'
if alpha_mode not in ['OPAQUE', 'MASK', 'BLEND']:
print('unknown alpha mode %s' % alpha_mode)
alpha_mode = 'OPAQUE'
if getattr(bl_material, 'blend_method', None):
bl_material.blend_method = {
# glTF: Blender
'OPAQUE': 'OPAQUE',
'MASK': 'CLIP',
'BLEND': 'BLEND',
}[alpha_mode]
else:
bl_material.game_settings.alpha_blend = {
# glTF: Blender
'OPAQUE': 'OPAQUE',
'MASK': 'CLIP',
'BLEND': 'ALPHA',
}[alpha_mode]
# Set diffuse/specular color (for solid view)
if 'baseColorFactor' in mc.pbr:
diffuse_color = mc.pbr['baseColorFactor'][:len(bl_material.diffuse_color)]
bl_material.diffuse_color = diffuse_color
if 'diffuseFactor' in mc.pbr:
diffuse_color = mc.pbr['diffuseFactor'][:len(bl_material.diffuse_color)]
bl_material.diffuse_color = diffuse_color
if 'specularFactor' in mc.pbr:
specular_color = mc.pbr['specularFactor'][:len(bl_material.specular_color)]
bl_material.specular_color = specular_color
return bl_material
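# Illustrative usage sketch (hypothetical names: `op` is the importer operator, which is
# assumed to already carry op.gltf and op.material_infos; 3 is an arbitrary index):
#
#   bl_material = create_material(op, 3)
#   default_material = create_material(op, 'default_material')  # for primitives without a material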
def create_node_tree(mc):
emissive_block = None
if mc.type != 'unlit':
emissive_block = create_emissive(mc)
shaded_block = create_shaded(mc)
if emissive_block:
block = mc.adjoin({
'node': 'AddShader',
'input.0': emissive_block,
'input.1': shaded_block,
})
else:
block = shaded_block
alpha_block = create_alpha_block(mc)
if alpha_block:
# Push things into a better position
# [block] -> -> [mix]
# [alpha block]
alpha_block.pad_top(600)
combined_block = Block.row_align_center([block, alpha_block])
combined_block.outputs = \
[block.outputs[0], alpha_block.outputs[0], alpha_block.outputs[1]]
block = mc.adjoin({
'node': 'MixShader',
'output.0/input.2': combined_block,
'output.1/input.Fac': combined_block,
'output.2/input.1': combined_block,
})
mc.adjoin({
'node': 'OutputMaterial',
'input.Surface': block,
}).center_at_origin()
def create_emissive(mc):
if mc.type == 'unlit':
return None
block = None
if 'emissiveTexture' in mc.material:
block = create_texture_block(
mc,
'emissiveTexture',
mc.material['emissiveTexture']
)
block.img_node.label = 'EMISSIVE'
factor = mc.material.get('emissiveFactor', [0, 0, 0])
if factor != [1, 1, 1] or 'emissiveFactor' in mc.liveness:
if block:
block = mc.adjoin({
'node': 'MixRGB',
'prop.blend_type': 'MULTIPLY',
'input.Fac': Value(1),
'input.Color1': block,
'input.Color2': Value(factor + [1], record_to='emissiveFactor'),
})
else:
if factor == [0, 0, 0] and 'emissiveFactor' not in mc.liveness:
block = None
else:
block = Value(factor + [1], record_to='emissiveFactor')
if block:
block = mc.adjoin({
'node': 'Emission',
'input.Color': block,
})
return block
def create_alpha_block(mc):
alpha_mode = mc.material.get('alphaMode', 'OPAQUE')
double_sided = mc.material.get('doubleSided', False) or mc.op.options['always_doublesided']
if alpha_mode not in ['OPAQUE', 'MASK', 'BLEND']:
alpha_mode = 'OPAQUE'
# Create an empty block with the baseColor/diffuse texture's alpha
if alpha_mode != 'OPAQUE' and getattr(mc, 'img_node', None):
block = Block.empty(0, 0)
block.outputs = [mc.img_node.outputs[1]]
else:
block = None
# Alpha cutoff in MASK mode
if alpha_mode == 'MASK' and block:
alpha_cutoff = mc.material.get('alphaCutoff', 0.5)
block = mc.adjoin({
'node': 'Math',
'prop.operation': 'GREATER_THAN',
'input.0': block,
'input.1': Value(alpha_cutoff, record_to='alphaCutoff'),
})
# Handle doublesidedness
if not double_sided:
sided_block = mc.adjoin({
'node': 'NewGeometry',
})
sided_block = mc.adjoin({
'node': 'Math',
'prop.operation': 'SUBTRACT',
'input.0': Value(1),
'output.Backfacing/input.1': sided_block,
})
if block:
block = mc.adjoin({
'node': 'Math',
'prop.operation': 'MULTIPLY',
'input.1': block,
'input.0': sided_block,
})
else:
block = sided_block
if block:
transparent_block = mc.adjoin({
'node': 'BsdfTransparent',
})
alpha_block = Block.col_align_right([block, transparent_block])
alpha_block.outputs = [block.outputs[0], transparent_block.outputs[0]]
block = alpha_block
return block
def create_shaded(mc):
if mc.type == 'metalRough':
return create_metalRough_pbr(mc)
elif mc.type == 'specGloss':
return create_specGloss_pbr(mc)
elif mc.type == 'unlit':
return create_unlit(mc)
else:
assert(False)
def create_metalRough_pbr(mc):
params = {
'node': 'BsdfPrincipled',
'dim': (200, 540),
}
base_color_block = create_base_color(mc)
if base_color_block:
params['input.Base Color'] = base_color_block
metal_roughness_block = create_metal_roughness(mc)
if metal_roughness_block:
params['output.0/input.Metallic'] = metal_roughness_block
params['output.1/input.Roughness'] = metal_roughness_block
normal_block = create_normal_block(mc)
if normal_block:
params['input.Normal'] = normal_block
return mc.adjoin(params)
def create_specGloss_pbr(mc):
try:
bpy.context.scene.render.engine = 'BLENDER_EEVEE'
node = mc.tree.nodes.new('ShaderNodeEeveeSpecular')
mc.tree.nodes.remove(node)
has_specular_node = True
except Exception:
has_specular_node = False
if has_specular_node:
params = {
'node': 'EeveeSpecular',
'dim': (200, 540),
}
else:
params = {
'node': 'Group',
'group': 'pbrSpecularGlossiness',
'dim': (200, 540),
}
diffuse_block = create_diffuse(mc)
if diffuse_block:
params['input.Base Color'] = diffuse_block
spec_rough_block = create_spec_roughness(mc)
if spec_rough_block:
params['output.0/input.Specular'] = spec_rough_block
params['output.1/input.Roughness'] = spec_rough_block
normal_block = create_normal_block(mc)
if normal_block:
params['input.Normal'] = normal_block
if has_specular_node:
occlusion_block = create_occlusion_block(mc)
if occlusion_block:
params['output.0/input.Ambient Occlusion'] = occlusion_block
return mc.adjoin(params)
def create_unlit(mc):
params = {
# TODO: pick a better node?
'node': 'Emission',
}
base_color_block = create_base_color(mc)
if base_color_block:
params['input.Color'] = base_color_block
return mc.adjoin(params)
def create_base_color(mc):
block = None
if 'baseColorTexture' in mc.pbr:
block = create_texture_block(
mc,
'baseColorTexture',
mc.pbr['baseColorTexture'],
)
block.img_node.label = 'BASE COLOR'
# Remember for alpha value
mc.img_node = block.img_node
for color_set_num in range(0, mc.op.material_infos[mc.idx].num_color_sets):
vert_color_block = mc.adjoin({
'node': 'Attribute',
'prop.attribute_name': 'COLOR_%d' % color_set_num,
})
if block:
block = mc.adjoin({
'node': 'MixRGB',
'prop.blend_type': 'MULTIPLY',
'input.Fac': Value(1),
'input.Color1': block,
'input.Color2': vert_color_block,
})
else:
block = vert_color_block
factor = mc.pbr.get('baseColorFactor', [1, 1, 1, 1])
if factor != [1, 1, 1, 1] or 'baseColorFactor' in mc.liveness:
if block:
block = mc.adjoin({
'node': 'MixRGB',
'prop.blend_type': 'MULTIPLY',
'input.Fac': Value(1),
'input.Color1': block,
'input.Color2': Value(factor, record_to='baseColorFactor'),
})
else:
block = Value(factor, record_to='baseColorFactor')
return block
def create_diffuse(mc):
block = None
if 'diffuseTexture' in mc.pbr:
block = create_texture_block(
mc,
'diffuseTexture',
mc.pbr['diffuseTexture'],
)
block.img_node.label = 'DIFFUSE'
# Remember for alpha value
mc.img_node = block.img_node
for color_set_num in range(0, mc.op.material_infos[mc.idx].num_color_sets):
vert_color_block = mc.adjoin({
'node': 'Attribute',
'prop.attribute_name': 'COLOR_%d' % color_set_num,
})
if block:
block = mc.adjoin({
'node': 'MixRGB',
'prop.blend_type': 'MULTIPLY',
'input.Fac': Value(1),
'input.Color1': block,
'input.Color2': vert_color_block,
})
else:
block = vert_color_block
factor = mc.pbr.get('diffuseFactor', [1, 1, 1, 1])
if factor != [1, 1, 1, 1] or 'diffuseFactor' in mc.liveness:
if block:
block = mc.adjoin({
'node': 'MixRGB',
'prop.blend_type': 'MULTIPLY',
'input.Fac': Value(1),
'input.Color1': block,
'input.Color2': Value(factor, record_to='diffuseFactor'),
})
else:
block = Value(factor, record_to='diffuseFactor')
return block
def create_metal_roughness(mc):
block = None
if 'metallicRoughnessTexture' in mc.pbr:
tex_block = create_texture_block(
mc,
'metallicRoughnessTexture',
mc.pbr['metallicRoughnessTexture'],
)
tex_block.img_node.label = 'METALLIC ROUGHNESS'
tex_block.img_node.color_space = 'NONE'
block = mc.adjoin({
'node': 'SeparateRGB',
'input.Image': tex_block,
})
block.outputs = [block.outputs['B'], block.outputs['G']]
metal_factor = mc.pbr.get('metallicFactor', 1)
rough_factor = mc.pbr.get('roughnessFactor', 1)
if not block:
return [
Value(metal_factor, record_to='metallicFactor'),
            Value(rough_factor, record_to='roughnessFactor'),
]
if metal_factor != 1 or 'metallicFactor' in mc.liveness:
metal_factor_options = {
'node': 'Math',
'prop.operation': 'MULTIPLY',
'output.0/input.0': block,
'input.1': Value(metal_factor, record_to='metallicFactor'),
}
else:
metal_factor_options = {}
if rough_factor != 1 or 'roughnessFactor' in mc.liveness:
rough_factor_options = {
'node': 'Math',
'prop.operation': 'MULTIPLY',
'output.1/input.0': block,
'input.1': Value(rough_factor, record_to='roughnessFactor'),
}
else:
rough_factor_options = {}
return mc.adjoin_split(metal_factor_options, rough_factor_options, block)
def create_spec_roughness(mc):
block = None
if 'specularGlossinessTexture' in mc.pbr:
block = create_texture_block(
mc,
'specularGlossinessTexture',
mc.pbr['specularGlossinessTexture'],
)
block.img_node.label = 'SPECULAR GLOSSINESS'
spec_factor = mc.pbr.get('specularFactor', [1, 1, 1]) + [1]
gloss_factor = mc.pbr.get('glossinessFactor', 1)
if not block:
return [
Value(spec_factor, record_to='specularFactor'),
Value(gloss_factor, record_to='glossinessFactor'),
]
if spec_factor != [1, 1, 1, 1] or 'specularFactor' in mc.liveness:
spec_factor_options = {
'node': 'MixRGB',
            'prop.blend_type': 'MULTIPLY',
'input.Fac': Value(1),
'output.Color/input.Color1': block,
'input.Color2': Value(spec_factor, record_to='specularFactor'),
}
else:
spec_factor_options = {}
if gloss_factor != 1 or 'glossinessFactor' in mc.liveness:
gloss_factor_options = {
'node': 'Math',
'prop.operation': 'MULTIPLY',
'output.Alpha/input.0': block,
'input.1': Value(gloss_factor, record_to='glossinessFactor'),
}
else:
gloss_factor_options = {}
block = mc.adjoin_split(spec_factor_options, gloss_factor_options, block)
# Convert glossiness to roughness
return mc.adjoin_split(None, {
'node': 'Math',
'prop.operation': 'SUBTRACT',
'input.0': Value(1.0),
'output.1/input.1': block,
}, block)
def create_normal_block(mc):
if 'normalTexture' in mc.material:
tex_block = create_texture_block(
mc,
'normalTexture',
mc.material['normalTexture'],
)
tex_block.img_node.label = 'NORMAL'
tex_block.img_node.color_space = 'NONE'
return mc.adjoin({
'node': 'NormalMap',
'prop.uv_map': 'TEXCOORD_%d' % mc.material['normalTexture'].get('texCoord', 0),
'input.Strength': Value(mc.material['normalTexture'].get('scale', 1), record_to='normalTexture/scale'),
'input.Color': tex_block,
})
else:
return None
def create_occlusion_block(mc):
if 'occlusionTexture' in mc.material:
block = create_texture_block(
mc,
'occlusionTexture',
mc.material['occlusionTexture'],
)
block.img_node.label = 'OCCLUSION'
block.img_node.color_space = 'NONE'
        block = mc.adjoin({
'node': 'SeparateRGB',
'input.Image': block,
})
strength = mc.material['occlusionTexture'].get('strength', 1)
if strength != 1 or 'occlusionTexture/strength' in mc.liveness:
            block = mc.adjoin({
'node': 'Math',
'prop.operation': 'MULTIPLY',
'input.0': block,
'input.1': Value(strength, record_to='occlusionTexture/strength'),
})
return block
else:
return None
class MaterialCreator:
"""
Work-horse for creating nodes and automatically laying out blocks.
"""
def new_node(self, opts):
new_node = self.tree.nodes.new('ShaderNode' + opts['node'])
new_node.width = 140
new_node.height = 100
if 'group' in opts:
new_node.node_tree = self.op.get('node_group', opts['group'])
def str_or_int(x):
try:
return int(x)
except ValueError:
return x
input_blocks = []
for key, val in opts.items():
if key.startswith('input.'):
input_key = str_or_int(key[len('input.'):])
input_block = self.connect(val, 0, new_node, 'inputs', input_key)
if input_block and input_block not in input_blocks:
input_blocks.append(input_block)
elif key.startswith('output.'):
if '/' in key:
output_part, input_part = key.split('/')
output_key = str_or_int(output_part[len('output.'):])
input_key = str_or_int(input_part[len('input.'):])
input_block = self.connect(val, output_key, new_node, 'inputs', input_key)
if input_block and input_block not in input_blocks:
input_blocks.append(input_block)
else:
output_key = str_or_int(key[len('output.'):])
input_block = self.connect(val, 0, new_node, 'outputs', output_key)
if input_block and input_block not in input_blocks:
input_blocks.append(input_block)
elif key.startswith('prop.'):
prop_name = key[len('prop.'):]
setattr(new_node, prop_name, val)
elif key == 'dim':
new_node.width, new_node.height = val
return new_node, input_blocks
def adjoin(self, opts):
"""
Adjoins a new node. All the blocks that are used as inputs to it are
laid out in a column to its left.
[input1] -> [new_node]
[input2] ->
... ->
"""
new_node, input_blocks = self.new_node(opts)
input_block = Block.col_align_right(input_blocks)
block = Block.row_align_center([input_block, new_node])
block.outputs = new_node.outputs
return block
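    # Illustrative sketch (not from the original file) of the opts convention parsed by
    # new_node/adjoin: 'node' selects the ShaderNode type, 'prop.*' sets node properties,
    # 'input.*' wires a Value or an upstream block into an input socket, and
    # 'output.N/input.M' wires output N of the upstream block into input M, e.g.
    #
    #   mc.adjoin({
    #       'node': 'MixRGB',
    #       'prop.blend_type': 'MULTIPLY',
    #       'input.Fac': Value(1),
    #       'input.Color1': some_block,          # hypothetical upstream block
    #       'input.Color2': Value([1, 1, 1, 1]),
    #   })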
def adjoin_split(self, opts1, opts2, left_block):
"""
Adjoins at-most-two new nodes (either or both can be missing). They are
laid out in a column with left_block to their left. Return a block with
two outputs; the first is the output of the first block, or the first
output of left_block if missing; the second is the first output of the
second block, or the second of left_block if missing.
[left_block] -> [block1] ->
-> [block2] ->
"""
if not opts1 and not opts2:
return left_block
outputs = []
if opts1:
block1, __input_blocks = self.new_node(opts1)
outputs.append(block1.outputs[0])
else:
block1 = Block.empty()
outputs.append(left_block.outputs[0])
if opts2:
block2, __input_blocks = self.new_node(opts2)
outputs.append(block2.outputs[0])
else:
block2 = Block.empty()
outputs.append(left_block.outputs[1])
split_block = Block.col_align_right([block1, block2])
block = Block.row_align_center([left_block, split_block])
block.outputs = outputs
return block
def connect(self, connector, connector_key, node, socket_type, socket_key):
"""
Connect a connector, which may be either a socket or a Value (or
nothing) to a socket in the shader node tree.
"""
if connector is None:
return None
if type(connector) == Value:
connector = [connector]
if type(connector) == list:
self.connect_value(connector[connector_key], node, socket_type, socket_key)
return None
else:
assert(socket_type == 'inputs')
self.connect_block(connector, connector_key, node.inputs[socket_key])
return connector
def connect_value(self, value, node, socket_type, socket_key):
getattr(node, socket_type)[socket_key].default_value = value.value
# Record the data path to this socket in our material info so the
# animation creator can find it to animate
if value.record_to:
self.op.material_infos[self.idx].paths[value.record_to] = (
'nodes[' + json.dumps(node.name) + ']' +
'.' + socket_type + '[' + json.dumps(socket_key) + ']' +
'.default_value'
)
def connect_block(self, block, output_key, socket):
self.links.new(block.outputs[output_key], socket)
class Value:
"""
This is a helper class that tells the material creator to set the value of a
socket rather than connect it to another socket. The record_to property, if
present, is a key that the path to the socket should be remembered under.
Remembering the path to where a Value got written into the node tree is used
for animation importing (which needs to know where eg. the baseColorFactor
wound up; it could be in a Multiply node or directly in the color socket of
the Principled node, etc).
"""
def __init__(self, value, record_to=''):
self.value = value
self.record_to = record_to
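# Usage sketch (illustrative; the factor value is arbitrary): a Value with record_to both
# sets the socket's default_value and records where it was written, e.g.
#
#   Value([0.2, 0.2, 0.2, 1], record_to='emissiveFactor')
#
# after which op.material_infos[idx].paths['emissiveFactor'] holds a data path of the form
# "nodes[...].inputs[...].default_value" that the animation importer can later target.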
| 32.304598
| 115
| 0.578456
|
38df2b0cd3f04801de3f7e7a492199b1d64f7390
| 1,341
|
py
|
Python
|
src/pygenec/cruzamento/umponto.py
|
duducosmos/pygenec
|
6d3ec464231ae7e1f9f39e4fa3ff3a3a71e45f06
|
[
"Apache-2.0"
] | 1
|
2019-08-05T17:23:15.000Z
|
2019-08-05T17:23:15.000Z
|
src/pygenec/cruzamento/umponto.py
|
duducosmos/pygenec
|
6d3ec464231ae7e1f9f39e4fa3ff3a3a71e45f06
|
[
"Apache-2.0"
] | null | null | null |
src/pygenec/cruzamento/umponto.py
|
duducosmos/pygenec
|
6d3ec464231ae7e1f9f39e4fa3ff3a3a71e45f06
|
[
"Apache-2.0"
] | 1
|
2021-01-10T19:45:31.000Z
|
2021-01-10T19:45:31.000Z
|
#!/usr/bin/env python3.6
# -*- Coding: UTF-8 -*-
"""
One-point crossover.
Program under the GNU V.3 license.
Developed by: E. S. Pereira.
Version 0.0.1.
"""
from numpy.random import randint
from numpy import array
from .cruzamento import Cruzamento, NoCompatibleIndividualSize
class UmPonto(Cruzamento):
"""
    Population generator via crossover using the one-point operator.
    Input:
        tamanho_populacao - Final size of the resulting population.
"""
def __init__(self, tamanho_populacao):
super(UmPonto, self).__init__(tamanho_populacao)
    def cruzamento(self, progenitor1, progenitor2):
"""
        One-point crossover of two individuals.
        Input:
            progenitor1 - First individual
            progenitor2 - Second individual
        Both individuals must have the same length, otherwise an error
        is raised.
"""
n1 = len(progenitor1)
n2 = len(progenitor2)
if n1 != n2:
msg = "Tamanho ind1 {0} diferente de ind2 {1}".format(n1, n2)
raise NoCompatibleIndividualSize(msg)
ponto = randint(1, n1 - 1)
desc1 = progenitor1.copy()
desc2 = progenitor2.copy()
desc1[ponto:] = progenitor2[ponto:]
desc2[ponto:] = progenitor1[ponto:]
return desc1, desc2
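# Illustrative sketch, not part of the original module. It assumes the Cruzamento base
# class only needs the population size (as this subclass suggests) and uses two made-up
# 6-gene parents to show a single one-point crossover.
if __name__ == "__main__":
    progenitor1 = array([0, 0, 0, 0, 0, 0])
    progenitor2 = array([1, 1, 1, 1, 1, 1])
    cruzador = UmPonto(tamanho_populacao=10)
    desc1, desc2 = cruzador.cruzamento(progenitor1, progenitor2)
    print(desc1, desc2)  # e.g. [0 0 1 1 1 1] and [1 1 0 0 0 0] when ponto == 2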
| 26.294118
| 77
| 0.634601
|
2c0eaa83a4dcc60d1705291e3dffac497e766e1b
| 357
|
py
|
Python
|
traceroute/src/simple_send_receive.py
|
Ak-Shaw/Networking
|
03eba022f368155bd40b8e53455282e1469469d1
|
[
"MIT"
] | 3
|
2020-10-17T07:06:06.000Z
|
2020-11-09T03:49:27.000Z
|
traceroute/src/simple_send_receive.py
|
Ak-Shaw/Networking
|
03eba022f368155bd40b8e53455282e1469469d1
|
[
"MIT"
] | 10
|
2020-10-19T06:50:54.000Z
|
2020-11-01T18:07:16.000Z
|
traceroute/src/simple_send_receive.py
|
Ak-Shaw/Networking
|
03eba022f368155bd40b8e53455282e1469469d1
|
[
"MIT"
] | 5
|
2020-10-18T01:20:23.000Z
|
2020-10-25T16:08:33.000Z
|
from scapy.all import *
import sys
host=sys.argv[1]
ttl = 200
print("Sending packet to "+host);
ipLayer = IP()
ipLayer.dst = host #set destination host
ipLayer.ttl = ttl #set TTL for packet
ICMPpkt = ICMP()
pkt = ipLayer/ICMPpkt
reply = sr1(pkt, verbose = 0) #send packet and wait for reply
print("Reply: Type:",reply[ICMP].type, "Source:"+reply[IP].src)
| 23.8
| 63
| 0.708683
|
8713003490e3752136167d18d0a37bf4e25eb98d
| 664
|
py
|
Python
|
django101/django102/migrations/0004_person.py
|
Minkov/python-web-2020-09
|
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
|
[
"MIT"
] | 4
|
2020-10-30T23:13:50.000Z
|
2020-12-26T21:35:00.000Z
|
django101/django102/migrations/0004_person.py
|
Minkov/python-web-2020-09
|
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
|
[
"MIT"
] | null | null | null |
django101/django102/migrations/0004_person.py
|
Minkov/python-web-2020-09
|
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
|
[
"MIT"
] | 7
|
2020-09-17T13:08:35.000Z
|
2020-10-31T15:01:46.000Z
|
# Generated by Django 3.1.2 on 2020-10-06 15:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django102', '0003_auto_20201006_1815'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=20)),
('last_name', models.CharField(max_length=20)),
('age', models.IntegerField(default=0)),
],
),
]
| 28.869565
| 115
| 0.557229
|
5ef69d018fe0ca2a339d52e7107cbe3e2f2e2ce6
| 740
|
py
|
Python
|
tests/test_5_auth.py
|
Pytlicek/FlaskRSS
|
d7cb4192f5b62630de3312837826c3a97b4a3c2f
|
[
"MIT"
] | 2
|
2021-11-12T23:15:47.000Z
|
2021-12-07T22:18:38.000Z
|
tests/test_5_auth.py
|
Pytlicek/FlaskRSS
|
d7cb4192f5b62630de3312837826c3a97b4a3c2f
|
[
"MIT"
] | 1
|
2020-11-26T15:58:10.000Z
|
2020-11-26T15:58:44.000Z
|
tests/test_5_auth.py
|
Pytlicek/FlaskRSS
|
d7cb4192f5b62630de3312837826c3a97b4a3c2f
|
[
"MIT"
] | null | null | null |
from flask import url_for
from app import app
def test_index_path(client):
response = client.get("/logout", follow_redirects=True)
assert response.status_code == 200
assert response.status_code != 302
assert "Login" in str(response.data)
assert "Articles:" not in str(response.data)
def test_feed_path_unauthorized(client):
response = client.get("/feeds")
assert response.status_code != 200
assert response.status_code == 302
assert "Redirecting..." in str(response.data)
def test_search_get_unauthorized(client):
response = client.get("/search", follow_redirects=True)
assert response.status_code == 200
assert response.status_code != 302
assert "predam" not in str(response.data)
| 29.6
| 59
| 0.728378
|
ffb67c5d5780c20a48623adde8f0e537aedd2483
| 154
|
py
|
Python
|
personal/views.py
|
florimondmanca/personal-api
|
6300f965d3f51d1bf5f10cf1eb15d673bd627631
|
[
"MIT"
] | 4
|
2018-08-17T08:06:06.000Z
|
2020-02-20T15:15:56.000Z
|
personal/views.py
|
florimondmanca/personal-api
|
6300f965d3f51d1bf5f10cf1eb15d673bd627631
|
[
"MIT"
] | 2
|
2018-10-08T15:59:58.000Z
|
2018-10-20T16:50:13.000Z
|
personal/views.py
|
florimondmanca/personal-api
|
6300f965d3f51d1bf5f10cf1eb15d673bd627631
|
[
"MIT"
] | 1
|
2019-09-14T23:15:10.000Z
|
2019-09-14T23:15:10.000Z
|
"""Project views."""
from django.views.generic import RedirectView
class IndexView(RedirectView):
"""Home page."""
pattern_name = "post-list"
| 15.4
| 45
| 0.688312
|
4bfd5b383e82e1a272cba3dcabf477fc62eea066
| 50,490
|
py
|
Python
|
tarpn/ax25/statemachine.py
|
rxt1077/tarpn-node-controller
|
ffbe1d78fbd1c10e891b3339b50002e5233e21ad
|
[
"MIT"
] | null | null | null |
tarpn/ax25/statemachine.py
|
rxt1077/tarpn-node-controller
|
ffbe1d78fbd1c10e891b3339b50002e5233e21ad
|
[
"MIT"
] | null | null | null |
tarpn/ax25/statemachine.py
|
rxt1077/tarpn-node-controller
|
ffbe1d78fbd1c10e891b3339b50002e5233e21ad
|
[
"MIT"
] | null | null | null |
"""
AX.25 State Machine Code
"""
import asyncio
from asyncio import Future
import queue
from dataclasses import dataclass, field
from enum import Enum, auto
from functools import partial
import logging
from typing import Callable, cast, Dict, Optional, List, Tuple
from tarpn.ax25 import AX25Call, AX25Packet, IFrame, SFrame, SupervisoryType, UFrame, UnnumberedType, UIFrame, \
L3Protocol, InternalInfo, SupervisoryCommand, DummyPacket, AX25, AX25StateType
from tarpn.log import LoggingMixin
from tarpn.util import AsyncioTimer, between, Timer
class AX25EventType(Enum):
AX25_UA = auto()
AX25_DM = auto()
AX25_UI = auto()
AX25_DISC = auto()
AX25_SABM = auto()
AX25_SABME = auto()
AX25_UNKNOWN = auto()
AX25_INFO = auto()
AX25_FRMR = auto()
AX25_RR = auto()
AX25_RNR = auto()
AX25_SREJ = auto()
AX25_REJ = auto()
T1_EXPIRE = auto()
T3_EXPIRE = auto()
DL_CONNECT = auto()
DL_DISCONNECT = auto()
DL_DATA = auto()
DL_UNIT_DATA = auto()
# DL_FLOW_OFF,
# DL_FLOW_ON,
IFRAME_READY = auto()
def __repr__(self):
return self.name
def __str__(self):
return self.name
@dataclass
class AX25StateEvent:
remote_call: AX25Call
packet: Optional[AX25Packet]
event_type: AX25EventType
future: Future = None
def __repr__(self):
if self.packet is not None:
return f"{self.event_type} {self.packet}"
else:
return f"{self.event_type}"
@classmethod
def t1_expire(cls, remote_call: AX25Call):
return cls(remote_call, None, AX25EventType.T1_EXPIRE)
@classmethod
def t3_expire(cls, remote_call: AX25Call):
return cls(remote_call, None, AX25EventType.T3_EXPIRE)
@classmethod
def iframe_ready(cls, remote_call: AX25Call):
return cls(remote_call, None, AX25EventType.IFRAME_READY)
@classmethod
def from_packet(cls, packet: AX25Packet):
if isinstance(packet, IFrame):
return cls(packet.source, packet, AX25EventType.AX25_INFO)
elif isinstance(packet, SFrame):
event_type = {
SupervisoryType.RR: AX25EventType.AX25_RR,
SupervisoryType.RNR: AX25EventType.AX25_RNR,
SupervisoryType.REJ: AX25EventType.AX25_REJ
}.get(packet.control_type, AX25EventType.AX25_UNKNOWN)
return cls(packet.source, packet, event_type)
elif isinstance(packet, UFrame):
event_type = {
UnnumberedType.DISC: AX25EventType.AX25_DISC,
UnnumberedType.DM: AX25EventType.AX25_DM,
UnnumberedType.FRMR: AX25EventType.AX25_FRMR,
UnnumberedType.SABM: AX25EventType.AX25_SABM,
UnnumberedType.UA: AX25EventType.AX25_UA,
UnnumberedType.UI: AX25EventType.AX25_UI,
}.get(packet.u_type, AX25EventType.AX25_UNKNOWN)
return cls(packet.source, packet, event_type)
elif isinstance(packet, UIFrame):
return cls(packet.source, packet, AX25EventType.AX25_UI)
else:
return cls(packet.source, packet, AX25EventType.AX25_UNKNOWN)
@classmethod
def dl_unit_data(cls, dest: AX25Call, protocol: L3Protocol, info: bytes):
return cls(dest, InternalInfo.internal_info(protocol, info), AX25EventType.DL_UNIT_DATA)
@classmethod
def dl_data(cls, dest: AX25Call, protocol: L3Protocol, info: bytes):
return cls(dest, InternalInfo.internal_info(protocol, info), AX25EventType.DL_DATA)
@classmethod
def dl_connect(cls, dest: AX25Call, source: AX25Call):
dummy = DummyPacket.dummy(dest, source)
return cls(dest, dummy, AX25EventType.DL_CONNECT)
@classmethod
def dl_disconnect(cls, dest: AX25Call, source: AX25Call):
dummy = DummyPacket.dummy(dest, source)
return cls(dest, dummy, AX25EventType.DL_DISCONNECT)
@dataclass
class AX25State:
"""Represents the internal state of an AX.25 connection. This is used in conjunction with
the state machine to manage the connection state and interface with the DL (Data-Link) layer
"""
session_id: str
"""Unique key for this state, by default the remote callsign+ssid"""
remote_call: AX25Call
"""Remote station connecting to the local node"""
local_call: AX25Call
"""Local station's callsign"""
internal_event_cb: Callable[[AX25StateEvent], None] = field(repr=False)
"""Callback for internal state machine events such as timeouts"""
t1: Timer = field(default=None, repr=False)
"""T1 timer. This is a timeout for hearing Info acks in connected mode"""
t3: Timer = field(default=None, repr=False)
"""T3 timer. This is an idle timeout. Ensure's that a link is still alive"""
current_state: AX25StateType = AX25StateType.Disconnected
vs: int = 0
"""
V(S) Send State Variable
The send state variable exists within the TNC and is never sent. It contains the next sequential number to be
assigned to the next transmitted I frame. This variable is updated with the transmission of each I frame.
N(S) Send Sequence Number
The send sequence number is found in the control field of all I frames. It contains the sequence number of the
I frame being sent. Just prior to the transmission of the I frame, N(S) is updated to equal the send state variable.
"""
vr: int = 0
"""
V(R) Receive State Variable
The receive state variable exists within the TNC. It contains the sequence number of the next expected received
I frame. This variable is updated upon the reception of an error-free I frame whose send sequence number equals the
present received state variable value.
N(R) Received Sequence Number
The received sequence number exists in both I and S frames. Prior to sending an I or S frame, this variable is
updated to equal that of the received state variable, thus implicitly acknowledging the proper reception of all
frames up to and including N(R)-1
"""
va: int = 0
"""
V(A) Acknowledge State Variable
The acknowledge state variable exists within the TNC and is never sent. It contains the sequence number of the last
frame acknowledged by its peer [V(A)-1 equals the N(S) of the last acknowledged I frame].
"""
retry_count: int = 0
"""Seen as RC in the specification"""
ack_pending: bool = False
smoothed_roundtrip_time_ms: int = 1000
"""Seen as SRT in the specification"""
reject_exception: bool = False
layer_3: bool = False
# TODO other fields
pending_frames: queue.Queue = field(default_factory=queue.Queue, repr=False) # (InternalInfo, Future)
sent_frames: Dict[int, IFrame] = field(default_factory=dict, repr=False)
futures: Dict[int, Future] = field(default_factory=dict, repr=False)
def log_prefix(self):
return f"AX25 [Id={self.session_id} Local={self.local_call} Remote={self.remote_call} State={self.current_state}]"
@classmethod
def create(cls,
remote_call: AX25Call,
local_call: AX25Call,
internal_event_cb: Callable[[AX25StateEvent], None],
timer_factory: Callable[[float, Callable[[], None]], Timer]):
def async_callback(event: AX25StateEvent) -> None:
cb = timer_factory(0, partial(internal_event_cb, event))
cb.start()
new_state = cls(str(remote_call), remote_call, local_call, async_callback)
new_state.t1 = timer_factory(1_000, new_state.t1_timeout) # TODO configure these
new_state.t3 = timer_factory(180_000, new_state.t3_timeout)
return new_state
def t1_timeout(self):
self.internal_event_cb(AX25StateEvent.t1_expire(self.remote_call))
def t3_timeout(self):
self.internal_event_cb(AX25StateEvent.t3_expire(self.remote_call))
def reset(self):
self.vs = 0
self.vr = 0
self.va = 0
self.retry_count = 0
self.smoothed_roundtrip_time_ms = 1_000
self.t1.delay = 1_000
self.t1.cancel()
self.t3.cancel()
def clear_exception_conditions(self):
# // Clear peer busy
# // Clear reject exception
# // Clear own busy
self.ack_pending = False
def clear_pending_iframes(self):
for i in range(self.pending_frames.qsize()):
(iframe, future) = self.pending_frames.get()
future.cancel("Cleared")
self.pending_frames.task_done()
def push_iframe(self, i_frame: InternalInfo, future: Future):
self.pending_frames.put((i_frame, future))
self.internal_event_cb(AX25StateEvent.iframe_ready(self.remote_call))
def get_send_state(self):
return self.vs % 8
def set_send_state(self, vs: int):
#self.print_window()
self.vs = vs
def inc_send_state(self):
#self.print_window()
self.vs += 1
def get_recv_state(self):
return self.vr % 8
def set_recv_state(self, vr):
#self.print_window()
self.vr = vr
def inc_recv_state(self):
#self.print_window()
self.vr += 1
def get_ack_state(self):
return self.va & 0xFF
def set_ack_state(self, va):
#self.print_window()
self.va = va & 0xFF
def print_window(self):
print(f"{self.local_call} V(A)={self.get_ack_state()} V(R)={self.get_recv_state()} V(S)={self.get_send_state()}"
f" window_exceeded={self.window_exceeded()}")
def window_exceeded(self):
"""If V(S) is equal to V(A) + window size (7) means we can't transmit any more until we get an ACK"""
return (self.vs % 8) == ((self.va + 7) % 8)
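    # Worked example (illustrative): with V(A) = 2 and seven unacknowledged I frames
    # outstanding, V(S) has advanced to 9, so V(S) % 8 == 1 == (V(A) + 7) % 8 and
    # window_exceeded() is True; transmission must pause until an RR advances V(A).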
def check_send_eq_ack(self):
return self.vs % 8 == self.va
def enqueue_info_ack(self, ax25: AX25, final=True):
rr = SFrame.s_frame(self.remote_call, self.local_call, [], SupervisoryCommand.Response,
SupervisoryType.RR,
self.get_recv_state(), final)
ax25.write_packet(rr)
self.ack_pending = False
def check_ui(ui_frame: UIFrame, ax25: AX25):
if ui_frame.get_command() == SupervisoryCommand.Command:
# TODO check length, error K
ax25.dl_data_indication(ui_frame.source, ui_frame.dest, ui_frame.protocol, ui_frame.info)
else:
ax25.dl_error(ui_frame.source, ui_frame.dest, "Q")
def establish_data_link(state: AX25State, ax25: AX25):
state.clear_exception_conditions()
state.retry_count = 0
sabm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.SABM, True)
ax25.write_packet(sabm)
state.t3.cancel()
state.t1.start()
def transmit_enquiry(state: AX25State, ax25: AX25):
rr = SFrame.s_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
SupervisoryType.RR, state.get_recv_state(), True)
ax25.write_packet(rr)
state.ack_pending = False
state.t1.start()
def enquiry_response(state: AX25State, ax25: AX25, final=True):
rr = SFrame.s_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response, SupervisoryType.RR,
state.get_recv_state(), final)
ax25.write_packet(rr)
state.ack_pending = False
def select_t1_value(state: AX25State):
if state.retry_count == 0:
srt = 7. / 8. * state.smoothed_roundtrip_time_ms + (1. / 8. * state.t1.delay) - (1. / 8. * state.t1.remaining())
state.smoothed_roundtrip_time_ms = srt
state.t1.delay = srt * 2
else:
t1 = pow(2, (state.retry_count + 1.0)) * state.smoothed_roundtrip_time_ms
state.t1.delay = t1
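# Worked example for select_t1_value (illustrative numbers): on a clean exchange
# (retry_count == 0) with SRT = 1000 ms, t1.delay = 1000 ms and 200 ms remaining on the
# timer, the new smoothed round-trip time is 7/8*1000 + 1/8*1000 - 1/8*200 = 975 ms and
# T1's delay becomes 2 * 975 = 1950 ms. After retries, T1 instead backs off exponentially
# as 2**(retry_count + 1) * SRT.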
def check_iframe_ack(state: AX25State, nr: int):
if nr == state.get_send_state():
state.set_ack_state(nr & 0xFF)
vs = (nr + 7) % 8
fut = state.futures.get(vs)
if fut and not fut.done():
fut.set_result(None)
state.t1.cancel()
state.t3.start()
select_t1_value(state)
elif nr != state.get_ack_state():
state.set_ack_state(nr & 0xFF)
vs = (nr + 7) % 8
fut = state.futures.get(vs)
if fut and not fut.done():
fut.set_result(None)
state.t1.start()
async def delay_outgoing_data(state: AX25State, pending: InternalInfo, future: Future):
await asyncio.sleep(0.200)
state.push_iframe(pending, future)
def check_need_for_response(state: AX25State, ax25: AX25, s_frame: SFrame):
if s_frame.get_command() and s_frame.poll_final:
enquiry_response(state, ax25)
elif s_frame.get_command() == SupervisoryCommand.Response and s_frame.poll_final:
ax25.dl_error(state.remote_call, state.local_call, "A")
def check_nr(state: AX25State, nr: int):
if state.get_send_state() < state.get_ack_state():
# Window wrap-around case
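        # e.g. (hypothetical values, assuming between() is inclusive): V(A)=6 and V(S)=2 accept
        # N(R) values 6, 7, 0, 1 and 2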
return between(nr, state.get_ack_state(), 7) or \
between(nr, 0, state.get_send_state())
else:
return between(nr, state.get_ack_state(), state.get_send_state())
def nr_error_recovery(state: AX25State, ax25: AX25):
ax25.dl_error(state.remote_call, state.local_call, "J")
establish_data_link(state, ax25)
state.layer_3 = False
def invoke_retransmission(state: AX25State, ax25: AX25):
x = state.get_send_state()
    # Backtrack from the oldest unacknowledged frame: V(A), which was just updated from N(R)
    vs = state.get_ack_state()
    while vs != x:
        old_frame = state.sent_frames[vs]
        old_future = state.futures[vs]
        state.push_iframe(InternalInfo.internal_info(old_frame.protocol, old_frame.info), old_future)
        vs = (vs + 1) % 8
def disconnected_handler(
state: AX25State,
event: AX25StateEvent,
ax25: AX25,
logger: LoggingMixin) -> AX25StateType:
"""Handle packets when we are in a disconnected state
"""
assert state.current_state == AX25StateType.Disconnected
if event.event_type == AX25EventType.AX25_UA:
ax25.dl_error(event.packet.source, event.packet.dest, "C")
ax25.dl_error(event.packet.source, event.packet.dest, "D")
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_DM:
# do nothing
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_UI:
ui_frame = cast(UIFrame, event.packet)
check_ui(ui_frame, ax25)
if ui_frame.poll_final:
dm_response = UFrame.u_frame(ui_frame.source, ui_frame.dest, [], SupervisoryCommand.Response,
UnnumberedType.DM, ui_frame.poll_final)
ax25.write_packet(dm_response)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.DL_DISCONNECT:
ax25.dl_disconnect_indication(event.packet.source, event.packet.dest)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_DISC:
u_frame = cast(UFrame, event.packet)
dm_response = UFrame.u_frame(u_frame.source, u_frame.dest, [], SupervisoryCommand.Response,
UnnumberedType.DM, u_frame.poll_final)
ax25.write_packet(dm_response)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.DL_UNIT_DATA:
internal_info = cast(InternalInfo, event.packet)
ui = UIFrame.ui_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command, False,
internal_info.protocol, internal_info.info)
ax25.write_packet(ui)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.DL_DATA:
event.future.set_exception(RuntimeError("Not connected"))
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.DL_CONNECT:
state.reset()
establish_data_link(state, ax25)
state.layer_3 = True
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_SABM:
sabm_frame = cast(UIFrame, event.packet)
ua_resp = UFrame.u_frame(sabm_frame.source, sabm_frame.dest, [],
SupervisoryCommand.Response, UnnumberedType.UA, True)
ax25.write_packet(ua_resp)
state.reset()
ax25.dl_connect_indication(sabm_frame.source, sabm_frame.dest)
state.t3.start()
return AX25StateType.Connected
elif event.event_type in (AX25EventType.AX25_RR, AX25EventType.AX25_RNR, AX25EventType.AX25_REJ,
AX25EventType.AX25_FRMR, AX25EventType.AX25_SREJ):
s_frame = cast(SFrame, event.packet)
# Send DM
dm_response = UFrame.u_frame(s_frame.source, s_frame.dest, [], SupervisoryCommand.Response,
UnnumberedType.DM, s_frame.poll_final)
ax25.write_packet(dm_response)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_INFO:
i_frame = cast(IFrame, event.packet)
dm_response = UFrame.u_frame(i_frame.source, i_frame.dest, [], SupervisoryCommand.Response,
UnnumberedType.DM, i_frame.poll)
ax25.write_packet(dm_response)
return AX25StateType.Disconnected
else:
logger.debug(f"Ignoring {event}")
return AX25StateType.Disconnected
def awaiting_connection_handler(
state: AX25State,
event: AX25StateEvent,
ax25: AX25,
logger: LoggingMixin) -> AX25StateType:
"""Handle packets when we are in a awaiting connection state
"""
assert state.current_state == AX25StateType.AwaitingConnection
if event.event_type == AX25EventType.DL_CONNECT:
state.clear_pending_iframes()
state.layer_3 = True
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_SABM:
u_frame = cast(UFrame, event.packet)
ua = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.UA, u_frame.poll_final)
ax25.write_packet(ua)
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_DISC:
u_frame = cast(UFrame, event.packet)
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.DM, u_frame.poll_final)
ax25.write_packet(dm)
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.DL_DATA:
if not state.layer_3:
pending = cast(InternalInfo, event.packet)
state.push_iframe(pending, event.future)
else:
event.future.set_exception(RuntimeError("Not connected"))
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.IFRAME_READY:
if not state.layer_3:
(pending, future) = state.pending_frames.get()
#asyncio.ensure_future(delay_outgoing_data(state, pending, future))
state.push_iframe(pending, future)
state.pending_frames.task_done()
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_UI:
ui_frame = cast(UIFrame, event.packet)
check_ui(ui_frame, ax25)
if ui_frame.poll_final:
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.DM, True)
ax25.write_packet(dm)
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.DL_UNIT_DATA:
pending = cast(InternalInfo, event.packet)
ui = UIFrame.ui_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
False, pending.protocol, pending.info)
ax25.write_packet(ui)
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_DM:
u_frame = cast(UFrame, event.packet)
if u_frame.poll_final:
state.clear_pending_iframes()
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
state.t1.cancel()
return AX25StateType.Disconnected
else:
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_UA:
u_frame = cast(UFrame, event.packet)
if u_frame.poll_final:
if state.layer_3:
ax25.dl_connect_indication(state.remote_call, state.local_call)
else:
if state.get_send_state() != state.get_ack_state():
state.clear_pending_iframes()
ax25.dl_connect_indication(state.remote_call, state.local_call)
state.reset()
select_t1_value(state)
return AX25StateType.Connected
else:
ax25.dl_error(state.remote_call, state.local_call, "D")
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_SABME:
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.DM, True)
ax25.write_packet(dm)
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.T1_EXPIRE:
if state.retry_count < 4: # TODO config this
state.retry_count += 1
sabm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.SABM, True)
ax25.write_packet(sabm)
select_t1_value(state)
state.t1.start()
return AX25StateType.AwaitingConnection
else:
ax25.dl_error(state.remote_call, state.local_call, "G")
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_FRMR:
state.smoothed_roundtrip_time_ms = 1000
state.t1.delay = state.smoothed_roundtrip_time_ms * 2
establish_data_link(state, ax25)
state.layer_3 = True
return AX25StateType.AwaitingConnection
else:
logger.debug(f"Ignoring {event}")
return AX25StateType.AwaitingConnection
def connected_handler(
state: AX25State,
event: AX25StateEvent,
ax25: AX25,
logger: LoggingMixin) -> AX25StateType:
assert state.current_state == AX25StateType.Connected
if event.event_type == AX25EventType.DL_CONNECT:
state.clear_pending_iframes()
establish_data_link(state, ax25)
# Set Layer 3
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.DL_DISCONNECT:
state.clear_pending_iframes()
state.retry_count = 0
u_frame = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DISC, True)
ax25.write_packet(u_frame)
state.t3.cancel()
state.t1.start()
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.DL_DATA:
pending = cast(InternalInfo, event.packet)
state.push_iframe(pending, event.future)
return AX25StateType.Connected
elif event.event_type == AX25EventType.IFRAME_READY:
        # The queue holds (InternalInfo, Future) tuples; no cast is needed to unpack them
        (pending, future) = state.pending_frames.get()
logger.debug(f"Pending iframe: {pending}")
if state.window_exceeded():
logger.debug(f"Window exceeded, delaying frame")
#asyncio.create_task(delay_outgoing_data(state, pending, future))
state.push_iframe(pending, future)
else:
i_frame = IFrame.i_frame(
dest=state.remote_call,
source=state.local_call,
repeaters=[],
command=SupervisoryCommand.Command,
poll=False, # Ensure we get RR from other end
receive_seq_number=state.get_recv_state(),
send_seq_number=state.get_send_state(),
protocol=pending.protocol,
info=pending.info)
ax25.write_packet(i_frame)
state.sent_frames[state.get_send_state()] = i_frame
state.futures[state.get_send_state()] = future
# Complete the future indicating the DL_DATA event was sent out
if future:
future.set_result(None)
state.inc_send_state()
state.ack_pending = False
            if not state.t1.running():  # only (re)start T1 when it is not already running, as in timer_recovery_handler
state.t3.cancel()
state.t1.start()
state.pending_frames.task_done()
return AX25StateType.Connected
elif event.event_type == AX25EventType.T1_EXPIRE:
state.retry_count = 1
transmit_enquiry(state, ax25)
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.T3_EXPIRE:
state.retry_count = 0
transmit_enquiry(state, ax25)
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.AX25_SABM:
u_frame = cast(UFrame, event.packet)
ua = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.UA, u_frame.poll_final)
ax25.write_packet(ua)
state.clear_exception_conditions()
ax25.dl_error(state.remote_call, state.local_call, "F")
        if state.get_send_state() != state.get_ack_state():  # outstanding unacknowledged I-frames are lost on link reset
state.clear_pending_iframes()
ax25.dl_connect_indication(state.remote_call, state.local_call)
state.reset()
return AX25StateType.Connected
elif event.event_type == AX25EventType.AX25_DISC:
state.clear_pending_iframes()
u_frame = cast(UFrame, event.packet)
ua = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.UA, u_frame.poll_final)
ax25.write_packet(ua)
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
state.t1.cancel()
state.t3.cancel()
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_UA:
ax25.dl_error(state.remote_call, state.local_call, "C")
establish_data_link(state, ax25)
state.layer_3 = False
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_DM:
ax25.dl_error(state.remote_call, state.local_call, "E")
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
state.clear_pending_iframes()
state.t1.cancel()
state.t3.cancel()
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.DL_UNIT_DATA:
(info, future) = state.pending_frames.get()
ui_frame = UIFrame.ui_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command, True,
info.protocol, info.info)
ax25.write_packet(ui_frame)
state.pending_frames.task_done()
if future:
future.set_result(None) # No ack's needed for unit data
return AX25StateType.Connected
elif event.event_type == AX25EventType.AX25_UI:
ui_frame = cast(UIFrame, event.packet)
ax25.dl_data_indication(state.remote_call, state.local_call, ui_frame.protocol, ui_frame.info)
if ui_frame.poll_final:
enquiry_response(state, ax25)
return AX25StateType.Connected
elif event.event_type in (AX25EventType.AX25_RR, AX25EventType.AX25_RNR):
# TODO set peer busy if RNR, else clear peer busy
s_frame = cast(SFrame, event.packet)
check_need_for_response(state, ax25, s_frame)
if check_nr(state, s_frame.receive_seq_number):
check_iframe_ack(state, s_frame.receive_seq_number)
return AX25StateType.Connected
else:
logger.warning(f"N(R) error recovery, V(A)={state.get_ack_state()} N(R)={s_frame.receive_seq_number} "
f"V(S)={state.get_send_state()}")
nr_error_recovery(state, ax25)
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_INFO:
i_frame = cast(IFrame, event.packet)
if i_frame.get_command() == SupervisoryCommand.Command:
if check_nr(state, i_frame.receive_seq_number):
check_iframe_ack(state, i_frame.receive_seq_number)
if i_frame.send_seq_number == state.get_recv_state():
state.inc_recv_state()
state.reject_exception = False
state.enqueue_info_ack(ax25, i_frame.poll)
# This should be before the info ack in theory
ax25.dl_data_indication(state.remote_call, state.local_call, i_frame.protocol, i_frame.info)
else:
if state.reject_exception:
if i_frame.poll:
rr = SFrame.s_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
SupervisoryType.RR, state.get_recv_state(), True)
ax25.write_packet(rr)
state.ack_pending = False
else:
state.reject_exception = True
rej = SFrame.s_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
SupervisoryType.REJ, state.get_recv_state(), i_frame.poll)
ax25.write_packet(rej)
return AX25StateType.Connected
else:
logger.warning(f"N(R) error recovery, V(A)={state.get_ack_state()} N(R)={i_frame.receive_seq_number} "
f"V(S)={state.get_send_state()}")
nr_error_recovery(state, ax25)
return AX25StateType.AwaitingConnection
else:
ax25.dl_error(state.remote_call, state.local_call, "S")
return AX25StateType.Connected
elif event.event_type == AX25EventType.AX25_FRMR:
ax25.dl_error(state.remote_call, state.local_call, "K")
establish_data_link(state, ax25)
state.layer_3 = True
return AX25StateType.Connected
else:
logger.debug(f"Ignoring {event}")
return AX25StateType.Connected
def timer_recovery_handler(
state: AX25State,
event: AX25StateEvent,
ax25: AX25,
logger: LoggingMixin) -> AX25StateType:
assert state.current_state == AX25StateType.TimerRecovery
if event.event_type == AX25EventType.DL_CONNECT:
state.clear_pending_iframes()
establish_data_link(state, ax25)
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.DL_DISCONNECT:
state.clear_pending_iframes()
state.retry_count = 0
disc = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DISC, True)
ax25.write_packet(disc)
state.t3.cancel()
state.t1.start()
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.DL_DATA:
pending = cast(InternalInfo, event.packet)
state.push_iframe(pending, event.future)
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.IFRAME_READY:
pending, future = state.pending_frames.get()
if state.window_exceeded():
logger.debug("Window exceeded, delaying frame")
#asyncio.ensure_future(delay_outgoing_data(state, pending, future))
state.push_iframe(pending, future)
else:
i_frame = IFrame.i_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command, False,
state.get_recv_state(), state.get_send_state(), pending.protocol, pending.info)
ax25.write_packet(i_frame)
state.sent_frames[state.get_send_state()] = i_frame
state.futures[state.get_send_state()] = future
state.inc_send_state()
state.ack_pending = False
if not state.t1.running():
state.t3.cancel()
state.t1.start()
state.pending_frames.task_done()
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.T1_EXPIRE:
if state.retry_count < 4:
state.retry_count += 1
transmit_enquiry(state, ax25)
return AX25StateType.TimerRecovery
else:
logger.debug("datalink retries exceeded, disconnecting")
if state.get_ack_state() == state.get_send_state():
ax25.dl_error(state.remote_call, state.local_call, "U")
else:
ax25.dl_error(state.remote_call, state.local_call, "I")
state.internal_event_cb(AX25StateEvent.dl_disconnect(state.remote_call, state.local_call))
state.clear_pending_iframes()
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DM, True)
ax25.write_packet(dm)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_SABM:
u_frame = cast(UFrame, event.packet)
ua = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.UA, u_frame.poll_final)
ax25.write_packet(ua)
ax25.dl_error(state.remote_call, state.local_call, "F")
        if state.get_send_state() != state.get_ack_state():
state.clear_pending_iframes()
ax25.dl_connect_indication(state.remote_call, state.local_call)
state.reset()
state.t3.start()
return AX25StateType.Connected
elif event.event_type == AX25EventType.AX25_RNR:
# TODO Set peer busy
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.AX25_RR:
# TODO Set peer clear
s_frame = cast(SFrame, event.packet)
if s_frame.get_command() == SupervisoryCommand.Response and s_frame.poll_final:
"""
Check if N(R) (received seq) is leq the V(S) (send seq) and if
the V(A) (ack'd seq) is leq N(R) (received seq).
N(S) is the senders seq number
N(R) is the receivers next expected seq number
V(A) is the last acknowledged seq
V(S) is the next send seq
V(R) is the next expected seq number
E.g., V(A) <= N(R) <= V(S) reads as:
if the last acknowledged sequence is less than or equal to the next expected seq is less than
or equal to the next send seq
We get an RR with N(R) of 5, that means the receiver expects our next send seq to be 5
If this is equal to our last ack'd seq it means we've missed a whole window.
"""
state.t1.cancel()
select_t1_value(state)
if check_nr(state, s_frame.receive_seq_number):
state.set_ack_state(s_frame.receive_seq_number)
if state.get_send_state() == state.get_ack_state():
state.t3.start()
logger.debug("Re-connected")
return AX25StateType.Connected
else:
logger.debug(f"Invoke retransmission, N(R)={state.get_recv_state()}")
invoke_retransmission(state, ax25)
return AX25StateType.TimerRecovery
else:
logger.warning(f"N(R) error recovery, V(S)={state.get_send_state()} N(R)={s_frame.receive_seq_number} "
f"V(A)={state.get_ack_state()}")
nr_error_recovery(state, ax25)
return AX25StateType.AwaitingConnection
else:
if s_frame.get_command() == SupervisoryCommand.Command and s_frame.poll_final:
enquiry_response(state, ax25)
if check_nr(state, state.get_recv_state()):
state.set_ack_state(s_frame.receive_seq_number)
logger.debug("Still in timer recovery")
return AX25StateType.TimerRecovery
else:
logger.warning(f"N(R) error recovery, V(S)={state.get_send_state()} V(R)={state.get_recv_state()} "
f"V(A)={state.get_ack_state()}")
nr_error_recovery(state, ax25)
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_DISC:
u_frame = cast(UFrame, event.packet)
state.clear_pending_iframes()
ua = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
UnnumberedType.UA, u_frame.poll_final)
ax25.write_packet(ua)
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
state.t1.cancel()
state.t3.cancel()
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_UA:
ax25.dl_error(state.remote_call, state.local_call, "C")
establish_data_link(state, ax25)
state.layer_3 = False
return AX25StateType.AwaitingConnection
elif event.event_type == AX25EventType.AX25_UI:
ui_frame = cast(UIFrame, event.packet)
check_ui(ui_frame, ax25)
if ui_frame.poll_final:
enquiry_response(state, ax25)
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.DL_UNIT_DATA:
pending = cast(InternalInfo, event.packet)
ui = UIFrame.ui_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command, True,
pending.protocol, pending.info)
ax25.write_packet(ui)
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.AX25_DM:
ax25.dl_error(state.remote_call, state.local_call, "E")
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
state.clear_pending_iframes()
state.t1.cancel()
state.t3.cancel()
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_INFO:
i_frame = cast(IFrame, event.packet)
if i_frame.get_command() == SupervisoryCommand.Command:
if check_nr(state, i_frame.receive_seq_number):
check_iframe_ack(state, i_frame.receive_seq_number)
if i_frame.send_seq_number == state.get_recv_state():
state.inc_recv_state()
state.reject_exception = False
ax25.dl_data_indication(state.remote_call, state.local_call, i_frame.protocol, i_frame.info)
if i_frame.poll:
state.enqueue_info_ack(ax25)
else:
if state.reject_exception:
if i_frame.poll:
rr = SFrame.s_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
SupervisoryType.RR, state.get_recv_state(), True)
ax25.write_packet(rr)
state.ack_pending = False
else:
state.reject_exception = True
rr = SFrame.s_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Response,
SupervisoryType.REJ, state.get_recv_state(), i_frame.poll)
ax25.write_packet(rr)
state.ack_pending = False
return AX25StateType.TimerRecovery
else:
logger.warning(f"N(R) error recovery, N(R)={i_frame.receive_seq_number} V(S)={state.get_send_state()}")
nr_error_recovery(state, ax25)
return AX25StateType.AwaitingConnection
else:
ax25.dl_error(state.remote_call, state.local_call, "S")
return AX25StateType.TimerRecovery
elif event.event_type == AX25EventType.AX25_FRMR:
ax25.dl_error(state.remote_call, state.local_call, "K")
establish_data_link(state, ax25)
return AX25StateType.AwaitingConnection
else:
logger.debug(f"Ignoring {event}")
return AX25StateType.TimerRecovery
def awaiting_release_handler(
state: AX25State,
event: AX25StateEvent,
ax25: AX25,
logger: LoggingMixin) -> AX25StateType:
assert state.current_state == AX25StateType.AwaitingRelease
if event.event_type == AX25EventType.DL_DISCONNECT:
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DM, False)
ax25.write_packet(dm)
return AX25StateType.Disconnected
elif event.event_type == AX25EventType.AX25_SABM:
u_frame = cast(UFrame, event.packet)
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DM, u_frame.poll_final)
ax25.write_packet(dm)
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.AX25_DISC:
u_frame = cast(UFrame, event.packet)
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DM, u_frame.poll_final)
ax25.write_packet(dm)
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.DL_DATA:
event.future.set_exception(RuntimeError("Not connected"))
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.DL_UNIT_DATA:
pending = cast(InternalInfo, event.packet)
ui = UIFrame.ui_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
False, pending.protocol, pending.info)
ax25.write_packet(ui)
return AX25StateType.AwaitingRelease
elif event.event_type in (AX25EventType.AX25_INFO, AX25EventType.AX25_RR, AX25EventType.AX25_RNR,
AX25EventType.AX25_REJ, AX25EventType.AX25_SREJ):
u_frame = cast(UFrame, event.packet)
if u_frame.poll_final:
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DM, True)
ax25.write_packet(dm)
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.AX25_UI:
ui = cast(UIFrame, event.packet)
check_ui(ui, ax25)
if ui.poll_final:
dm = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DM, True)
ax25.write_packet(dm)
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.AX25_UA:
ua = cast(UFrame, event.packet)
if ua.poll_final:
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
state.t1.cancel()
return AX25StateType.Disconnected
else:
ax25.dl_error(state.remote_call, state.local_call, "D")
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.AX25_DM:
ua = cast(UFrame, event.packet)
if ua.poll_final:
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
state.t1.cancel()
return AX25StateType.Disconnected
else:
return AX25StateType.AwaitingRelease
elif event.event_type == AX25EventType.T1_EXPIRE:
if state.retry_count < 4:
state.retry_count += 1
disc = UFrame.u_frame(state.remote_call, state.local_call, [], SupervisoryCommand.Command,
UnnumberedType.DISC, True)
ax25.write_packet(disc)
select_t1_value(state)
state.t1.start()
return AX25StateType.AwaitingRelease
else:
ax25.dl_error(state.remote_call, state.local_call, "H")
ax25.dl_disconnect_indication(state.remote_call, state.local_call)
return AX25StateType.Disconnected
else:
logger.debug(f"Ignoring {event}")
return AX25StateType.AwaitingRelease
class DeferredAX25(AX25):
"""
This is needed to capture calls to the data-link manager within the state machine. This way we can queue
up the calls and defer calling them until the state machine has completed one full cycle. Otherwise when
we call _out of_ the state machine (like dl_connect) it may induce further calls _into_ the state machine
that may result in out of order internal events.
"""
def __init__(self, actual_ax25: AX25):
self.actual_ax25: AX25 = actual_ax25
self.calls = []
def dl_error(self, remote_call: AX25Call, local_call: AX25Call, error_code):
self.calls.append(partial(self.actual_ax25.dl_error, remote_call, local_call, error_code))
def dl_data_indication(self, remote_call: AX25Call, local_call: AX25Call, protocol: L3Protocol, data: bytes):
self.calls.append(partial(self.actual_ax25.dl_data_indication, remote_call, local_call, protocol, data))
def dl_connect_indication(self, remote_call: AX25Call, local_call: AX25Call):
self.calls.append(partial(self.actual_ax25.dl_connect_indication, remote_call, local_call))
def dl_disconnect_indication(self, remote_call: AX25Call, local_call: AX25Call):
self.calls.append(partial(self.actual_ax25.dl_disconnect_indication, remote_call, local_call))
def write_packet(self, packet: AX25Packet):
self.calls.append(partial(self.actual_ax25.write_packet, packet))
def local_call(self) -> AX25Call:
return self.actual_ax25.local_call()
def apply(self):
for deferred in self.calls:
deferred()
def size(self):
return len(self.calls)
class AX25StateMachine:
"""State management for AX.25 Data Links
    Holds a mapping of AX.25 sessions keyed on (remote call, local call).
"""
def __init__(self, ax25: AX25, timer_factory: Callable[[float, Callable[[], None]], Timer]):
self._ax25 = ax25
self._sessions: Dict[Tuple[AX25Call, AX25Call], AX25State] = {} # (remote, local)
self._handlers = {
AX25StateType.Disconnected: disconnected_handler,
AX25StateType.Connected: connected_handler,
AX25StateType.AwaitingConnection: awaiting_connection_handler,
AX25StateType.TimerRecovery: timer_recovery_handler,
AX25StateType.AwaitingRelease: awaiting_release_handler
}
self._timer_factory = timer_factory
self._logger = logging.getLogger("ax25.state")
def log(self, state: AX25State, msg: str, *args, **kwargs):
self._logger.info(f"[Id={state.session_id} Local={state.local_call} Remote={state.remote_call} "
f"State={state.current_state}] V(A)={state.get_ack_state()} V(R)={state.get_recv_state()} "
f"V(S)={state.get_send_state()} {msg}")
def _get_or_create_session(self, remote_call: AX25Call, local_call: AX25Call) -> AX25State:
state = self._sessions.get((remote_call, local_call))
if state is None:
state = AX25State.create(remote_call, local_call, self.handle_internal_event, self._timer_factory)
self._sessions[(remote_call, local_call)] = state
return state
def get_sessions(self) -> Dict[AX25Call, AX25StateType]:
return {s.remote_call: s.current_state for k, s in self._sessions.items() if k[1] == self._ax25.local_call()}
def get_state(self, remote_call: AX25Call) -> AX25StateType:
local_call = self._ax25.local_call()
state = self._sessions.get((remote_call, local_call))
if state is None:
return AX25StateType.Disconnected
else:
return state.current_state
def is_window_exceeded(self, remote_call: AX25Call) -> bool:
local_call = self._ax25.local_call()
state = self._sessions.get((remote_call, local_call))
if state is None:
return False
else:
return state.window_exceeded()
def handle_packet(self, packet: AX25Packet):
state = self._get_or_create_session(packet.source, packet.dest)
event = AX25StateEvent.from_packet(packet)
handler = self._handlers[state.current_state]
if handler is None:
raise RuntimeError(f"No handler for {handler}")
deferred = DeferredAX25(self._ax25)
logger = LoggingMixin(self._logger, state.log_prefix)
new_state = handler(state, event, deferred, logger)
state.current_state = new_state
logger.debug(f"Handled {event}")
deferred.apply()
def handle_internal_event(self, event: AX25StateEvent) -> bool:
if event.event_type in (AX25EventType.DL_CONNECT, AX25EventType.DL_UNIT_DATA):
# allow these events to create a new session
state = self._get_or_create_session(event.remote_call, self._ax25.local_call())
else:
local_call = self._ax25.local_call()
state = self._sessions.get((event.remote_call, local_call))
if not state:
raise RuntimeError(f"No session for internal event {event}")
handler = self._handlers[state.current_state]
if handler is None:
raise RuntimeError(f"No handler for {handler}")
deferred = DeferredAX25(self._ax25)
logger = LoggingMixin(self._logger, state.log_prefix)
new_state = handler(state, event, deferred, logger)
state.current_state = new_state
logger.debug(f"Handled {event}")
deferred.apply()
return state.window_exceeded()
| 44.211909
| 122
| 0.647257
|
31884fbe9e12177eb9a8364107617b9f07ee3c86
| 589
|
py
|
Python
|
arda_db/browser/migrations/0001_initial.py
|
rwspicer/ARDA
|
9bd98786feff4afbd45afdf3f3f1c2549f6356cf
|
[
"MIT"
] | 5
|
2015-04-22T09:20:10.000Z
|
2021-03-12T01:52:07.000Z
|
arda_db/browser/migrations/0001_initial.py
|
rwspicer/ARDA
|
9bd98786feff4afbd45afdf3f3f1c2549f6356cf
|
[
"MIT"
] | null | null | null |
arda_db/browser/migrations/0001_initial.py
|
rwspicer/ARDA
|
9bd98786feff4afbd45afdf3f3f1c2549f6356cf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Resource',
fields=[
('r_id', models.AutoField(serialize=False, primary_key=True)),
                ('r_type', models.CharField(max_length=1, choices=[(b'0', b'library'), (b'1', b'service'), (b'2', b'online')])),
],
options={
},
bases=(models.Model,),
),
]
| 24.541667
| 128
| 0.531409
|
06c316734d0c5dfdec35448932dcdb142c786a56
| 27,691
|
py
|
Python
|
desktop/core/src/desktop/tests.py
|
erickt/hue
|
a046f1dd21226689ed447422f3373d96c65b2fd2
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/tests.py
|
erickt/hue
|
a046f1dd21226689ed447422f3373d96c65b2fd2
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/tests.py
|
erickt/hue
|
a046f1dd21226689ed447422f3373d96c65b2fd2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import sys
import tempfile
import time
import desktop
import desktop.conf
import desktop.urls
import desktop.views as views
import proxy.conf
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_false, assert_equal, assert_not_equal, assert_raises, nottest
from django.conf.urls import patterns, url
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.db.models import query, CharField, SmallIntegerField
from useradmin.models import GroupPermission
from beeswax.conf import HIVE_SERVER_HOST
from desktop.lib import django_mako
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.paginator import Paginator
from desktop.lib.conf import validate_path
from desktop.lib.django_util import TruncatingModel
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.test_utils import grant_access
from desktop.models import Document
from desktop.views import check_config, home
from pig.models import PigScript
def setup_test_environment():
"""
Sets up mako to signal template rendering.
"""
django_mako.render_to_string = django_mako.render_to_string_test
setup_test_environment.__test__ = False
def teardown_test_environment():
"""
This method is called by nose_runner when
the tests all finish. This helps track
down when tests aren't cleaning up after
themselves and leaving threads hanging around.
"""
import threading
# We should shut down all relevant threads by test completion.
threads = list(threading.enumerate())
try:
import threadframe
import traceback
if len(threads) > 1:
for v in threadframe.dict().values():
traceback.print_stack(v)
finally:
# threadframe is only available in the dev build.
pass
assert 1 == len(threads), threads
django_mako.render_to_string = django_mako.render_to_string_normal
teardown_test_environment.__test__ = False
def test_home():
c = make_logged_in_client(username="test_home", groupname="test_home", recreate=True, is_superuser=False)
user = User.objects.get(username="test_home")
response = c.get(reverse(home))
assert_equal(["notmine", "trash", "mine", "history"], json.loads(response.context['json_tags']).keys())
assert_equal(200, response.status_code)
script, created = PigScript.objects.get_or_create(owner=user)
doc = Document.objects.link(script, owner=script.owner, name='test_home')
response = c.get(reverse(home))
assert_true(str(doc.id) in json.loads(response.context['json_documents']))
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([doc.id], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.send_to_trash()
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([], tags['mine'][0]['docs'], tags)
assert_equal([doc.id], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.restore_from_trash()
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([doc.id], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags)
doc.add_to_history()
response = c.get(reverse(home))
tags = json.loads(response.context['json_tags'])
assert_equal([], tags['mine'][0]['docs'], tags)
assert_equal([], tags['trash']['docs'], tags)
assert_equal([], tags['history']['docs'], tags) # We currently don't fetch [doc.id]
def test_skip_wizard():
c = make_logged_in_client() # is_superuser
response = c.get('/', follow=True)
  assert_true(any(['admin_wizard.mako' in _template.filename for _template in response.templates]), [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = 'home'
response = c.get('/', follow=True)
  assert_true(any(['home.mako' in _template.filename for _template in response.templates]), [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = ''
response = c.get('/', follow=True)
  assert_true(any(['admin_wizard.mako' in _template.filename for _template in response.templates]), [_template.filename for _template in response.templates])
c = make_logged_in_client(username="test_skip_wizard", password="test_skip_wizard", is_superuser=False)
response = c.get('/', follow=True)
  assert_true(any(['home.mako' in _template.filename for _template in response.templates]), [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = 'home'
response = c.get('/', follow=True)
  assert_true(any(['home.mako' in _template.filename for _template in response.templates]), [_template.filename for _template in response.templates])
c.cookies['hueLandingPage'] = ''
response = c.get('/', follow=True)
  assert_true(any(['home.mako' in _template.filename for _template in response.templates]), [_template.filename for _template in response.templates])
def test_log_view():
c = make_logged_in_client()
URL = reverse(views.log_view)
LOG = logging.getLogger(__name__)
LOG.warn('une voix m’a réveillé')
# UnicodeDecodeError: 'ascii' codec can't decode byte... should not happen
response = c.get(URL)
assert_equal(200, response.status_code)
c = make_logged_in_client()
URL = reverse(views.log_view)
LOG = logging.getLogger(__name__)
LOG.warn('Got response: PK\x03\x04\n\x00\x00\x08\x00\x00\xad\x0cN?\x00\x00\x00\x00')
# DjangoUnicodeDecodeError: 'utf8' codec can't decode byte 0xad in position 75: invalid start byte... should not happen
response = c.get(URL)
assert_equal(200, response.status_code)
def test_download_log_view():
c = make_logged_in_client()
URL = reverse(views.download_log_view)
LOG = logging.getLogger(__name__)
LOG.warn(u'une voix m’a réveillé')
# UnicodeDecodeError: 'ascii' codec can't decode byte... should not happen
response = c.get(URL)
assert_equal("application/zip", response.get('Content-Type', ''))
def test_dump_config():
c = make_logged_in_client()
CANARY = "abracadabra"
  # Depending on the order of the conf.initialize() calls in settings, the set_for_testing() values may not be seen in the global settings variable
clear = HIVE_SERVER_HOST.set_for_testing(CANARY)
response1 = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response1.content, response1.content)
response2 = c.get(reverse('desktop.views.dump_config'), dict(private="true"))
assert_true(CANARY in response2.content)
# There are more private variables...
assert_true(len(response1.content) < len(response2.content))
clear()
CANARY = "(localhost|127\.0\.0\.1):(50030|50070|50060|50075)"
clear = proxy.conf.WHITELIST.set_for_testing(CANARY)
response1 = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response1.content)
clear()
# Malformed port per HUE-674
CANARY = "asdfoijaoidfjaosdjffjfjaoojosjfiojdosjoidjfoa"
clear = HIVE_SERVER_HOST.set_for_testing(CANARY)
response1 = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response1.content, response1.content)
clear()
CANARY = '/tmp/spacé.dat'
finish = proxy.conf.WHITELIST.set_for_testing(CANARY)
try:
response = c.get(reverse('desktop.views.dump_config'))
assert_true(CANARY in response.content, response.content)
finally:
finish()
# Not showing some passwords
response = c.get(reverse('desktop.views.dump_config'))
assert_false('bind_password' in response.content)
# Login as someone else
client_not_me = make_logged_in_client(username='not_me', is_superuser=False, groupname='test')
grant_access("not_me", "test", "desktop")
response = client_not_me.get(reverse('desktop.views.dump_config'))
assert_true("You must be a superuser" in response.content, response.content)
os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
resp = c.get(reverse('desktop.views.dump_config'))
del os.environ["HUE_CONF_DIR"]
assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
def test_prefs():
c = make_logged_in_client()
# Get everything
response = c.get('/desktop/prefs/')
assert_equal('{}', response.content)
# Set and get
response = c.get('/desktop/prefs/foo', dict(set="bar"))
assert_equal('true', response.content)
response = c.get('/desktop/prefs/foo')
assert_equal('"bar"', response.content)
# Reset (use post this time)
c.post('/desktop/prefs/foo', dict(set="baz"))
response = c.get('/desktop/prefs/foo')
assert_equal('"baz"', response.content)
# Check multiple values
c.post('/desktop/prefs/elephant', dict(set="room"))
response = c.get('/desktop/prefs/')
assert_true("baz" in response.content)
assert_true("room" in response.content)
# Delete everything
c.get('/desktop/prefs/elephant', dict(delete=""))
c.get('/desktop/prefs/foo', dict(delete=""))
response = c.get('/desktop/prefs/')
assert_equal('{}', response.content)
# Check non-existent value
response = c.get('/desktop/prefs/doesNotExist')
assert_equal('null', response.content)
def test_status_bar():
"""
Subs out the status_bar_views registry with temporary examples.
Tests handling of errors on view functions.
"""
backup = views._status_bar_views
views._status_bar_views = []
c = make_logged_in_client()
views.register_status_bar_view(lambda _: HttpResponse("foo", status=200))
views.register_status_bar_view(lambda _: HttpResponse("bar"))
views.register_status_bar_view(lambda _: None)
def f(r):
raise Exception()
views.register_status_bar_view(f)
response = c.get("/desktop/status_bar")
assert_equal("foobar", response.content)
views._status_bar_views = backup
def test_paginator():
"""
Test that the paginator works with partial list.
"""
def assert_page(page, data, start, end):
assert_equal(page.object_list, data)
assert_equal(page.start_index(), start)
assert_equal(page.end_index(), end)
# First page 1-20
obj = range(20)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(1), obj, 1, 20)
# Second page 21-25
obj = range(5)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(2), obj, 21, 25)
  # Handle extra data on the first page (22 items with 20 per page)
obj = range(22)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(1), range(20), 1, 20)
  # Handle extra data on the second page (22 items with 20 per page)
obj = range(22)
pgn = Paginator(obj, per_page=20, total=25)
assert_page(pgn.page(2), range(5), 21, 25)
# Handle total < len(obj). Only works for QuerySet.
obj = query.QuerySet()
obj._result_cache = range(10)
pgn = Paginator(obj, per_page=10, total=9)
assert_page(pgn.page(1), range(10), 1, 10)
# Still works with a normal complete list
obj = range(25)
pgn = Paginator(obj, per_page=20)
assert_page(pgn.page(1), range(20), 1, 20)
assert_page(pgn.page(2), range(20, 25), 21, 25)
def test_thread_dump():
c = make_logged_in_client()
response = c.get("/desktop/debug/threads")
assert_true("test_thread_dump" in response.content)
def test_truncating_model():
class TinyModel(TruncatingModel):
short_field = CharField(max_length=10)
non_string_field = SmallIntegerField()
a = TinyModel()
  a.short_field = 'a' * 9 # One less than its max_length
assert_true(a.short_field == 'a' * 9, 'Short-enough field does not get truncated')
  a.short_field = 'a' * 11 # One more than its max_length
assert_true(a.short_field == 'a' * 10, 'Too-long field gets truncated')
a.non_string_field = 10**10
assert_true(a.non_string_field == 10**10, 'non-string fields are not truncated')
def test_error_handling():
raise SkipTest
restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False)
restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False)
exc_msg = "error_raising_view: Test earráid handling"
def error_raising_view(request, *args, **kwargs):
raise Exception(exc_msg)
def popup_exception_view(request, *args, **kwargs):
raise PopupException(exc_msg, title="earráid", detail=exc_msg)
# Add an error view
error_url_pat = patterns('',
url('^500_internal_error$', error_raising_view),
url('^popup_exception$', popup_exception_view))
desktop.urls.urlpatterns.extend(error_url_pat)
try:
def store_exc_info(*args, **kwargs):
pass
# Disable the test client's exception forwarding
c = make_logged_in_client()
c.store_exc_info = store_exc_info
response = c.get('/500_internal_error')
assert_true(any(["500.mako" in _template.filename for _template in response.templates]))
assert_true('Thank you for your patience' in response.content)
assert_true(exc_msg not in response.content)
# Now test the 500 handler with backtrace
desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(True)
response = c.get('/500_internal_error')
assert_equal(response.template.name, 'Technical 500 template')
assert_true(exc_msg in response.content)
# PopupException
response = c.get('/popup_exception')
assert_true(any(["popup_error.mako" in _template.filename for _template in response.templates]))
assert_true(exc_msg in response.content)
finally:
# Restore the world
for i in error_url_pat:
desktop.urls.urlpatterns.remove(i)
restore_django_debug()
restore_500_debug()
def test_desktop_permissions():
USERNAME = 'test_core_permissions'
GROUPNAME = 'default'
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/testserver\/.*$')
c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False)
# Access to the basic works
assert_equal(200, c.get('/accounts/login/', follow=True).status_code)
assert_equal(200, c.get('/accounts/logout', follow=True).status_code)
assert_equal(200, c.get('/home', follow=True).status_code)
def test_app_permissions():
USERNAME = 'test_app_permissions'
GROUPNAME = 'impala_only'
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/testserver\/.*$')
c = make_logged_in_client(USERNAME, groupname=GROUPNAME, recreate=True, is_superuser=False)
# Reset all perms
GroupPermission.objects.filter(group__name=GROUPNAME).delete()
# Access to nothing
assert_equal(401, c.get('/beeswax', follow=True).status_code)
assert_equal(401, c.get('/impala', follow=True).status_code)
assert_equal(401, c.get('/hbase', follow=True).status_code)
# Add access to beeswax
grant_access(USERNAME, GROUPNAME, "beeswax")
assert_equal(200, c.get('/beeswax', follow=True).status_code)
assert_equal(401, c.get('/impala', follow=True).status_code)
assert_equal(401, c.get('/hbase', follow=True).status_code)
# Add access to hbase
grant_access(USERNAME, GROUPNAME, "hbase")
assert_equal(200, c.get('/beeswax', follow=True).status_code)
assert_equal(401, c.get('/impala', follow=True).status_code)
assert_equal(200, c.get('/hbase', follow=True).status_code)
# Reset all perms
GroupPermission.objects.filter(group__name=GROUPNAME).delete()
assert_equal(401, c.get('/beeswax', follow=True).status_code)
assert_equal(401, c.get('/impala', follow=True).status_code)
assert_equal(401, c.get('/hbase', follow=True).status_code)
# Test only impala perm
grant_access(USERNAME, GROUPNAME, "impala")
assert_equal(401, c.get('/beeswax', follow=True).status_code)
assert_equal(200, c.get('/impala', follow=True).status_code)
assert_equal(401, c.get('/hbase', follow=True).status_code)
def test_error_handling_failure():
# Change rewrite_user to call has_hue_permission
# Try to get filebrowser page
# test for default 500 page
# Restore rewrite_user
import desktop.auth.backend
c = make_logged_in_client()
restore_django_debug = desktop.conf.DJANGO_DEBUG_MODE.set_for_testing(False)
restore_500_debug = desktop.conf.HTTP_500_DEBUG_MODE.set_for_testing(False)
original_rewrite_user = desktop.auth.backend.rewrite_user
def rewrite_user(user):
user = original_rewrite_user(user)
delattr(user, 'has_hue_permission')
return user
original_rewrite_user = desktop.auth.backend.rewrite_user
desktop.auth.backend.rewrite_user = rewrite_user
try:
# Make sure we are showing default 500.html page.
# See django.test.client#L246
assert_raises(AttributeError, c.get, reverse('desktop.views.dump_config'))
finally:
# Restore the world
restore_django_debug()
restore_500_debug()
desktop.auth.backend.rewrite_user = original_rewrite_user
def test_404_handling():
view_name = '/the-view-that-is-not-there'
c = make_logged_in_client()
response = c.get(view_name)
assert_true(any(['404.mako' in _template.filename for _template in response.templates]), response.templates)
assert_true('Not Found' in response.content)
assert_true(view_name in response.content)
class RecordingHandler(logging.Handler):
def __init__(self, *args, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.records = []
def emit(self, r):
self.records.append(r)
def test_log_event():
c = make_logged_in_client()
root = logging.getLogger("desktop.views.log_frontend_event")
handler = RecordingHandler()
root.addHandler(handler)
c.get("/desktop/log_frontend_event?level=info&message=foo")
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo", handler.records[-1].message)
assert_equal("desktop.views.log_frontend_event", handler.records[-1].name)
c.get("/desktop/log_frontend_event?level=error&message=foo2")
assert_equal("ERROR", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo2", handler.records[-1].message)
c.get("/desktop/log_frontend_event?message=foo3")
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: foo3", handler.records[-1].message)
c.post("/desktop/log_frontend_event", {
"message": "01234567" * 1024})
assert_equal("INFO", handler.records[-1].levelname)
assert_equal("Untrusted log event from user test: " + "01234567"*(1024/8),
handler.records[-1].message)
root.removeHandler(handler)
def test_validate_path():
reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/')
assert_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True))
reset()
reset = desktop.conf.SSL_PRIVATE_KEY.set_for_testing('/tmm/does_not_exist')
assert_not_equal([], validate_path(desktop.conf.SSL_PRIVATE_KEY, is_dir=True))
reset()
@attr('requires_hadoop')
def test_config_check():
reset = (
desktop.conf.SECRET_KEY.set_for_testing(''),
desktop.conf.SSL_CERTIFICATE.set_for_testing('foobar'),
desktop.conf.SSL_PRIVATE_KEY.set_for_testing(''),
desktop.conf.DEFAULT_SITE_ENCODING.set_for_testing('klingon')
)
try:
cli = make_logged_in_client()
resp = cli.get('/desktop/debug/check_config')
assert_true('Secret key should be configured' in resp.content, resp)
assert_true('desktop.ssl_certificate' in resp.content, resp)
assert_true('Path does not exist' in resp.content, resp)
assert_true('SSL private key file should be set' in resp.content, resp)
assert_true('klingon' in resp.content, resp)
assert_true('Encoding not supported' in resp.content, resp)
# Set HUE_CONF_DIR and make sure check_config returns appropriate conf
os.environ["HUE_CONF_DIR"] = "/tmp/test_hue_conf_dir"
resp = cli.get('/desktop/debug/check_config')
del os.environ["HUE_CONF_DIR"]
assert_true('/tmp/test_hue_conf_dir' in resp.content, resp)
finally:
for old_conf in reset:
old_conf()
def test_last_access_time():
c = make_logged_in_client(username="access_test")
c.post('/accounts/login/')
login = desktop.auth.views.get_current_users()
before_access_time = time.time()
response = c.get('/home')
after_access_time = time.time()
access = desktop.auth.views.get_current_users()
user = response.context['user']
login_time = login[user]['time']
access_time = access[user]['time']
# Check that 'last_access_time' is later than login time
assert_true(login_time < access_time)
# Check that 'last_access_time' is in between the timestamps before and after the last access path
assert_true(before_access_time < access_time)
assert_true(access_time < after_access_time)
def test_ui_customizations():
custom_banner = 'test ui customization'
reset = (
desktop.conf.CUSTOM.BANNER_TOP_HTML.set_for_testing(custom_banner),
)
try:
c = make_logged_in_client()
resp = c.get('/about', follow=True)
assert_true(custom_banner in resp.content, resp)
finally:
for old_conf in reset:
old_conf()
@attr('requires_hadoop')
def test_check_config_ajax():
c = make_logged_in_client()
response = c.get(reverse(check_config))
assert_true("misconfiguration" in response.content, response.content)
def test_cx_Oracle():
"""
Tests that cx_Oracle (external dependency) is built correctly.
"""
if 'ORACLE_HOME' not in os.environ and 'ORACLE_INSTANTCLIENT_HOME' not in os.environ:
raise SkipTest
try:
import cx_Oracle
return
except ImportError, ex:
if "No module named" in ex.message:
assert_true(False, "cx_Oracle skipped its build. This happens if "
"env var ORACLE_HOME or ORACLE_INSTANTCLIENT_HOME is not defined. "
"So ignore this test failure if your build does not need to work "
"with an oracle backend.")
class TestStrictRedirection():
def setUp(self):
self.client = make_logged_in_client()
self.user = dict(username="test", password="test")
desktop.conf.REDIRECT_WHITELIST.set_for_testing('^\/.*$,^http:\/\/example.com\/.*$')
def test_redirection_blocked(self):
# Redirection with code 301 should be handled properly
# Redirection with Status code 301 example reference: http://www.somacon.com/p145.php
self._test_redirection(redirection_url='http://www.somacon.com/color/html_css_table_border_styles.php',
expected_status_code=403)
# Redirection with code 302 should be handled properly
self._test_redirection(redirection_url='http://www.google.com',
expected_status_code=403)
def test_redirection_allowed(self):
# Redirection to the host where Hue is running should be OK.
self._test_redirection(redirection_url='/', expected_status_code=302)
self._test_redirection(redirection_url='/pig', expected_status_code=302)
self._test_redirection(redirection_url='http://testserver/', expected_status_code=302)
self._test_redirection(redirection_url='https://testserver/', expected_status_code=302, **{
'SERVER_PORT': '443',
'wsgi.url_scheme': 'https',
})
self._test_redirection(redirection_url='http://example.com/', expected_status_code=302)
def _test_redirection(self, redirection_url, expected_status_code, **kwargs):
self.client.get('/accounts/logout', **kwargs)
response = self.client.post('/accounts/login/?next=' + redirection_url, self.user, **kwargs)
assert_equal(expected_status_code, response.status_code)
if expected_status_code == 403:
error_msg = 'Redirect to ' + redirection_url + ' is not allowed.'
assert_true(error_msg in response.content, response.content)
class BaseTestPasswordConfig(object):
SCRIPT = '%s -c "print \'\\n password from script \\n\'"' % sys.executable
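  # SCRIPT shells out to this same interpreter and prints the password surrounded by newlines; the
  # password-script config readers are expected to strip those newlines, yielding ' password from script '.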
def get_config_password(self):
raise NotImplementedError
def get_config_password_script(self):
raise NotImplementedError
def get_password(self):
raise NotImplementedError
@nottest
def run_test_read_password_from_script(self):
resets = [
self.get_config_password().set_for_testing(None),
self.get_config_password_script().set_for_testing(self.SCRIPT)
]
try:
assert_equal(self.get_password(), ' password from script ')
finally:
for reset in resets:
reset()
@nottest
def run_test_config_password_overrides_script_password(self):
resets = [
self.get_config_password().set_for_testing(' password from config '),
self.get_config_password_script().set_for_testing(self.SCRIPT),
]
try:
assert_equal(self.get_password(), ' password from config ')
finally:
for reset in resets:
reset()
class TestDatabasePasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.DATABASE.PASSWORD
def get_config_password_script(self):
return desktop.conf.DATABASE.PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_database_password()
def test_read_password_from_script(self):
self.run_test_read_password_from_script()
def test_config_password_overrides_script_password(self):
self.run_test_config_password_overrides_script_password()
class TestLDAPPasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.LDAP_PASSWORD
def get_config_password_script(self):
return desktop.conf.LDAP_PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_ldap_password()
def test_read_password_from_script(self):
self.run_test_read_password_from_script()
def test_config_password_overrides_script_password(self):
self.run_test_config_password_overrides_script_password()
class TestLDAPBindPasswordConfig(BaseTestPasswordConfig):
def setup(self):
self.finish = desktop.conf.LDAP.LDAP_SERVERS.set_for_testing({'test': {}})
def teardown(self):
self.finish()
def get_config_password(self):
return desktop.conf.LDAP.LDAP_SERVERS['test'].BIND_PASSWORD
def get_config_password_script(self):
return desktop.conf.LDAP.LDAP_SERVERS['test'].BIND_PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_ldap_bind_password(desktop.conf.LDAP.LDAP_SERVERS['test'])
def test_read_password_from_script(self):
self.run_test_read_password_from_script()
def test_config_password_overrides_script_password(self):
self.run_test_config_password_overrides_script_password()
class TestSMTPPasswordConfig(BaseTestPasswordConfig):
def get_config_password(self):
return desktop.conf.SMTP.PASSWORD
def get_config_password_script(self):
return desktop.conf.SMTP.PASSWORD_SCRIPT
def get_password(self):
return desktop.conf.get_smtp_password()
def test_read_password_from_script(self):
self.run_test_read_password_from_script()
def test_config_password_overrides_script_password(self):
self.run_test_config_password_overrides_script_password()
| 34.441542
| 152
| 0.739049
|
dd4e256e04a70231681e654657954d492e6fd8f2
| 29,982
|
py
|
Python
|
week2/utilities/build_ltr.py
|
fredriko/search_with_machine_learning_course
|
85670d7adf337fede418fa5665b3c5ee80e42b2b
|
[
"Apache-2.0"
] | null | null | null |
week2/utilities/build_ltr.py
|
fredriko/search_with_machine_learning_course
|
85670d7adf337fede418fa5665b3c5ee80e42b2b
|
[
"Apache-2.0"
] | null | null | null |
week2/utilities/build_ltr.py
|
fredriko/search_with_machine_learning_course
|
85670d7adf337fede418fa5665b3c5ee80e42b2b
|
[
"Apache-2.0"
] | null | null | null |
####
#
# Our main driver script for creating an LTR model via XG Boost and uploading it to OpenSearch
#
####
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
import argparse
import json
import os
from getpass import getpass
from urllib.parse import urljoin
import click_models as cm
import data_prepper as dp
import ltr_utils as ltr
import pandas as pd
import search_utils as su
import xgb_utils as xgbu
from opensearchpy import OpenSearch
if __name__ == "__main__":
host = 'localhost'
port = 9200
auth = ('admin', 'admin') # For testing only. Don't store credentials in code.
parser = argparse.ArgumentParser(description='Build LTR.')
# TODO: setup argparse requirements/dependencies to better enforce arguments that require other arguments
general = parser.add_argument_group("general")
general.add_argument("-i", '--index', default="bbuy_products",
help='The name of the main index to search')
general.add_argument("-s", '--host', default="localhost",
help='The OpenSearch host name')
general.add_argument("-p", '--port', type=int, default=9200,
help='The OpenSearch port')
general.add_argument('--user',
help='The OpenSearch admin. If this is set, the program will prompt for password too. If not set, use default of admin/admin')
general.add_argument("-l", '--ltr_store', default="week2",
help='The name of the LTR store. Will be prepended with _ltr on creation')
general.add_argument("-a", '--all_clicks',
help='The path to the CSV file containing all click/session data. This is required if doing --generate_impressions or --xgb_test')
general.add_argument("--output_dir", default="output", help="the directory to output all files to")
dp_group = parser.add_argument_group("LTR Data Prep")
dp_group.add_argument("--ltr_terms_field", default="_id",
help="If --synthesize our data, this should be set to 'sku'")
dp_group.add_argument('--normalize_json',
help='A path to a JSON map of LTR feature-normalizer type pairs. If unset, data normalization will not happen. See week2/conf/normalize_types.json for an example')
dp_group.add_argument('--synthesize', action="store_true",
help='if set, along with --generate_impressions, creates impressions based on an implied ranking in the click logs. Writes to --impressions_file.')
dp_group.add_argument('--generate_impressions', action="store_true",
help='Generate impressions by running a search and comparing results using the --train_file file. Writes to --impressions_file. See also --synthesize.')
dp_group.add_argument('--generate_num_rows', default=5000, type=int,
help='The number of impressions to generate using retrieval. Randomly samples from all_clicks. Use with --generate_impressions. Ignored if --synthesize')
dp_group.add_argument('--min_impressions', default=20, type=int,
help='The minimum number of times a query must be seen to be included in the impressions set')
dp_group.add_argument('--min_clicks', default=10, type=int,
help='The minimum number of clicks a doc must have to be included in the impressions set')
dp_group.add_argument('--query_ids', default="query_to_query_ids.json",
help='The name of the file to read/write under the --output_dir as JSON')
dp_group.add_argument("-r", '--impressions_file', default="impressions.csv",
help='Where to write the ranks/features CSV file to under --output_dir. Output is written from a Pandas data frame')
ltr_group = parser.add_argument_group("LTR Store Creation and Features")
ltr_group.add_argument("-c", '--create_ltr_store', action="store_true",
help='Set the flag to create the LTR store. If one exists, it will be deleted')
ltr_group.add_argument("-f", '--featureset',
help='The path to the Featureset JSON file to read from')
ltr_group.add_argument("-n", '--featureset_name', default="bbuy_main_featureset",
help='The name of the featureset')
ltr_group.add_argument('--upload_featureset', action="store_true",
help='Upload the featureset given by the --featureset argument to OpenSearch')
ltr_group.add_argument("-u", '--upload_ltr_model', action="store_true",
help='Upload XGB LTR model under the given featureset. Requires --featureset_name and --xgb_model')
xgb_group = parser.add_argument_group("XGB Model Training and Testing")
xgb_group.add_argument("-x", '--xgb',
help='Train an XGB Boost model using the training file given')
xgb_group.add_argument("-t", '--create_xgb_training', action="store_true",
help='Create the training data set by logging the features for the training file and then outputting in RankSVM format. Must have --train_file and --featureset')
xgb_group.add_argument('--train_file', default="train.csv",
help='Where to load the training file from under the output_dir. Required when using --create_xgb_training')
xgb_group.add_argument('--xgb_conf', default="xgb-conf.json",
help='Path to XGB parameters JSON dictionary. See week2/conf/xgb-conf.json')
xgb_group.add_argument('--xgb_feat_map', default="xgb-feat-map.txt",
help='File name under --output_dir containing the feature map. Must be set when creating training data. See week2/conf/xgb-feat-map.txt')
xgb_group.add_argument("--xgb_rounds", default=5, type=int, help="The number of rounds to train the model on.")
xgb_group.add_argument("--xgb_model", default="xgb_model.model",
help="The file name to read/write the XGB model to in --output_dir. Two files will be written: 1 with the original XBG model and 1 that is ready for uploading to LTR (name with '.ltr' appended)")
xgb_group.add_argument("--xgb_model_name", default="ltr_model", help="The name of the model")
xgb_group.add_argument("--xgb_plot", action="store_true",
help="Writes model analysis images (.png) to the --output_dir. Requires the --xgb_model, --xgb_model_name and --xgb_feat_map args")
xgb_group.add_argument("--xgb_test",
help="Given a path to a test data set, created separately from the train set, see how our model does!")
xgb_group.add_argument("--xgb_test_output", default="xgb_test_output.csv",
help="File under --output_dir to write the differences between baseline and LTR search")
xgb_group.add_argument("--xgb_test_num_queries", default=100, type=int,
help="Of the test data, only run this many queries when testing.")
xgb_group.add_argument("--xgb_main_query_weight", default=1, type=float,
help="For the rescore query, how much weight to give the main query.")
xgb_group.add_argument("--xgb_rescore_query_weight", default=2, type=float,
help="For the rescore query, how much weight to give the rescore query.")
analyze_group = parser.add_argument_group("Analyze Test Results")
analyze_group.add_argument("--analyze", action="store_true",
help="Calculate a variety of stats and other things about the results. Uses --xgb_test_output")
analyze_group.add_argument("--precision", type=int, default=10, help="What precision level to report")
analyze_group.add_argument("--analyze_explains", action="store_true",
help="Run the queries from LTR queries that performed WORSE than the non-LTR query through explains and output the values. Expensive. Uses --xgb_test_output. Outputs --output_dir as simple_ltr_explains.csv and ltr_hand_tuned_explains.csv.")
analyze_group.add_argument("--max_explains", type=int, default=100, help="The maximum number of explains to output")
click_group = parser.add_argument_group("Click Models")
click_group.add_argument("--click_model", choices=["ctr", "binary", "heuristic"], default="ctr",
help='Simple Click-through-rate model')
click_group.add_argument("--downsample", action="store_true",
                             help='Downsample whatever is most prevalent to create a more balanced training set.')
split_group = parser.add_argument_group("Train/Test Splits")
split_group.add_argument("--split_input",
help="If specified, will split the given file into training and testing, writing it to the file name given as an argument into --split_train and --split_test")
split_group.add_argument("--split_train", default="train.csv",
help="The name of the training file to output under --output_dir")
split_group.add_argument("--split_test", default="test.csv",
help="The name of the test file to output to under --output_dir")
split_group.add_argument("--split_train_rows", type=int,
help="The total number of rows from the input file to put in the train split. Limiting the rows can be helpful for testing code, but likely won't produce good models.")
split_group.add_argument("--split_test_rows", type=int,
help="The total number of rows from the input file to put in the test split. Helpful for testing code, but likely won't produce good results since it won't have insights into clicks. See --xgb_test_num_queries.")
# Some handy utilities
util_group = parser.add_argument_group("Utilities")
util_group.add_argument("--lookup_query",
help="Given a query in --all_clicks, dump out all the product info for items that got clicks")
util_group.add_argument("--lookup_explain", action="store_true",
help="With --lookup_query, run explains for each query/sku pair")
util_group.add_argument("--lookup_product", help="Given a SKU, return the product")
util_group.add_argument("--verify_products", action="store_true",
help="Looks through all SKUs in --all_clicks and reports the ones that aren't in the index. Argument is where to output the items to under --output. WARNING: This is slow.")
util_group.add_argument("--verify_file", default="validity.csv",
help="The filename to store --verify_products output to under the --output_dir. If set with --all_clicks or --split_input and the file exists, then this file will be used to filter bad SKUs from click file")
args = parser.parse_args()
output_file = "output.txt"
featureset_file = "featureset.json"
if len(vars(args)) == 0:
parser.print_usage()
exit()
host = args.host
port = args.port
if args.user:
password = getpass()
auth = (args.user, password)
base_url = "https://{}:{}/".format(host, port)
opensearch = OpenSearch(
hosts=[{'host': host, 'port': port}],
http_compress=True, # enables gzip compression for request bodies
http_auth=auth,
# client_cert = client_cert_path,
# client_key = client_key_path,
use_ssl=True,
verify_certs=False, # set to true if you have certs
ssl_assert_hostname=False,
ssl_show_warn=False,
)
output_dir = args.output_dir
from pathlib import Path
print(f"***** Current working directory is: {Path.cwd()}")
    if not os.path.isdir(output_dir):
os.mkdir(output_dir)
ltr_store_name = args.ltr_store # a little extra housekeeping due to the finickiness of the SLTR query
ltr_store_path = "_ltr/" + args.ltr_store
ltr_model_path = urljoin(base_url, ltr_store_path)
feat_name = args.featureset_name
index_name = args.index
# Prep our data
data_prepper = dp.DataPrepper(opensearch, feat_name, index_name, ltr_store_name)
if args.split_input:
# Split based on date. All of our train data will be before a given date, and all test data will be after.
# This simulates the real world and allows us to safely use prior clicks in our baseline retrieval and models
data_prepper.create_splits(args.split_input, args.split_train, args.split_test, output_dir,
args.split_train_rows, args.split_test_rows, args.verify_file)
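        # Schematically (clicks_df stands in for the loaded click log and 'query_time'
        # is assumed to be the timestamp column), a time-based split looks like:
        #   clicks_df = clicks_df.sort_values("query_time")
        #   split_at = int(len(clicks_df) * 0.8)
        #   train, test = clicks_df.iloc[:split_at], clicks_df.iloc[split_at:]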
# Create the LTR Store
if args.create_ltr_store:
ltr.create_ltr_store(ltr_model_path, auth)
all_clicks_df = None
    # Load up all of our data and filter it to get rid of junk data (promo codes, SKUs that don't exist)
if args.all_clicks:
try:
print("Loading all clicks from %s" % args.all_clicks)
all_clicks_df = pd.read_csv(args.all_clicks, parse_dates=['click_time', 'query_time'])
all_clicks_df = data_prepper.filter_junk_clicks(all_clicks_df, args.verify_file, output_dir)
# all_clicks_df = all_clicks_df.astype({'click_time': 'datetime64', 'query_time':'datetime64'})
except Exception as e:
print("Error loading all clicks data")
print(e)
exit(2)
    # Upload the LTR featureset
if args.upload_featureset:
featureset_path = urljoin(ltr_model_path + "/", "_featureset/{}".format(feat_name))
print("Installing %s featureset at %s" % (args.featureset, featureset_path))
with open(args.featureset) as json_file:
the_feature_set = json.load(json_file)
rsp = ltr.post_featureset(featureset_path, the_feature_set, auth)
print("Featureset Creation: %s" % rsp)
# Upload an LTR model
if args.upload_ltr_model:
# delete any old model first
ltr.delete_model(urljoin(ltr_model_path + "/", "_model/{}".format(args.xgb_model_name)), auth)
featureset_path = urljoin(ltr_model_path + "/", "_featureset/{}".format(feat_name))
model_path = urljoin(featureset_path + "/", "_createmodel")
os_model_file = "%s.ltr" % args.xgb_model
with open(os_model_file) as model_file:
ltr.upload_model(model_path, json.load(model_file), auth)
######
#
# Impressions are candidate queries with *simulated* ranks added to them as well as things like number of impressions
# and click counts. Impressions are what we use to then generate training data. In the real world, you wouldn't
# need to do this because you would be logging both clicked and unclicked events.
# TLDR: we are trying to build a dataset that approximates what Best Buy search looked like back when this data was captured.
# We have two approaches to impressions:
# 1) We synthesize/infer them from the existing clicks, essentially assuming there is a built in position bias in the logs that *roughly* approximates the actual ranking of Best Buy search
# back when this data was captured. Run using --generate_impressions and --synthesize
# 2) Taking --generate_num_rows, run a random sample of queries through our current search engine. If we find docs that have clicks, mark them as relevant. All else are non-relevant.
# Both approaches add rank, clicks and num_impressions onto the resulting data frame
# We also dump out a map of queries to query ids. Query ids are used in our XGB model.
# Outputs to --output_dir using the --impressions_file argument, which defaults to impressions.csv
######
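    # A minimal sketch of the synthesize idea, assuming the click log has 'query' and
    # 'sku' columns (the real implementation is data_prepper.synthesize_impressions,
    # which also applies the min_impressions/min_clicks filters and builds the query
    # id map):
    #
    #   grouped = train_df.groupby(["query", "sku"]).size().reset_index(name="clicks")
    #   grouped["rank"] = grouped.groupby("query")["clicks"] \
    #                            .rank(method="first", ascending=False) - 1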
if args.generate_impressions:
impressions_df = None
train_df = None
if args.train_file: # these should be pre-filtered, assuming we used our splitter, so let's not waste time filtering here
train_df = pd.read_csv(args.train_file, parse_dates=['click_time', 'query_time'])
else:
print("You must provide the --train_file option")
exit(2)
if args.synthesize:
(impressions_df, query_ids_map) = data_prepper.synthesize_impressions(train_df,
min_impressions=args.min_impressions,
min_clicks=args.min_clicks)
else:
# use the synthesize to feed into our generate
(impressions_df, query_ids_map) = data_prepper.synthesize_impressions(train_df,
min_impressions=args.min_impressions,
min_clicks=args.min_clicks)
impressions_df.drop(["product_name", "sku"], axis=1)
impressions_df = impressions_df.sample(n=args.generate_num_rows).reset_index(drop=True) # shuffle things
# impressions_df = impressions_df[:args.generate_num_rows]
(impressions_df, query_ids_map) = data_prepper.generate_impressions(impressions_df,
query_ids_map,
min_impressions=args.min_impressions,
min_clicks=args.min_clicks) # impressions as a Pandas DataFrame
print("Writing impressions to file: %s/%s" % (output_dir, args.impressions_file))
impressions_df.to_csv("%s/%s" % (output_dir, args.impressions_file), index=False)
query_ids = query_ids_map
# be sure to write out our query id map
with open("%s/%s" % (output_dir, args.query_ids), 'w') as qids:
qids.write(json.dumps(query_ids_map))
#####
#
# Given an --impressions_file, create an SVMRank formatted output file containing one row per query-doc-features-comments.
    # Looping over impressions, this code issues queries to OpenSearch using the SLTR EXT function to extract LTR features for every query-SKU pair.
    # It then optionally normalizes the data (we will not use this in class, but it's there for future use with learners other than XGB; XGB doesn't need normalization because it only calculates splits).
# We also apply any click models we've implemented to then assign a grade/relevance score for each and every row. See click_models.py.
# Click models can also optionally downsample to create a more balanced training set.
# Finally, we output two files: 1) training.xgb -- the file to feed to XGB for training
# 2) training.xgb.csv -- a CSV version of the training data that is easier to work with in Pandas than the XGB file.
# This CSV file can be useful for debugging purposes.
#
#####
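    # Sketch of the idea behind the click models (the real implementations live in
    # click_models.py; the column names here are illustrative): a CTR model grades
    # each query/doc row by clicks over impressions, while a binary model grades
    # anything clicked as relevant:
    #
    #   df["grade"] = (df["clicks"] / df["num_impressions"]).fillna(0)   # "ctr"
    #   df["grade"] = (df["clicks"] > 0).astype(int)                     # "binary"
    #
    # Each row of the resulting training.xgb file is in SVMRank format, roughly:
    #   <grade> qid:<query_id> 1:<feature_1> 2:<feature_2> ... # <doc_id>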
if args.create_xgb_training and args.impressions_file:
print("Loading impressions from %s/%s" % (output_dir, args.impressions_file))
impressions_df = pd.read_csv("%s/%s" % (output_dir, args.impressions_file))
if impressions_df is not None:
# We need a map of normalize types for our features. Would be nice if we could store this on the featureset
normalize_type_map = {}
if args.normalize_json:
with open(args.normalize_json) as json_file:
types = json.load(json_file)
for item in types:
normalize_type_map[item['name']] = item['normalize_function']
# We need our featureset
with open(args.featureset) as json_file:
the_feature_set = json.load(json_file)
# Log our features for the training set
print("Logging features")
features_df = data_prepper.log_features(impressions_df, terms_field=args.ltr_terms_field)
            # Calculate some stats so we can normalize feature values.
            # LTR only supports min/max, mean/std. dev and sigmoid normalizers, so those are the only stats we need.
if args.normalize_json:
# Aggregations here returns the stats about our features, like min/max, std dev. If we ever use
# https://elasticsearch-learning-to-rank.readthedocs.io/en/latest/training-models.html#creating-a-model-with-feature-normalization
# we will need these to be saved/looked up so that we can add the normalizers to the model
(features_df, aggregations) = data_prepper.normalize_data(features_df, the_feature_set,
normalize_type_map)
# Write out the normalized DF
features_df.to_csv("%s.normalized" % args.impressions_file)
else:
aggregations = {}
# Join the features data to the impressions data
# drop the features_df doc_id, as it isn't needed anymore
features_df.drop("doc_id", axis=1, inplace=True)
features_df.to_csv("%s/features.csv" % output_dir)
# Merge our impressions with our features using a left join on query_id and sku
train_features_df = pd.merge(impressions_df, features_df, how="left", on=["query_id", "sku"])
train_features_df["doc_id"] = train_features_df["sku"]
# Apply any specified click model.
train_features_df = cm.apply_click_model(train_features_df, args.click_model,
downsample=args.downsample)
# Now write out in XGB/SVM Rank format
print("NAN counts: %s" % train_features_df.isna().any().count())
train_features_df = train_features_df.fillna(0)
train_features_df = train_features_df.sample(frac=1) # shuffle
train_features_df.to_csv("%s/training.xgb.csv" % output_dir)
ltr.write_training_file(train_features_df, "%s/training.xgb" % output_dir,
"%s/%s" % (output_dir, args.xgb_feat_map))
else:
print("Unable to create training file, no ranks/features data available.")
#############
#
# Train a model using XG Boost! Taking in the training file (training.xgb by default) specified by --xgb,
# build a model by iterating --xgb_rounds using the --xgb_conf (see https://xgboost.readthedocs.io/en/stable/python/python_intro.html#setting-parameters)
# Once training is complete, dump out the model as JSON and in the OpenSearch LTR model format (which has weird escaping: https://elasticsearch-learning-to-rank.readthedocs.io/en/latest/training-models.html)
# Also save in XGB binary format.
#
#############
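    # For reference, --xgb_conf is a standard XGBoost parameter dictionary; a minimal
    # example (values illustrative, not necessarily the course defaults) would be:
    #   {"objective": "reg:logistic", "max_depth": 6, "eval_metric": "logloss"}
    # Training on the RankSVM-format file typically looks like the usual XGBoost calls
    # (the actual calls live in xgb_utils.py):
    #   dtrain = xgb.DMatrix("%s/training.xgb" % output_dir)
    #   bst = xgb.train(params, dtrain, num_boost_round=args.xgb_rounds)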
if args.xgb:
# Defaults
bst, xgb_params = xgbu.train(args.xgb, args.xgb_rounds, args.xgb_conf)
print("Dumping out model using feature map: %s" % args.xgb_feat_map)
model = bst.get_dump(fmap=("%s/%s" % (output_dir, args.xgb_feat_map)), dump_format='json')
# Write out both the raw and the LTR ready model to disk
# Create our metadata for uploading the model
model_name = args.xgb_model_name
ltr.write_opensearch_ltr_model(model_name, model, "%s/%s" % (output_dir, args.xgb_model),
objective=xgb_params.get("objective", "reg:logistic"))
print("Saving XGB Binary model to %s/%s" % (output_dir, args.xgb_model))
bst.save_model("%s/%s" % (output_dir, args.xgb_model))
# Output some useful XGB Plots using matplotlib: https://xgboost.readthedocs.io/en/stable/python/python_api.html#module-xgboost.plotting
if args.xgb_plot:
xgbu.plots("%s/%s" % (output_dir, args.xgb_model), args.xgb_model_name,
"%s/%s" % (output_dir, args.xgb_feat_map), output_dir)
################
#
# Taking in the --xgb_test file and the --train_file (for accessing click priors), run --xgb_test_num_queries through
# OpenSearch and retrieve the results. Creates and writes several data frames to --output_dir:
# 1) --xgb_test_output -- the main results, as CSV. Contains info about where and what was retrieved.
# 2) --xgb_test_output appended with .no_results -- All the queries that returned zero results
# 3) --xgb_test_output appended with .new_queries -- All the queries that were "new" to us in the test set (e.g. we never saw this query in training).
# These can be useful for debugging and to see how well we generalize
#
################
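    # The LTR part of the test query follows the OpenSearch LTR plugin's rescore
    # pattern, roughly (the 'keywords' param name depends on the featureset templates,
    # and the exact body is built in search_utils.py):
    #   "rescore": {
    #     "window_size": <N>,
    #     "query": {
    #       "query_weight": args.xgb_main_query_weight,
    #       "rescore_query_weight": args.xgb_rescore_query_weight,
    #       "rescore_query": {"sltr": {"model": args.xgb_model_name,
    #                                  "params": {"keywords": <user query>}}}
    #     }
    #   }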
if args.xgb_test:
# To test, we're going to calculate MAP by looking at how many "relevant" documents were in the top X of
# our result set.
test_data = pd.read_csv(args.xgb_test, parse_dates=['click_time', 'query_time'])
train_df = None
# we use the training file for priors, but we must make sure we don't have leaks
if args.train_file: # these should be pre-filtered, assuming we used our splitter, so let's not waste time filtering here
train_df = pd.read_csv(args.train_file, parse_dates=['click_time', 'query_time'])
else:
print("You must provide the --train_file option")
exit(2)
# DataFrame: query, doc, rank, type, miss, score, new
results_df, no_results = su.evaluate_test_set(test_data, train_df, opensearch, args.xgb_model_name,
args.ltr_store, args.index, num_queries=args.xgb_test_num_queries,
main_query_weight=args.xgb_main_query_weight, rescore_query_weight=args.xgb_rescore_query_weight
)
print("Writing results of test to %s" % "%s/%s" % (output_dir, args.xgb_test_output))
results_df.to_csv("%s/%s" % (output_dir, args.xgb_test_output), index=False)
no_results_df = pd.DataFrame(no_results)
no_results_df.to_csv("%s/%s.no_results" % (output_dir, args.xgb_test_output), index=False)
print("Meta:\nModel name: %s, Store Name: %s, Index: %s, Precision: %s \n" % (
args.xgb_model_name, args.ltr_store, args.index, 10))
# do some comparisons
print("Zero results queries: %s\n\n" % no_results)
new_queries_df = results_df[results_df["new"] == True]["query"].drop_duplicates()
new_queries_df.to_csv("%s/%s.new_queries" % (output_dir, args.xgb_test_output), index=False)
# Given the output of --xgb_test, output some useful info about things like MRR and Precision. Also creates a number
# of interesting join data frames that can be used to compare results.
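    # Here MRR and precision have their usual definitions:
    #   MRR         = mean over queries of 1 / (rank of the first relevant result)
    #   Precision@K = (number of relevant results in the top K) / K, with K = --precision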
if args.analyze:
pd.set_option('display.max_columns', None)
test_df = pd.read_csv("%s/test.csv" % output_dir, parse_dates=['click_time', 'query_time'])
train_df = pd.read_csv("%s/%s" % (output_dir, args.train_file), parse_dates=['click_time', 'query_time'])
print("Analyzing results from %s/%s" % (output_dir, args.xgb_test_output))
results_df = pd.read_csv("%s/%s" % (output_dir, args.xgb_test_output))
no_results_df = pd.read_csv("%s/%s.no_results" % (output_dir, args.xgb_test_output))
new_queries_df = pd.read_csv("%s/%s.new_queries" % (output_dir, args.xgb_test_output))
su.analyze_results(results_df, no_results_df, new_queries_df, opensearch, args.index, args.xgb_model_name,
args.ltr_store, train_df, test_df, output_dir, precision=args.precision, analyze_explains=args.analyze_explains, max_explains=args.max_explains)
# Given a query in --all_clicks, output to the screen all of the documents that matched this query. Can be useful for debugging.
if args.lookup_query:
query = args.lookup_query
su.lookup_query(query, all_clicks_df, opensearch, index=index_name,
explain=args.lookup_explain,
source=["name", "shortDescription", "longDescription", "salesRankShortTerm",
"salesRankMediumTerm", "salesRankLongTerm", "features"])
    # Given --lookup_product SKU, output that document to the terminal. Useful for debugging
if args.lookup_product:
sku = args.lookup_product
doc = su.lookup_product(sku, opensearch, index_name)
print("Retrieved doc:\n %s" % json.dumps(doc, indent=4))
# opensearch.get(index_name, sku)
    # Loop through *ALL* unique SKUs from --all_clicks and validate they exist in the index by retrieving each document with su.lookup_product.
    # Outputs a data frame as CSV (named by --verify_file, validity.csv by default) which tracks whether each SKU is in the index or not. Can be used for filtering --all_clicks for training, etc.
if args.verify_products:
skus = all_clicks_df['sku'].drop_duplicates()
print("Verifying %s skus. This may take a while" % len(skus))
sku_tracker = []
valid_tracker = []
status = {"sku": sku_tracker, "status": valid_tracker}
        for item in skus.items():
doc = su.lookup_product(item[1], opensearch, index_name)
sku_tracker.append(item[1])
if doc is None:
valid_tracker.append(0)
else:
valid_tracker.append(1)
df = pd.DataFrame(status)
output_file = "%s/%s" % (output_dir, args.verify_file)
print("Writing results to %s" % output_file)
df.to_csv(output_file, index=False)
| 67.073826
| 274
| 0.649356
|
8c9774d34d0f9b617ccb0e032aed97df48589381
| 161,648
|
py
|
Python
|
google/container/v1/container-v1-py/google/container_v1/services/cluster_manager/client.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/container/v1/container-v1-py/google/container_v1/services/cluster_manager/client.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/container/v1/container-v1-py/google/container_v1/services/cluster_manager/client.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
import warnings
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.container_v1.services.cluster_manager import pagers
from google.container_v1.types import cluster_service
from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ClusterManagerGrpcTransport
from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport
class ClusterManagerClientMeta(type):
"""Metaclass for the ClusterManager client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]]
_transport_registry["grpc"] = ClusterManagerGrpcTransport
_transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport
def get_transport_class(cls,
label: str = None,
) -> Type[ClusterManagerTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ClusterManagerClient(metaclass=ClusterManagerClientMeta):
"""Google Kubernetes Engine Cluster Manager v1"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
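    # For example, "container.googleapis.com" converts to "container.mtls.googleapis.com"
    # and "container.sandbox.googleapis.com" converts to
    # "container.mtls.sandbox.googleapis.com"; endpoints that are already mTLS or are
    # not under googleapis.com are returned unchanged.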
DEFAULT_ENDPOINT = "container.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterManagerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterManagerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ClusterManagerTransport:
"""Returns the transport used by the client instance.
Returns:
ClusterManagerTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ClusterManagerTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the cluster manager client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ClusterManagerTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ClusterManagerTransport):
# transport is a ClusterManagerTransport instance.
if credentials or client_options.credentials_file:
raise ValueError("When providing a transport instance, "
"provide its credentials directly.")
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
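    # Typical usage (illustrative; assumes Application Default Credentials are
    # configured and "my-project" is a placeholder project id):
    #
    #   client = ClusterManagerClient()
    #   response = client.list_clusters(parent="projects/my-project/locations/-")
    #   for cluster in response.clusters:
    #       print(cluster.name)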
def list_clusters(self,
request: Union[cluster_service.ListClustersRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.ListClustersResponse:
r"""Lists all clusters owned by a project in either the
specified zone or all zones.
Args:
request (Union[google.container_v1.types.ListClustersRequest, dict]):
The request object. ListClustersRequest lists clusters.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the
parent field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides, or "-" for all zones. This
field has been deprecated and replaced by the parent
field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
parent (str):
The parent (project and location) where the clusters
will be listed. Specified in the format
``projects/*/locations/*``. Location "-" matches all
zones and all regions.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.ListClustersResponse:
ListClustersResponse is the result of
ListClustersRequest.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.ListClustersRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.ListClustersRequest):
request = cluster_service.ListClustersRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_clusters]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_cluster(self,
request: Union[cluster_service.GetClusterRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Cluster:
r"""Gets the details of a specific cluster.
Args:
request (Union[google.container_v1.types.GetClusterRequest, dict]):
The request object. GetClusterRequest gets the settings
of a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to retrieve. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
retrieve. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Cluster:
A Google Kubernetes Engine cluster.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.GetClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.GetClusterRequest):
request = cluster_service.GetClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_cluster(self,
request: Union[cluster_service.CreateClusterRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster: cluster_service.Cluster = None,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Creates a cluster, consisting of the specified number and type
of Google Compute Engine instances.
By default, the cluster is created in the project's `default
network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
One firewall is added for the cluster. After cluster creation,
the Kubelet creates routes for each node to allow the containers
on that node to communicate with all other instances in the
cluster.
Finally, an entry is added to the project's global metadata
indicating which CIDR range the cluster is using.
Args:
request (Union[google.container_v1.types.CreateClusterRequest, dict]):
The request object. CreateClusterRequest creates a
cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the
parent field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the parent field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster (google.container_v1.types.Cluster):
Required. A `cluster
resource <https://cloud.google.com/container-engine/reference/rest/v1/projects.locations.clusters>`__
This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
parent (str):
The parent (project and location) where the cluster will
be created. Specified in the format
``projects/*/locations/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster, parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.CreateClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.CreateClusterRequest):
request = cluster_service.CreateClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster is not None:
request.cluster = cluster
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_cluster(self,
request: Union[cluster_service.UpdateClusterRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
update: cluster_service.ClusterUpdate = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Updates the settings of a specific cluster.
Args:
request (Union[google.container_v1.types.UpdateClusterRequest, dict]):
The request object. UpdateClusterRequest updates the
settings of a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to upgrade. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update (google.container_v1.types.ClusterUpdate):
Required. A description of the
update.
This corresponds to the ``update`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
update. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, update, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.UpdateClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.UpdateClusterRequest):
request = cluster_service.UpdateClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if update is not None:
request.update = update
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_node_pool(self,
request: Union[cluster_service.UpdateNodePoolRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Updates the version and/or image type for the
specified node pool.
Args:
request (Union[google.container_v1.types.UpdateNodePoolRequest, dict]):
The request object. UpdateNodePoolRequests update a node
pool's image and/or version.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.UpdateNodePoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.UpdateNodePoolRequest):
request = cluster_service.UpdateNodePoolRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_node_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_node_pool_autoscaling(self,
request: Union[cluster_service.SetNodePoolAutoscalingRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the autoscaling settings for the specified node
pool.
Args:
request (Union[google.container_v1.types.SetNodePoolAutoscalingRequest, dict]):
The request object. SetNodePoolAutoscalingRequest sets
the autoscaler settings of a node pool.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetNodePoolAutoscalingRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetNodePoolAutoscalingRequest):
request = cluster_service.SetNodePoolAutoscalingRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_node_pool_autoscaling]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_logging_service(self,
request: Union[cluster_service.SetLoggingServiceRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
logging_service: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the logging service for a specific cluster.
Args:
request (Union[google.container_v1.types.SetLoggingServiceRequest, dict]):
The request object. SetLoggingServiceRequest sets the
logging service of a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to upgrade. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
logging_service (str):
Required. The logging service the cluster should use to
write logs. Currently available options:
- ``logging.googleapis.com/kubernetes`` - The Cloud
Logging service with a Kubernetes-native resource
model
- ``logging.googleapis.com`` - The legacy Cloud Logging
service (no longer available as of GKE 1.15).
- ``none`` - no logs will be exported from the cluster.
If left as an empty
string,\ ``logging.googleapis.com/kubernetes`` will be
used for GKE 1.14+ or ``logging.googleapis.com`` for
earlier versions.
This corresponds to the ``logging_service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
set logging. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
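        Example:
            A minimal usage sketch (not generated sample code); the cluster
            name is a placeholder::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.set_logging_service(
                    name="projects/my-project/locations/us-central1/clusters/my-cluster",
                    logging_service="logging.googleapis.com/kubernetes",
                )
                print(operation)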
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, logging_service, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetLoggingServiceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetLoggingServiceRequest):
request = cluster_service.SetLoggingServiceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if logging_service is not None:
request.logging_service = logging_service
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_logging_service]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_monitoring_service(self,
request: Union[cluster_service.SetMonitoringServiceRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
monitoring_service: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the monitoring service for a specific cluster.
Args:
request (Union[google.container_v1.types.SetMonitoringServiceRequest, dict]):
The request object. SetMonitoringServiceRequest sets the
monitoring service of a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to upgrade. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
monitoring_service (str):
Required. The monitoring service the cluster should use
to write metrics. Currently available options:
- "monitoring.googleapis.com/kubernetes" - The Cloud
Monitoring service with a Kubernetes-native resource
model
- ``monitoring.googleapis.com`` - The legacy Cloud
Monitoring service (no longer available as of GKE
1.15).
- ``none`` - No metrics will be exported from the
cluster.
If left as an empty
string,\ ``monitoring.googleapis.com/kubernetes`` will
be used for GKE 1.14+ or ``monitoring.googleapis.com``
for earlier versions.
This corresponds to the ``monitoring_service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
set monitoring. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
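        Example:
            A minimal usage sketch (not generated sample code); the cluster
            name is a placeholder::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.set_monitoring_service(
                    name="projects/my-project/locations/us-central1/clusters/my-cluster",
                    monitoring_service="monitoring.googleapis.com/kubernetes",
                )
                print(operation)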
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, monitoring_service, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetMonitoringServiceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetMonitoringServiceRequest):
request = cluster_service.SetMonitoringServiceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if monitoring_service is not None:
request.monitoring_service = monitoring_service
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_monitoring_service]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_addons_config(self,
request: Union[cluster_service.SetAddonsConfigRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
addons_config: cluster_service.AddonsConfig = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the addons for a specific cluster.
Args:
request (Union[google.container_v1.types.SetAddonsConfigRequest, dict]):
The request object. SetAddonsConfigRequest sets the
addons associated with the cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to upgrade. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
addons_config (google.container_v1.types.AddonsConfig):
Required. The desired configurations
for the various addons available to run
in the cluster.
This corresponds to the ``addons_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
set addons. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
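        Example:
            A minimal usage sketch (not generated sample code); the cluster
            name is a placeholder and ``HttpLoadBalancing`` is one assumed
            addon message used purely for illustration::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.set_addons_config(
                    name="projects/my-project/locations/us-central1/clusters/my-cluster",
                    addons_config=container_v1.AddonsConfig(
                        http_load_balancing=container_v1.HttpLoadBalancing(disabled=False),
                    ),
                )
                print(operation)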
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, addons_config, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetAddonsConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetAddonsConfigRequest):
request = cluster_service.SetAddonsConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if addons_config is not None:
request.addons_config = addons_config
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_addons_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_locations(self,
request: Union[cluster_service.SetLocationsRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
locations: Sequence[str] = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the locations for a specific cluster. Deprecated. Use
`projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update>`__
instead.
Args:
request (Union[google.container_v1.types.SetLocationsRequest, dict]):
The request object. SetLocationsRequest sets the
locations of the cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to upgrade. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
locations (Sequence[str]):
Required. The desired list of Google Compute Engine
`zones <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster's nodes should be located. Changing
the locations a cluster is in will result in nodes being
either created or removed from the cluster, depending on
whether locations are being added or removed.
This list must always include the cluster's primary
zone.
This corresponds to the ``locations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
set locations. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
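        Example:
            A minimal usage sketch (not generated sample code); the cluster
            name and zones are placeholders. Note that this RPC is deprecated
            in favor of ``update_cluster`` and that the list must include the
            cluster's primary zone::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.set_locations(
                    name="projects/my-project/locations/us-central1/clusters/my-cluster",
                    locations=["us-central1-a", "us-central1-b"],
                )
                print(operation)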
"""
warnings.warn("ClusterManagerClient.set_locations is deprecated",
DeprecationWarning)
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, locations, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetLocationsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetLocationsRequest):
request = cluster_service.SetLocationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if locations is not None:
request.locations = locations
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_locations]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_master(self,
request: Union[cluster_service.UpdateMasterRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
master_version: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Updates the master for a specific cluster.
Args:
request (Union[google.container_v1.types.UpdateMasterRequest, dict]):
The request object. UpdateMasterRequest updates the
master of the cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to upgrade. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
master_version (str):
Required. The Kubernetes version to
change the master to.
Users may specify either explicit
versions offered by Kubernetes Engine or
version aliases, which have the
following behavior:
- "latest": picks the highest valid
Kubernetes version - "1.X": picks the
highest valid patch+gke.N patch in the
1.X version - "1.X.Y": picks the highest
valid gke.N patch in the 1.X.Y version -
"1.X.Y-gke.N": picks an explicit
Kubernetes version - "-": picks the
default Kubernetes version
This corresponds to the ``master_version`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
update. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
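        Example:
            A minimal usage sketch (not generated sample code); the cluster
            name is a placeholder, and ``"latest"`` is one of the version
            aliases described above::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.update_master(
                    name="projects/my-project/locations/us-central1/clusters/my-cluster",
                    master_version="latest",
                )
                print(operation)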
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, master_version, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.UpdateMasterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.UpdateMasterRequest):
request = cluster_service.UpdateMasterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if master_version is not None:
request.master_version = master_version
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_master]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_master_auth(self,
request: Union[cluster_service.SetMasterAuthRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets master auth materials. Currently supports
        changing the admin password of a specific cluster,
either via password generation or explicitly setting the
password.
Args:
request (Union[google.container_v1.types.SetMasterAuthRequest, dict]):
The request object. SetMasterAuthRequest updates the
admin password of a cluster.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetMasterAuthRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetMasterAuthRequest):
request = cluster_service.SetMasterAuthRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_master_auth]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_cluster(self,
request: Union[cluster_service.DeleteClusterRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Deletes the cluster, including the Kubernetes
endpoint and all worker nodes.
Firewalls and routes that were configured during cluster
creation are also deleted.
Other Google Compute Engine resources that might be in
use by the cluster, such as load balancer resources, are
not deleted if they weren't present when the cluster was
initially created.
Args:
request (Union[google.container_v1.types.DeleteClusterRequest, dict]):
The request object. DeleteClusterRequest deletes a
cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to delete. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster) of the cluster to
delete. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
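        Example:
            A minimal usage sketch (not generated sample code); the cluster
            name is a placeholder::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.delete_cluster(
                    name="projects/my-project/locations/us-central1/clusters/my-cluster",
                )
                print(operation)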
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.DeleteClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.DeleteClusterRequest):
request = cluster_service.DeleteClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_cluster]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_operations(self,
request: Union[cluster_service.ListOperationsRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.ListOperationsResponse:
r"""Lists all operations in a project in a specific zone
or all zones.
Args:
request (Union[google.container_v1.types.ListOperationsRequest, dict]):
The request object. ListOperationsRequest lists
operations.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the
parent field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
to return operations for, or ``-`` for all zones. This
field has been deprecated and replaced by the parent
field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.ListOperationsResponse:
ListOperationsResponse is the result
of ListOperationsRequest.
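        Example:
            A minimal usage sketch (not generated sample code); the parent
            value is a placeholder, and the ``operations`` attribute of the
            response is an assumption about the response message::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                request = container_v1.ListOperationsRequest(
                    parent="projects/my-project/locations/us-central1",
                )
                response = client.list_operations(request=request)
                for op in response.operations:
                    print(op.name)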
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.ListOperationsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.ListOperationsRequest):
request = cluster_service.ListOperationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_operations]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_operation(self,
request: Union[cluster_service.GetOperationRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
operation_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Gets the specified operation.
Args:
request (Union[google.container_v1.types.GetOperationRequest, dict]):
The request object. GetOperationRequest gets a single
operation.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operation_id (str):
Deprecated. The server-assigned ``name`` of the
operation. This field has been deprecated and replaced
by the name field.
This corresponds to the ``operation_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, operation id) of the
operation to get. Specified in the format
``projects/*/locations/*/operations/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
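        Example:
            A minimal usage sketch (not generated sample code); the operation
            name is a placeholder::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.get_operation(
                    name="projects/my-project/locations/us-central1/operations/operation-123",
                )
                print(operation)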
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, operation_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.GetOperationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.GetOperationRequest):
request = cluster_service.GetOperationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if operation_id is not None:
request.operation_id = operation_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_operation]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def cancel_operation(self,
request: Union[cluster_service.CancelOperationRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
operation_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels the specified operation.
Args:
request (Union[google.container_v1.types.CancelOperationRequest, dict]):
The request object. CancelOperationRequest cancels a
single operation.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the operation resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operation_id (str):
Deprecated. The server-assigned ``name`` of the
operation. This field has been deprecated and replaced
by the name field.
This corresponds to the ``operation_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, operation id) of the
operation to cancel. Specified in the format
``projects/*/locations/*/operations/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, operation_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.CancelOperationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.CancelOperationRequest):
request = cluster_service.CancelOperationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if operation_id is not None:
request.operation_id = operation_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.cancel_operation]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_server_config(self,
request: Union[cluster_service.GetServerConfigRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.ServerConfig:
r"""Returns configuration info about the Google
Kubernetes Engine service.
Args:
request (Union[google.container_v1.types.GetServerConfigRequest, dict]):
The request object. Gets the current Kubernetes Engine
service configuration.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
to return operations for. This field has been deprecated
and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project and location) of the server config to
get, specified in the format ``projects/*/locations/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.ServerConfig:
Kubernetes Engine service
configuration.
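        Example:
            A minimal usage sketch (not generated sample code); the name is a
            placeholder in the ``projects/*/locations/*`` format::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                config = client.get_server_config(
                    name="projects/my-project/locations/us-central1",
                )
                print(config)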
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.GetServerConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.GetServerConfigRequest):
request = cluster_service.GetServerConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_server_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_json_web_keys(self,
request: Union[cluster_service.GetJSONWebKeysRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.GetJSONWebKeysResponse:
r"""Gets the public component of the cluster signing keys
in JSON Web Key format.
This API is not yet intended for general use, and is not
available for all clusters.
Args:
request (Union[google.container_v1.types.GetJSONWebKeysRequest, dict]):
The request object. GetJSONWebKeysRequest gets the
public component of the keys used by the cluster to sign
token requests. This will be the jwks_uri for the
                discovery document returned by getOpenIDConfig. See the
OpenID Connect Discovery 1.0 specification for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.GetJSONWebKeysResponse:
GetJSONWebKeysResponse is a valid
                JSON Web Key Set as specified in RFC 7517.
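        Example:
            A minimal usage sketch (not generated sample code); the parent
            value is a placeholder cluster resource name, and its format here
            is an assumption::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                request = container_v1.GetJSONWebKeysRequest(
                    parent="projects/my-project/locations/us-central1/clusters/my-cluster",
                )
                response = client.get_json_web_keys(request=request)
                print(response)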
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.GetJSONWebKeysRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.GetJSONWebKeysRequest):
request = cluster_service.GetJSONWebKeysRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_json_web_keys]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_node_pools(self,
request: Union[cluster_service.ListNodePoolsRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.ListNodePoolsResponse:
r"""Lists the node pools for a cluster.
Args:
request (Union[google.container_v1.types.ListNodePoolsRequest, dict]):
The request object. ListNodePoolsRequest lists the node
pool(s) for a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the
parent field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the parent field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster.
This field has been deprecated and
replaced by the parent field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
parent (str):
The parent (project, location, cluster id) where the
node pools will be listed. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.ListNodePoolsResponse:
ListNodePoolsResponse is the result
of ListNodePoolsRequest.
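        Example:
            A minimal usage sketch (not generated sample code); the parent
            value is a placeholder, and the ``node_pools`` attribute is an
            assumption about the response message::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                response = client.list_node_pools(
                    parent="projects/my-project/locations/us-central1/clusters/my-cluster",
                )
                for node_pool in response.node_pools:
                    print(node_pool.name)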
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.ListNodePoolsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.ListNodePoolsRequest):
request = cluster_service.ListNodePoolsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_node_pools]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_node_pool(self,
request: Union[cluster_service.GetNodePoolRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
node_pool_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.NodePool:
r"""Retrieves the requested node pool.
Args:
request (Union[google.container_v1.types.GetNodePoolRequest, dict]):
The request object. GetNodePoolRequest retrieves a node
pool for a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster.
This field has been deprecated and
replaced by the name field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
node_pool_id (str):
Deprecated. The name of the node
pool. This field has been deprecated and
replaced by the name field.
This corresponds to the ``node_pool_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster, node pool id) of
the node pool to get. Specified in the format
``projects/*/locations/*/clusters/*/nodePools/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.NodePool:
NodePool contains the name and
configuration for a cluster's node pool.
                Node pools are a set of nodes (i.e.,
                VMs) with a common configuration and
specification, under the control of the
cluster master. They may have a set of
Kubernetes labels applied to them, which
may be used to reference them during pod
scheduling. They may also be resized up
or down, to accommodate the workload.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.GetNodePoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.GetNodePoolRequest):
request = cluster_service.GetNodePoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if node_pool_id is not None:
request.node_pool_id = node_pool_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_node_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_node_pool(self,
request: Union[cluster_service.CreateNodePoolRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
node_pool: cluster_service.NodePool = None,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Creates a node pool for a cluster.
Args:
request (Union[google.container_v1.types.CreateNodePoolRequest, dict]):
The request object. CreateNodePoolRequest creates a node
pool for a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the
parent field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the parent field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster.
This field has been deprecated and
replaced by the parent field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
node_pool (google.container_v1.types.NodePool):
Required. The node pool to create.
This corresponds to the ``node_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
parent (str):
The parent (project, location, cluster id) where the
node pool will be created. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
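        Example:
            A minimal usage sketch (not generated sample code); the parent
            value is a placeholder and ``initial_node_count`` is an assumed
            field on the ``NodePool`` message::

                from google.cloud import container_v1

                client = container_v1.ClusterManagerClient()
                operation = client.create_node_pool(
                    parent="projects/my-project/locations/us-central1/clusters/my-cluster",
                    node_pool=container_v1.NodePool(
                        name="my-pool",
                        initial_node_count=3,
                    ),
                )
                print(operation)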
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, node_pool, parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.CreateNodePoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.CreateNodePoolRequest):
request = cluster_service.CreateNodePoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if node_pool is not None:
request.node_pool = node_pool
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_node_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_node_pool(self,
request: Union[cluster_service.DeleteNodePoolRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
node_pool_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Deletes a node pool from a cluster.
Args:
request (Union[google.container_v1.types.DeleteNodePoolRequest, dict]):
The request object. DeleteNodePoolRequest deletes a node
pool for a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster.
This field has been deprecated and
replaced by the name field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
node_pool_id (str):
Deprecated. The name of the node pool
to delete. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``node_pool_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster, node pool id) of
the node pool to delete. Specified in the format
``projects/*/locations/*/clusters/*/nodePools/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.DeleteNodePoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.DeleteNodePoolRequest):
request = cluster_service.DeleteNodePoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if node_pool_id is not None:
request.node_pool_id = node_pool_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_node_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
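    # Illustrative usage sketch: the same style of call expressed with an
    # explicit request object instead of flattened fields. The resource name
    # below is hypothetical.
    #
    #   request = cluster_service.DeleteNodePoolRequest(
    #       name="projects/example-project/locations/us-central1/clusters/example-cluster/nodePools/example-pool",
    #   )
    #   operation = client.delete_node_pool(request=request)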
def rollback_node_pool_upgrade(self,
request: Union[cluster_service.RollbackNodePoolUpgradeRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
node_pool_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Rolls back a previously Aborted or Failed NodePool
upgrade. This makes no changes if the last upgrade
successfully completed.
Args:
request (Union[google.container_v1.types.RollbackNodePoolUpgradeRequest, dict]):
The request object. RollbackNodePoolUpgradeRequest
                rolls back the previously Aborted or Failed NodePool
                upgrade. This will be a no-op if the last upgrade
successfully completed.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to rollback. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
node_pool_id (str):
Deprecated. The name of the node pool
to rollback. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``node_pool_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster, node pool id) of
                the node pool to roll back the upgrade. Specified in the
format
``projects/*/locations/*/clusters/*/nodePools/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.RollbackNodePoolUpgradeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.RollbackNodePoolUpgradeRequest):
request = cluster_service.RollbackNodePoolUpgradeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if node_pool_id is not None:
request.node_pool_id = node_pool_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.rollback_node_pool_upgrade]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_node_pool_management(self,
request: Union[cluster_service.SetNodePoolManagementRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the NodeManagement options for a node pool.
Args:
request (Union[google.container_v1.types.SetNodePoolManagementRequest, dict]):
The request object. SetNodePoolManagementRequest sets
the node management properties of a node pool.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetNodePoolManagementRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetNodePoolManagementRequest):
request = cluster_service.SetNodePoolManagementRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_node_pool_management]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
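    # Illustrative usage sketch: this method exposes no flattened fields, so
    # the request message is built explicitly. The management settings and
    # resource name below are hypothetical.
    #
    #   request = cluster_service.SetNodePoolManagementRequest(
    #       name="projects/example-project/locations/us-central1/clusters/example-cluster/nodePools/example-pool",
    #       management=cluster_service.NodeManagement(auto_repair=True, auto_upgrade=True),
    #   )
    #   operation = client.set_node_pool_management(request=request)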
def set_labels(self,
request: Union[cluster_service.SetLabelsRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets labels on a cluster.
Args:
request (Union[google.container_v1.types.SetLabelsRequest, dict]):
The request object. SetLabelsRequest sets the Google
Cloud Platform labels on a Google Container Engine
cluster, which will in turn set them for Google Compute
Engine resources used by that cluster
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetLabelsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetLabelsRequest):
request = cluster_service.SetLabelsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_labels]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
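    # Illustrative usage sketch: applying resource labels via an explicit
    # request. The label values and fingerprint below are hypothetical; the
    # fingerprint would normally come from a prior cluster read.
    #
    #   request = cluster_service.SetLabelsRequest(
    #       name="projects/example-project/locations/us-central1/clusters/example-cluster",
    #       resource_labels={"env": "staging"},
    #       label_fingerprint="example-fingerprint",
    #   )
    #   operation = client.set_labels(request=request)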
def set_legacy_abac(self,
request: Union[cluster_service.SetLegacyAbacRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
enabled: bool = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Enables or disables the ABAC authorization mechanism
on a cluster.
Args:
request (Union[google.container_v1.types.SetLegacyAbacRequest, dict]):
The request object. SetLegacyAbacRequest enables or
disables the ABAC authorization mechanism for a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster
to update. This field has been
deprecated and replaced by the name
field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
enabled (bool):
Required. Whether ABAC authorization
will be enabled in the cluster.
This corresponds to the ``enabled`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster id) of the cluster
to set legacy abac. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, enabled, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetLegacyAbacRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetLegacyAbacRequest):
request = cluster_service.SetLegacyAbacRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if enabled is not None:
request.enabled = enabled
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_legacy_abac]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
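    # Illustrative usage sketch: toggling legacy ABAC with flattened
    # arguments. The resource name below is hypothetical.
    #
    #   operation = client.set_legacy_abac(
    #       name="projects/example-project/locations/us-central1/clusters/example-cluster",
    #       enabled=False,
    #   )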
def start_ip_rotation(self,
request: Union[cluster_service.StartIPRotationRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Starts master IP rotation.
Args:
request (Union[google.container_v1.types.StartIPRotationRequest, dict]):
The request object. StartIPRotationRequest creates a new
IP for the cluster and then performs a node upgrade on
each node pool to point to the new IP.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster.
This field has been deprecated and
replaced by the name field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster id) of the cluster
to start IP rotation. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.StartIPRotationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.StartIPRotationRequest):
request = cluster_service.StartIPRotationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.start_ip_rotation]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def complete_ip_rotation(self,
request: Union[cluster_service.CompleteIPRotationRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Completes master IP rotation.
Args:
request (Union[google.container_v1.types.CompleteIPRotationRequest, dict]):
The request object. CompleteIPRotationRequest moves the
cluster master back into single-IP mode.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster.
This field has been deprecated and
replaced by the name field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster id) of the cluster
to complete IP rotation. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.CompleteIPRotationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.CompleteIPRotationRequest):
request = cluster_service.CompleteIPRotationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.complete_ip_rotation]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
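    # Illustrative usage sketch: IP rotation is a two-step flow; start it,
    # repoint credentials and nodes at the new IP, then complete it. The
    # resource name below is hypothetical.
    #
    #   cluster_name = "projects/example-project/locations/us-central1/clusters/example-cluster"
    #   client.start_ip_rotation(name=cluster_name)
    #   # ... after nodes and kubeconfig entries use the new IP ...
    #   client.complete_ip_rotation(name=cluster_name)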
def set_node_pool_size(self,
request: Union[cluster_service.SetNodePoolSizeRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the size for a specific node pool.
Args:
request (Union[google.container_v1.types.SetNodePoolSizeRequest, dict]):
The request object. SetNodePoolSizeRequest sets the size
                of a node pool.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetNodePoolSizeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetNodePoolSizeRequest):
request = cluster_service.SetNodePoolSizeRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_node_pool_size]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
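    # Illustrative usage sketch: resizing a node pool through an explicit
    # request. The resource name and node count below are hypothetical.
    #
    #   request = cluster_service.SetNodePoolSizeRequest(
    #       name="projects/example-project/locations/us-central1/clusters/example-cluster/nodePools/example-pool",
    #       node_count=5,
    #   )
    #   operation = client.set_node_pool_size(request=request)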
def set_network_policy(self,
request: Union[cluster_service.SetNetworkPolicyRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
network_policy: cluster_service.NetworkPolicy = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Enables or disables Network Policy for a cluster.
Args:
request (Union[google.container_v1.types.SetNetworkPolicyRequest, dict]):
The request object. SetNetworkPolicyRequest
enables/disables network policy for a cluster.
project_id (str):
Deprecated. The Google Developers Console `project ID or
project
number <https://developers.google.com/console/help/new/#projectnumber>`__.
This field has been deprecated and replaced by the name
field.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Deprecated. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides. This field has been
deprecated and replaced by the name field.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Deprecated. The name of the cluster.
This field has been deprecated and
replaced by the name field.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
network_policy (google.container_v1.types.NetworkPolicy):
Required. Configuration options for
the NetworkPolicy feature.
This corresponds to the ``network_policy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster id) of the cluster
to set networking policy. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, network_policy, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetNetworkPolicyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetNetworkPolicyRequest):
request = cluster_service.SetNetworkPolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if network_policy is not None:
request.network_policy = network_policy
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_network_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
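    # Illustrative usage sketch: enabling network policy with a NetworkPolicy
    # message. The provider choice and resource name below are hypothetical.
    #
    #   operation = client.set_network_policy(
    #       name="projects/example-project/locations/us-central1/clusters/example-cluster",
    #       network_policy=cluster_service.NetworkPolicy(
    #           provider=cluster_service.NetworkPolicy.Provider.CALICO,
    #           enabled=True,
    #       ),
    #   )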
def set_maintenance_policy(self,
request: Union[cluster_service.SetMaintenancePolicyRequest, dict] = None,
*,
project_id: str = None,
zone: str = None,
cluster_id: str = None,
maintenance_policy: cluster_service.MaintenancePolicy = None,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
r"""Sets the maintenance policy for a cluster.
Args:
request (Union[google.container_v1.types.SetMaintenancePolicyRequest, dict]):
The request object. SetMaintenancePolicyRequest sets the
maintenance policy for a cluster.
project_id (str):
Required. The Google Developers Console `project ID or
project
number <https://support.google.com/cloud/answer/6158840>`__.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
Required. The name of the Google Compute Engine
`zone <https://cloud.google.com/compute/docs/zones#available>`__
in which the cluster resides.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
cluster_id (str):
Required. The name of the cluster to
update.
This corresponds to the ``cluster_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
maintenance_policy (google.container_v1.types.MaintenancePolicy):
Required. The maintenance policy to
be set for the cluster. An empty field
clears the existing maintenance policy.
This corresponds to the ``maintenance_policy`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
name (str):
The name (project, location, cluster id) of the cluster
to set maintenance policy. Specified in the format
``projects/*/locations/*/clusters/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.types.Operation:
This operation resource represents
operations that may have happened or are
happening on the cluster. All fields are
output only.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy, name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.SetMaintenancePolicyRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.SetMaintenancePolicyRequest):
request = cluster_service.SetMaintenancePolicyRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if zone is not None:
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
if maintenance_policy is not None:
request.maintenance_policy = maintenance_policy
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_maintenance_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
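    # Illustrative usage sketch: setting a daily maintenance window. The field
    # nesting mirrors the cluster_service message types; the start time and
    # resource name below are hypothetical.
    #
    #   policy = cluster_service.MaintenancePolicy(
    #       window=cluster_service.MaintenanceWindow(
    #           daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
    #               start_time="03:00",
    #           ),
    #       ),
    #   )
    #   operation = client.set_maintenance_policy(
    #       name="projects/example-project/locations/us-central1/clusters/example-cluster",
    #       maintenance_policy=policy,
    #   )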
def list_usable_subnetworks(self,
request: Union[cluster_service.ListUsableSubnetworksRequest, dict] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListUsableSubnetworksPager:
r"""Lists subnetworks that are usable for creating
clusters in a project.
Args:
request (Union[google.container_v1.types.ListUsableSubnetworksRequest, dict]):
The request object. ListUsableSubnetworksRequest
requests the list of usable subnetworks available to a
user for creating clusters.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.container_v1.services.cluster_manager.pagers.ListUsableSubnetworksPager:
ListUsableSubnetworksResponse is the
response of
ListUsableSubnetworksRequest.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a cluster_service.ListUsableSubnetworksRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, cluster_service.ListUsableSubnetworksRequest):
request = cluster_service.ListUsableSubnetworksRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_usable_subnetworks]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListUsableSubnetworksPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
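    # Illustrative usage sketch: the returned pager resolves additional pages
    # transparently while iterating. The parent value below is hypothetical.
    #
    #   request = cluster_service.ListUsableSubnetworksRequest(
    #       parent="projects/example-project",
    #   )
    #   for subnetwork in client.list_usable_subnetworks(request=request):
    #       print(subnetwork)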
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
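# Illustrative usage sketch: the client can also be used as a context manager
# so the transport is closed automatically on exit; only do this when the
# transport is not shared with other clients. The parent value below is
# hypothetical.
#
#   with ClusterManagerClient() as client:
#       subnets = client.list_usable_subnetworks(
#           request=cluster_service.ListUsableSubnetworksRequest(parent="projects/example-project"),
#       )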
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-container",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"ClusterManagerClient",
)
| 43.22139
| 149
| 0.584764
|
5ad1cd4eb463dc49a9bdbe48221a6dd2e63ef1e7
| 24,718
|
py
|
Python
|
tests/gpuarray/test_scan.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/gpuarray/test_scan.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/gpuarray/test_scan.py
|
canyon289/Theano-PyMC
|
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
import theano
import theano.sandbox.rng_mrg
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, test_ctx_name
from theano import gpuarray, tensor
from theano.gpuarray.basic_ops import GpuFromHost, HostFromGpu
from theano.gpuarray.elemwise import GpuElemwise
from theano.scan.basic import scan
from theano.scan.checkpoints import scan_checkpoints
from theano.scan.op import Scan
pygpu_gpuarray = pytest.importorskip("pygpu.gpuarray")
GpuArrayException = pygpu_gpuarray.GpuArrayException
if theano.config.mode == "FAST_COMPILE":
mode_with_opt = theano.compile.mode.get_mode("FAST_RUN")
else:
mode_with_opt = theano.compile.mode.get_default_mode()
if theano.config.mode in ("DEBUG_MODE", "DebugMode"):
mode_nodebug = theano.compile.mode.get_mode("FAST_RUN")
else:
mode_nodebug = mode_with_opt
class TestScan:
def setup_method(self):
utt.seed_rng()
def test_one_sequence_one_output_weights_gpu1(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = theano.tensor.fvector("u")
x0 = theano.tensor.fscalar("x0")
W_in = theano.tensor.fscalar("win")
W = theano.tensor.fscalar("w")
mode = mode_with_gpu.excluding("InputToGpuOptimizer")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode,
)
output = GpuFromHost(test_ctx_name)(output)
f2 = theano.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode,
)
rng = np.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
v_u = np.asarray(v_u, dtype="float32")
v_x0 = np.asarray(v_x0, dtype="float32")
W = np.asarray(W, dtype="float32")
W_in = np.asarray(W_in, dtype="float32")
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
theano_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_values, v_out)
# TO DEL
topo = f2.maker.fgraph.toposort()
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
topo = f2.maker.fgraph.toposort()
assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 0
assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any([isinstance(node.op, GpuElemwise) for node in scan_node_topo])
assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])
    # This second version tests the second case in the optimizer that moves scan to the gpu.
def test_one_sequence_one_output_weights_gpu2(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = theano.tensor.fvector("u")
x0 = theano.tensor.fscalar("x0")
W_in = theano.tensor.fscalar("win")
W = theano.tensor.fscalar("w")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu,
)
f2 = theano.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu,
)
# get random initial values
rng = np.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
theano_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_values, v_out)
topo = f2.maker.fgraph.toposort()
assert sum([isinstance(node.op, HostFromGpu) for node in topo]) == 1
assert sum([isinstance(node.op, GpuFromHost) for node in topo]) == 4
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any([isinstance(node.op, GpuElemwise) for node in scan_node_topo])
assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])
# This third test checks that scan can deal with a mixture of dtypes as
    # outputs when it is running on the GPU
def test_gpu3_mixture_dtype_outputs(self):
def f_rnn(u_t, x_tm1, W_in, W):
return (u_t * W_in + x_tm1 * W, theano.tensor.cast(u_t + x_tm1, "int64"))
u = theano.tensor.fvector("u")
x0 = theano.tensor.fscalar("x0")
W_in = theano.tensor.fscalar("win")
W = theano.tensor.fscalar("w")
output, updates = scan(
f_rnn,
u,
[x0, None],
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu,
)
f2 = theano.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu,
)
# get random initial values
rng = np.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out1 = np.zeros((4,))
v_out2 = np.zeros((4,), dtype="int64")
v_out1[0] = v_u[0] * W_in + v_x0 * W
v_out2[0] = v_u[0] + v_x0
for step in range(1, 4):
v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
v_out2[step] = np.int64(v_u[step] + v_out1[step - 1])
theano_out1, theano_out2 = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_out1, v_out1)
utt.assert_allclose(theano_out2, v_out2)
topo = f2.maker.fgraph.toposort()
        scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
assert scan_node.op.gpua
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert not any([isinstance(node.op, HostFromGpu) for node in scan_node_topo])
assert not any([isinstance(node.op, GpuFromHost) for node in scan_node_topo])
def test_gpu4_gibbs_chain(self):
rng = np.random.RandomState(utt.fetch_seed())
v_vsample = np.array(
rng.binomial(
1,
0.5,
size=(3, 20),
),
dtype="float32",
)
vsample = theano.shared(v_vsample)
trng = theano.sandbox.rng_mrg.MRG_RandomStreams(utt.fetch_seed())
def f(vsample_tm1):
return (
trng.binomial(vsample_tm1.shape, n=1, p=0.3, dtype="float32")
* vsample_tm1
)
theano_vsamples, updates = scan(
f,
[],
vsample,
[],
n_steps=10,
truncate_gradient=-1,
go_backwards=False,
mode=mode_with_gpu,
)
my_f = theano.function(
[],
theano_vsamples[-1],
updates=updates,
allow_input_downcast=True,
mode=mode_with_gpu,
)
        # I leave this to be tested by DebugMode; this test was anyway
        # more of a "does the graph compile" kind of test
my_f()
class ScanGpuTests:
"""
This class defines a number of tests for Scan on GPU as well as a few
helper functions for these tests. The GPU tests defined in this class are
independent of the GPU backend used. Because of this, a class inheriting
from ScanGpuTests should define the following attributes and methods to
make the tests run on a specific backend :
- self.gpu_backend : Reference to the backend module
    - self.mode_with_gpu : Compilation mode to force usage of the gpu backend
    - self.is_scan_on_gpu(node) : Method to determine if a scan node has been
moved to run on a gpu under the specific
backend. Returns a boolean.
"""
def test_one_sequence_one_output_weights_gpu1(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = theano.tensor.fvector("u")
x0 = theano.tensor.fscalar("x0")
W_in = theano.tensor.fscalar("win")
W = theano.tensor.fscalar("w")
# The following line is needed to have the first case being used
# Otherwise, it is the second that is tested.
mode = self.mode_with_gpu.excluding("InputToGpuOptimizer")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=mode,
)
output = self.gpu_backend.gpu_from_host(output)
f2 = theano.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
# get random initial values
rng = np.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
v_u = np.asarray(v_u, dtype="float32")
v_x0 = np.asarray(v_x0, dtype="float32")
W = np.asarray(W, dtype="float32")
W_in = np.asarray(W_in, dtype="float32")
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
theano_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_values, v_out)
# TO DEL
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
topo = f2.maker.fgraph.toposort()
assert (
sum([isinstance(node.op, self.gpu_backend.HostFromGpu) for node in topo])
== 0
)
assert (
sum([isinstance(node.op, self.gpu_backend.GpuFromHost) for node in topo])
== 4
)
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any(
[
isinstance(node.op, self.gpu_backend.GpuElemwise)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.HostFromGpu)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.GpuFromHost)
for node in scan_node_topo
]
)
    # This second version tests the second case in the optimizer that moves scan to the gpu.
def test_one_sequence_one_output_weights_gpu2(self):
def f_rnn(u_t, x_tm1, W_in, W):
return u_t * W_in + x_tm1 * W
u = theano.tensor.fvector("u")
x0 = theano.tensor.fscalar("x0")
W_in = theano.tensor.fscalar("win")
W = theano.tensor.fscalar("w")
output, updates = scan(
f_rnn,
u,
x0,
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=self.mode_with_gpu,
)
f2 = theano.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
# get random initial values
rng = np.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out = np.zeros((4,))
v_out[0] = v_u[0] * W_in + v_x0 * W
for step in range(1, 4):
v_out[step] = v_u[step] * W_in + v_out[step - 1] * W
theano_values = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_values, v_out)
topo = f2.maker.fgraph.toposort()
assert (
sum([isinstance(node.op, self.gpu_backend.HostFromGpu) for node in topo])
== 1
)
assert (
sum([isinstance(node.op, self.gpu_backend.GpuFromHost) for node in topo])
== 4
)
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
scan_node_topo = scan_node.op.fn.maker.fgraph.toposort()
# check that there is no gpu transfer in the inner loop.
assert any(
[
isinstance(node.op, self.gpu_backend.GpuElemwise)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.HostFromGpu)
for node in scan_node_topo
]
)
assert not any(
[
isinstance(node.op, self.gpu_backend.GpuFromHost)
for node in scan_node_topo
]
)
# This third test checks that scan can deal with a mixture of dtypes as
    # outputs when it is running on the GPU
def test_gpu3_mixture_dtype_outputs(self):
def f_rnn(u_t, x_tm1, W_in, W):
return (u_t * W_in + x_tm1 * W, tensor.cast(u_t + x_tm1, "int64"))
u = theano.tensor.fvector("u")
x0 = theano.tensor.fscalar("x0")
W_in = theano.tensor.fscalar("win")
W = theano.tensor.fscalar("w")
output, updates = scan(
f_rnn,
u,
[x0, None],
[W_in, W],
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=self.mode_with_gpu,
)
f2 = theano.function(
[u, x0, W_in, W],
output,
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
# get random initial values
rng = np.random.RandomState(utt.fetch_seed())
v_u = rng.uniform(size=(4,), low=-5.0, high=5.0)
v_x0 = rng.uniform()
W = rng.uniform()
W_in = rng.uniform()
# compute the output in numpy
v_out1 = np.zeros((4,))
v_out2 = np.zeros((4,), dtype="int64")
v_out1[0] = v_u[0] * W_in + v_x0 * W
v_out2[0] = v_u[0] + v_x0
for step in range(1, 4):
v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W
v_out2[step] = np.int64(v_u[step] + v_out1[step - 1])
theano_out1, theano_out2 = f2(v_u, v_x0, W_in, W)
utt.assert_allclose(theano_out1, v_out1)
utt.assert_allclose(theano_out2, v_out2)
topo = f2.maker.fgraph.toposort()
scan_node = [node for node in topo if isinstance(node.op, Scan)]
assert len(scan_node) == 1
scan_node = scan_node[0]
assert self.is_scan_on_gpu(scan_node)
def test_gibbs_chain(self):
rng = np.random.RandomState(utt.fetch_seed())
v_vsample = np.array(
rng.binomial(
1,
0.5,
size=(3, 20),
),
dtype="float32",
)
vsample = theano.shared(v_vsample)
trng = theano.sandbox.rng_mrg.MRG_RandomStreams(utt.fetch_seed())
def f(vsample_tm1):
return (
trng.binomial(vsample_tm1.shape, n=1, p=0.3, dtype="float32")
* vsample_tm1
)
theano_vsamples, updates = scan(
f,
[],
vsample,
[],
n_steps=10,
truncate_gradient=-1,
go_backwards=False,
mode=self.mode_with_gpu,
)
my_f = theano.function(
[],
theano_vsamples[-1],
updates=updates,
allow_input_downcast=True,
mode=self.mode_with_gpu,
)
        # I leave this to be tested by DebugMode; this test was anyway more of
        # a "does the graph compile" kind of test
my_f()
def test_gpu_memory_usage(self):
# This test validates that the memory usage of the defined theano
        # function is reasonable when executed on the GPU. It checks for
# a bug in which one of scan's optimization was not applied which
# made the scan node compute large and unnecessary outputs which
# brought memory usage on the GPU to ~12G.
# Dimensionality of input and output data (not one-hot coded)
n_in = 100
n_out = 100
# Number of neurons in hidden layer
n_hid = 4000
# Number of minibatches
mb_size = 2
# Time steps in minibatch
mb_length = 200
# Define input variables
xin = tensor.ftensor3(name="xin")
yout = tensor.ftensor3(name="yout")
# Initialize the network parameters
U = theano.shared(np.zeros((n_in, n_hid), dtype="float32"), name="W_xin_to_l1")
V = theano.shared(np.zeros((n_hid, n_hid), dtype="float32"), name="W_l1_to_l1")
W = theano.shared(np.zeros((n_hid, n_out), dtype="float32"), name="W_l1_to_l2")
nparams = [U, V, W]
# Build the forward pass
l1_base = tensor.dot(xin, U)
def scan_l(baseline, last_step):
return baseline + tensor.dot(last_step, V)
zero_output = tensor.alloc(np.asarray(0.0, dtype="float32"), mb_size, n_hid)
l1_out, _ = scan(
scan_l,
sequences=[l1_base],
outputs_info=[zero_output],
mode=self.mode_with_gpu_nodebug,
)
l2_out = tensor.dot(l1_out, W)
# Compute the cost and take the gradient wrt params
cost = tensor.sum((l2_out - yout) ** 2)
grads = tensor.grad(cost, nparams)
updates = list(zip(nparams, (n - g for n, g in zip(nparams, grads))))
# Compile the theano function
feval_backprop = theano.function(
[xin, yout], cost, updates=updates, mode=self.mode_with_gpu_nodebug
)
# Validate that the PushOutScanOutput optimization has been applied
# by checking the number of outputs of the grad Scan node in the
# compiled function.
nodes = feval_backprop.maker.fgraph.toposort()
scan_nodes = [n for n in nodes if isinstance(n.op, Scan)]
# The grad scan is always the 2nd one according to toposort. If the
# optimization has been applied, it has 2 outputs, otherwise 3.
grad_scan_node = scan_nodes[1]
assert len(grad_scan_node.outputs) == 2, len(grad_scan_node.outputs)
# Call the theano function to ensure the absence of a memory error
feval_backprop(
np.zeros((mb_length, mb_size, n_in), dtype="float32"),
np.zeros((mb_length, mb_size, n_out), dtype="float32"),
)
def test_memory_reuse_gpudimshuffle(self):
# Test the memory pre-allocation feature in scan when one output is
# the result of a GpuDimshuffle (because an optimization in
# GpuDimshuffle can cause issues with the memory pre-allocation
# where it falsely thinks that a pre-allocated memory region has
# been used when it hasn't).
def inner_fn(seq1, recurrent_out):
temp = seq1 + recurrent_out.sum()
output1 = temp.dimshuffle(1, 0)
output2 = temp.sum() + recurrent_out
return output1, output2
input1 = theano.tensor.ftensor3()
init = theano.tensor.ftensor3()
outputs_info = [None, init]
out, _ = scan(
inner_fn,
sequences=[input1],
outputs_info=outputs_info,
mode=self.mode_with_gpu,
)
out1 = out[0].flatten()
out2 = out[1].flatten()
fct = theano.function([input1, init], [out1, out2], mode=self.mode_with_gpu)
output = fct(
np.ones((2, 1, 1), dtype="float32"), np.ones((1, 1, 1), dtype="float32")
)
expected_output = (
np.array([2, 4], dtype="float32"),
np.array([3, 7], dtype="float32"),
)
utt.assert_allclose(output, expected_output)
class TestScanGpuarray(ScanGpuTests):
"""
This class takes the gpu tests for scan that are defined in
class ScanGpuTests and runs them using the gpuarray backend.
"""
def setup_method(self):
self.gpu_backend = gpuarray
# This is unfortunate, but required
def gpu_from_host(v):
return gpuarray.GpuFromHost(None)(v)
self.gpu_backend.gpu_from_host = gpu_from_host
self.mode_with_gpu = mode_with_opt.including("gpuarray", "scan")
self.mode_with_gpu_nodebug = mode_nodebug.including("gpuarray", "scan")
# Skip the test if pygpu is not available
if not self.gpu_backend.pygpu_activated:
pytest.skip("Optional package pygpu disabled")
utt.seed_rng()
def is_scan_on_gpu(self, node):
return node.op.info.get("gpua", False)
class TestScanCheckpoint:
def setup_method(self):
self.k = tensor.iscalar("k")
self.A = tensor.vector("A")
result, _ = scan(
fn=lambda prior_result, A: prior_result * A,
outputs_info=tensor.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k,
)
result_check, _ = scan_checkpoints(
fn=lambda prior_result, A: prior_result * A,
outputs_info=tensor.ones_like(self.A),
non_sequences=self.A,
n_steps=self.k,
save_every_N=100,
)
self.result = result[-1]
self.result_check = result_check[-1]
self.grad_A = tensor.grad(self.result.sum(), self.A)
self.grad_A_check = tensor.grad(self.result_check.sum(), self.A)
def test_memory(self):
from tests.gpuarray.config import mode_with_gpu # noqa
f = theano.function(
inputs=[self.A, self.k], outputs=self.grad_A, mode=mode_with_gpu
)
f_check = theano.function(
inputs=[self.A, self.k], outputs=self.grad_A_check, mode=mode_with_gpu
)
free_gmem = theano.gpuarray.type._context_reg[None].free_gmem
data = np.ones(free_gmem // 3000, dtype=np.float32)
# Check that it works with the checkpoints
size = 1000
if isinstance(mode_with_gpu, theano.compile.DebugMode):
size = 100
f_check(data, size)
# Check that the basic scan fails in that case
# Skip that check in DebugMode, as it can fail in different ways
if not isinstance(mode_with_gpu, theano.compile.DebugMode):
with pytest.raises(GpuArrayException):
f(data, 1000)
| 33.906722
| 87
| 0.572983
|
fa3aee06123e1e4109a3fe3b5184ea02735b0a5d
| 1,034
|
py
|
Python
|
draugr/python_utilities/strings.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 3
|
2019-09-27T08:04:59.000Z
|
2020-12-02T06:14:45.000Z
|
draugr/python_utilities/strings.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 64
|
2019-09-27T08:03:42.000Z
|
2022-03-28T15:07:30.000Z
|
draugr/python_utilities/strings.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 1
|
2020-10-01T00:18:57.000Z
|
2020-10-01T00:18:57.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 09/10/2019
"""
__all__ = ["indent_lines"]
from typing import Any
def indent_lines(
input_str: Any, indent_spaces_num: int = 2, ignore_single_lines: bool = False
) -> str:
"""
:param ignore_single_lines:
:type ignore_single_lines:
:param input_str:
:type input_str:
:param indent_spaces_num:
:type indent_spaces_num:
:return:
:rtype:"""
if not isinstance(input_str, str):
input_str = str(input_str)
s = input_str.split("\n")
indent_s = indent_spaces_num * " "
if len(s) == 1:
if ignore_single_lines:
return input_str
else:
return f"{indent_s}{input_str}"
first = s.pop(0)
s = [f"{indent_s}{line}" for line in s]
s = "\n".join(s)
s = f"{indent_s}{first}\n{s}"
return s
if __name__ == "__main__":
a = "slasc\nsaffasd\n2dasf"
print(a)
print(indent_lines(a))
| 21.541667
| 81
| 0.594778
|
bd3f6ae4802e4d5d01a48ae599756247ce79d46b
| 7,747
|
py
|
Python
|
docs/source/conf.py
|
plamere/PlaylistBuilder
|
ae71c0b493e9cb6ecca5cc02fd3f51afa3fb8d07
|
[
"MIT"
] | 41
|
2015-06-20T10:47:57.000Z
|
2022-01-31T16:55:37.000Z
|
docs/source/conf.py
|
plamere/PlaylistBuilder
|
ae71c0b493e9cb6ecca5cc02fd3f51afa3fb8d07
|
[
"MIT"
] | 5
|
2015-10-06T22:08:59.000Z
|
2016-05-21T04:47:38.000Z
|
docs/source/conf.py
|
plamere/PlaylistBuilder
|
ae71c0b493e9cb6ecca5cc02fd3f51afa3fb8d07
|
[
"MIT"
] | 13
|
2015-07-23T14:35:28.000Z
|
2020-07-04T05:27:33.000Z
|
# -*- coding: utf-8 -*-
#
# pbl documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 20 07:20:44 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
sys.path.insert(0, os.path.abspath('../..'))
import pbl
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pbl'
copyright = u'2015, Paul Lamere'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pbldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pbl.tex', u'pbl Documentation',
u'Paul Lamere', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pbl', u'pbl Documentation',
[u'Paul Lamere'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pbl', u'pbl Documentation',
u'Paul Lamere', 'pbl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.49187
| 80
| 0.711759
|
a74c5a71401d99753345b0d22cfbb2475dc96f3b
| 7,890
|
py
|
Python
|
setup.py
|
stjordanis/nlp-4
|
d30d25d3dad590dffe2d3004b4b301dd562dd4f2
|
[
"Apache-2.0"
] | 2
|
2021-11-14T09:11:43.000Z
|
2021-11-14T10:07:49.000Z
|
setup.py
|
stjordanis/nlp-4
|
d30d25d3dad590dffe2d3004b4b301dd562dd4f2
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
stjordanis/nlp-4
|
d30d25d3dad590dffe2d3004b4b301dd562dd4f2
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
""" HuggingFace/Datasets is an open library of datasets.
Note:
VERSION needs to be formatted following the MAJOR.MINOR.PATCH convention
(we need to follow this convention to be able to retrieve versioned scripts)
Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
0. Prerequisites:
- Dependencies:
- twine: "pip install twine"
- Create an account in (and join the 'datasets' project):
- PyPI: https://pypi.org/
- Test PyPI: https://test.pypi.org/
1. Change the version in:
- __init__.py
- setup.py
- docs/source/conf.py
2. Commit these changes: "git commit -m 'Release: VERSION'"
3. Add a tag in git to mark the release: "git tag VERSION -m 'Add tag VERSION for pypi'"
Push the tag to remote: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
First, delete any "build" directory that may exist from previous builds.
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv/notebook by running:
pip install huggingface_hub fsspec aiohttp
pip install -U tqdm
pip install -i https://testpypi.python.org/pypi datasets
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Fill release notes in the tag in github once everything is looking hunky-dory.
8. Update the documentation commit in .circleci/deploy.sh for the accurate documentation to be displayed.
Update the version mapping in docs/source/_static/js/custom.js with: "python utils/release.py --version VERSION"
Set version to X.X.X+1.dev0 (e.g. 1.8.0 -> 1.8.1.dev0) in:
- setup.py
- __init__.py
9. Commit these changes: "git commit -m 'Release docs'"
Push the commit to remote: "git push origin master"
"""
import datetime
import itertools
import os
import sys
from setuptools import find_packages, setup
REQUIRED_PKGS = [
# We use numpy>=1.17 to have np.random.Generator (Dataset shuffling)
"numpy>=1.17",
# Backend and serialization.
# Minimum 3.0.0 to support mix of struct and list types in parquet, and batch iterators of parquet data
# pyarrow 4.0.0 introduced segfault bug, see: https://github.com/huggingface/datasets/pull/2268
"pyarrow>=1.0.0,!=4.0.0",
# For smart caching dataset processing
"dill",
# For performance gains with apache arrow
"pandas",
# for downloading datasets over HTTPS
"requests>=2.19.0",
# progress bars in download and scripts
"tqdm>=4.62.1",
# dataclasses for Python versions that don't have it
"dataclasses;python_version<'3.7'",
# for fast hashing
"xxhash",
# for better multiprocessing
"multiprocess",
# to get metadata of optional dependencies such as torch or tensorflow for Python versions that don't have it
"importlib_metadata;python_version<'3.8'",
# to save datasets locally or on any filesystem
# minimum 2021.05.0 to have the AbstractArchiveFileSystem
"fsspec[http]>=2021.05.0",
# for data streaming via http
"aiohttp",
# To get datasets from the Datasets Hub on huggingface.co
"huggingface_hub>=0.1.0,<1.0.0",
# Utilities from PyPA to e.g., compare versions
"packaging",
]
AUDIO_REQUIRE = [
"librosa",
]
BENCHMARKS_REQUIRE = [
"numpy==1.18.5",
"tensorflow==2.3.0",
"torch==1.6.0",
"transformers==3.0.2",
]
TESTS_REQUIRE = [
# test dependencies
"absl-py",
"pytest",
"pytest-datadir",
"pytest-xdist",
# optional dependencies
"apache-beam>=2.26.0",
"elasticsearch",
"aiobotocore",
"boto3",
"botocore",
"faiss-cpu>=1.6.4",
"fsspec[s3]",
"moto[s3,server]==2.0.4",
"rarfile>=4.0",
"s3fs==2021.08.1",
"tensorflow>=2.3,!=2.6.0,!=2.6.1",
"torch",
"torchaudio",
"transformers",
# datasets dependencies
"bs4",
"conllu",
"langdetect",
"lxml",
"mwparserfromhell",
"nltk",
"openpyxl",
"py7zr",
"tldextract",
"zstandard",
# metrics dependencies
"bert_score>=0.3.6",
"rouge_score",
"sacrebleu",
"scipy",
"seqeval",
"scikit-learn",
"jiwer",
"sentencepiece", # for bleurt
# to speed up pip backtracking
"toml>=0.10.1",
"requests_file>=1.5.1",
"tldextract>=3.1.0",
"texttable>=1.6.3",
"Werkzeug>=1.0.1",
"six~=1.15.0",
# metadata validation
"importlib_resources;python_version<'3.7'",
]
if os.name != "nt":
# dependencies of unbabel-comet
    # only test if not on windows since there are issues installing fairseq on windows
TESTS_REQUIRE.extend(
[
"wget>=3.2",
"pytorch-nlp==0.5.0",
"pytorch_lightning",
"fastBPE==0.1.0",
"fairseq",
]
)
QUALITY_REQUIRE = ["black==21.4b0", "flake8==3.7.9", "isort>=5.0.0", "pyyaml>=5.3.1"]
EXTRAS_REQUIRE = {
"audio": AUDIO_REQUIRE,
"apache-beam": ["apache-beam>=2.26.0"],
"tensorflow": ["tensorflow>=2.2.0,!=2.6.0,!=2.6.1"],
"tensorflow_gpu": ["tensorflow-gpu>=2.2.0,!=2.6.0,!=2.6.1"],
"torch": ["torch"],
"s3": [
"fsspec",
"boto3",
"botocore",
"s3fs",
],
"streaming": [], # for backward compatibility
"dev": TESTS_REQUIRE + QUALITY_REQUIRE,
"tests": TESTS_REQUIRE,
"quality": QUALITY_REQUIRE,
"benchmarks": BENCHMARKS_REQUIRE,
"docs": [
"docutils==0.16.0",
"recommonmark",
"sphinx==3.1.2",
"sphinx-markdown-tables",
"sphinx-rtd-theme==0.4.3",
"sphinxext-opengraph==0.4.1",
"sphinx-copybutton",
"fsspec<2021.9.0",
"s3fs",
"sphinx-panels",
"sphinx-inline-tabs",
"myst-parser",
],
}
setup(
name="datasets",
version="1.15.2.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
description="HuggingFace community-driven open-source library of datasets",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
author="HuggingFace Inc.",
author_email="thomas@huggingface.co",
url="https://github.com/huggingface/datasets",
download_url="https://github.com/huggingface/datasets/tags",
license="Apache 2.0",
package_dir={"": "src"},
packages=find_packages("src"),
package_data={"datasets": ["py.typed", "scripts/templates/*"], "datasets.utils.resources": ["*.json", "*.yaml"]},
entry_points={"console_scripts": ["datasets-cli=datasets.commands.datasets_cli:main"]},
install_requires=REQUIRED_PKGS,
extras_require=EXTRAS_REQUIRE,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="datasets machine learning datasets metrics",
zip_safe=False, # Required for mypy to find the py.typed file
)
| 31.434263
| 117
| 0.646641
|
df3b416df81c2e2a458da90b43af19bb43e5284a
| 173
|
py
|
Python
|
lesson9/homework.py
|
kelinsobw/lessons
|
cc0008c31c3397ddadfa80d5e77f746c0a3817f6
|
[
"BSD-3-Clause"
] | null | null | null |
lesson9/homework.py
|
kelinsobw/lessons
|
cc0008c31c3397ddadfa80d5e77f746c0a3817f6
|
[
"BSD-3-Clause"
] | null | null | null |
lesson9/homework.py
|
kelinsobw/lessons
|
cc0008c31c3397ddadfa80d5e77f746c0a3817f6
|
[
"BSD-3-Clause"
] | null | null | null |
from class_car import Car
if __name__=="__main__":
my_car=Car("Mercedes","E500",2000)
while my_car.speed<100:
my_car.speed_up()
my_car.print_speed()
| 15.727273
| 38
| 0.66474
|
ba8d36e070b54b8406954bc16352b465a740db41
| 678
|
py
|
Python
|
animate.py
|
frostburn/buddhabulb
|
1a4c5e4ef6858e02bc2127930ed3e1b721fc5a35
|
[
"MIT"
] | null | null | null |
animate.py
|
frostburn/buddhabulb
|
1a4c5e4ef6858e02bc2127930ed3e1b721fc5a35
|
[
"MIT"
] | null | null | null |
animate.py
|
frostburn/buddhabulb
|
1a4c5e4ef6858e02bc2127930ed3e1b721fc5a35
|
[
"MIT"
] | null | null | null |
import subprocess
width = 300
height = 300
layer = 1
num_layers = 50
num_frames = 60
normalizer = 350000
subprocess.call(["rm", "tmp/*.rgb"])
# subprocess.call(["rm", "./tmp/*.png"])
for i in range(num_frames):
subprocess.call(map(str, ["./process", width, height, layer, num_layers, i, num_frames, normalizer]))
subprocess.call(["mv", "out.raw", "tmp/out%02d.rgb" % (i + 1)])
subprocess.call(["convert", "-delay", "10", "-loop", "0", "-depth", "8", "-size", "%dx%d" % (width, height), "./tmp/out*.rgb", "out.gif"])
# subprocess.call(["mogrify", "-delay", "10", "-loop", "0", "-depth", "8", "-size", "%dx%d" % (width, height), "-format", "png", "./tmp/out*.rgb"])
| 32.285714
| 147
| 0.591445
|
a57902d5e2616c76d87fa71a75c7235444cdbe12
| 2,435
|
py
|
Python
|
config/urls.py
|
patruq/ronatrack
|
ce18e1a5c0d77f9cfe5425a1a10f6778343f6bd4
|
[
"MIT"
] | 1
|
2020-08-26T21:21:26.000Z
|
2020-08-26T21:21:26.000Z
|
config/urls.py
|
patruq/ronatrack
|
ce18e1a5c0d77f9cfe5425a1a10f6778343f6bd4
|
[
"MIT"
] | null | null | null |
config/urls.py
|
patruq/ronatrack
|
ce18e1a5c0d77f9cfe5425a1a10f6778343f6bd4
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
from rest_framework.authtoken.views import obtain_auth_token
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("ronatrack.users.urls", namespace="users")),
path("survey/", include("ronatrack.survey.urls", namespace="survey")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# Static file serving when using Gunicorn + Uvicorn for local web socket development
urlpatterns += staticfiles_urlpatterns()
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
path("api/token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
path("api/token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
]
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in the browser to see what the error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 37.461538
| 88
| 0.678439
|
72e4e0128a499be2451ba24919ae0a8a300129c3
| 2,453
|
py
|
Python
|
ornitho/model/field.py
|
dda-dev/ornitho-client-python
|
94d09774026786c021f35cae8cc74b65a28075d9
|
[
"MIT"
] | 3
|
2020-06-17T17:58:54.000Z
|
2022-03-27T17:26:07.000Z
|
ornitho/model/field.py
|
dda-dev/ornitho-client-python
|
94d09774026786c021f35cae8cc74b65a28075d9
|
[
"MIT"
] | null | null | null |
ornitho/model/field.py
|
dda-dev/ornitho-client-python
|
94d09774026786c021f35cae8cc74b65a28075d9
|
[
"MIT"
] | 1
|
2021-12-17T13:13:10.000Z
|
2021-12-17T13:13:10.000Z
|
from typing import List, Optional, Union
from ornitho import APIException
from ornitho.api_requester import APIRequester
from ornitho.model.abstract import ListableModel
from ornitho.model.field_option import FieldOption
class Field(ListableModel):
ENDPOINT: str = "fields"
def __init__(self, id_: int) -> None:
"""Form constructor
:param id_: ID, which is used to get the form from Biolovison
:type id_: int
"""
super(Field, self).__init__(id_)
self._options: Optional[List[FieldOption]] = None
@classmethod
def get(cls, id_: Union[int, str], short_version: bool = False) -> "Field":
"""Retrieve Object from Biolovision with given ID
:param id_: Unique identifier
:param short_version: Indicates, if a short version with foreign keys should be returned by the API.
:type id_: Union[int, str]
:type short_version: bool
:return: Instance, retrieved from Biolovision with given ID
:rtype: Field
"""
fields = cls.list_all(short_version=short_version)
for field in fields:
if field.id_ == id_:
return field
raise APIException(f"Can't find field with ID {id_}")
def refresh(
self,
short_version: bool = False,
retries: int = 0,
) -> "Field":
raise NotImplementedError
@property
def group(self) -> str:
return self._raw_data["group"]
@property
def name(self) -> str:
return self._raw_data["name"]
@property
def text(self) -> Optional[str]:
return self._raw_data["text"] if "text" in self._raw_data else None
@property
def default(self) -> int:
return int(self._raw_data["default"])
@property
def mandatory(self) -> bool:
return False if self._raw_data.get("mandatory") == "0" else True
@property
def empty_choice(self) -> bool:
return False if self._raw_data.get("empty_choice") == "0" else True
@property
def options(self) -> List[FieldOption]:
if self._options is None:
with APIRequester() as requester:
url = f"fields/{self.id_}"
response, pagination_key = requester.request(method="GET", url=url)
self._options = [
FieldOption.create_from_ornitho_json(option) for option in response
]
return self._options
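    # A minimal, hedged usage sketch (illustrative only): the field ID below is
    # hypothetical and a configured ornitho/Biolovision API client is assumed,
    # since Field.get() lists all fields remotely.
    #
    #   field = Field.get(1)
    #   print(field.group, field.name, field.mandatory)
    #   for option in field.options:  # lazily fetched from the "fields/{id}" endpoint
    #       print(option)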
| 31.857143
| 108
| 0.62128
|
95e1c07ef4805af2dea1ff54992c732d65591edf
| 3,190
|
py
|
Python
|
tests/integration_tests/charts/schema_tests.py
|
cryptomela/superset
|
69c3cc712d8399a81b5b1b4783ea17a8c1f1cf70
|
[
"Apache-2.0"
] | 1
|
2021-11-21T15:18:19.000Z
|
2021-11-21T15:18:19.000Z
|
tests/integration_tests/charts/schema_tests.py
|
cryptomela/superset
|
69c3cc712d8399a81b5b1b4783ea17a8c1f1cf70
|
[
"Apache-2.0"
] | null | null | null |
tests/integration_tests/charts/schema_tests.py
|
cryptomela/superset
|
69c3cc712d8399a81b5b1b4783ea17a8c1f1cf70
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
from unittest import mock
import pytest
from marshmallow import ValidationError
from tests.integration_tests.test_app import app
from superset.charts.schemas import ChartDataQueryContextSchema
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
)
from tests.integration_tests.fixtures.query_context import get_query_context
class TestSchema(SupersetTestCase):
@mock.patch(
"superset.common.query_context.config", {**app.config, "ROW_LIMIT": 5000},
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_query_context_limit_and_offset(self):
self.login(username="admin")
payload = get_query_context("birth_names")
# too low limit and offset
payload["queries"][0]["row_limit"] = -1
payload["queries"][0]["row_offset"] = -1
with self.assertRaises(ValidationError) as context:
_ = ChartDataQueryContextSchema().load(payload)
self.assertIn("row_limit", context.exception.messages["queries"][0])
self.assertIn("row_offset", context.exception.messages["queries"][0])
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_query_context_null_timegrain(self):
self.login(username="admin")
payload = get_query_context("birth_names")
payload["queries"][0]["extras"]["time_grain_sqla"] = None
_ = ChartDataQueryContextSchema().load(payload)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_query_context_series_limit(self):
self.login(username="admin")
payload = get_query_context("birth_names")
payload["queries"][0]["timeseries_limit"] = 2
payload["queries"][0]["timeseries_limit_metric"] = {
"expressionType": "SIMPLE",
"column": {
"id": 334,
"column_name": "gender",
"filterable": True,
"groupby": True,
"is_dttm": False,
"type": "VARCHAR(16)",
"optionName": "_col_gender",
},
"aggregate": "COUNT_DISTINCT",
"label": "COUNT_DISTINCT(gender)",
}
_ = ChartDataQueryContextSchema().load(payload)
| 40.897436
| 82
| 0.695298
|
2f4b8e487cb002019d24e2014d8ca85b42b4f5b6
| 840
|
py
|
Python
|
flask/src/flask_app/web_app/app.py
|
AlTosterino/FlaskVsFastAPI
|
db826b1bd19216ff1ae7bdba518244178d8f59bf
|
[
"MIT"
] | 5
|
2021-04-16T20:00:09.000Z
|
2022-01-23T23:39:03.000Z
|
flask/src/flask_app/web_app/app.py
|
AlTosterino/FlaskVsFastAPI
|
db826b1bd19216ff1ae7bdba518244178d8f59bf
|
[
"MIT"
] | null | null | null |
flask/src/flask_app/web_app/app.py
|
AlTosterino/FlaskVsFastAPI
|
db826b1bd19216ff1ae7bdba518244178d8f59bf
|
[
"MIT"
] | null | null | null |
import os
from http import HTTPStatus
from typing import Tuple
from flask_app.shared.exceptions import DatabaseRepositoryError
from flask_app.shared.exceptions.validation import ValidationError
from flask_app.web_app.routes import news_router
from flask import Flask
app = Flask(__name__)
app.register_blueprint(news_router)
@app.errorhandler(ValidationError)
def handle_validation_error(exc: ValidationError) -> Tuple[dict, int]:
status_code = HTTPStatus.UNPROCESSABLE_ENTITY
return {"detail": exc.errors}, status_code
@app.errorhandler(DatabaseRepositoryError)
def handle_database_error(exc: DatabaseRepositoryError) -> Tuple[dict, int]:
status_code = HTTPStatus.BAD_REQUEST
return {"detail": str(exc)}, status_code
if __name__ == "__main__":
app.run(debug=True, host=os.environ.get("APP_HOST", "127.0.0.1"))
| 28.965517
| 76
| 0.791667
|
183a376c10fb595137f133afeafc0612ccf8cad7
| 5,479
|
py
|
Python
|
src/webdav/Lockable.py
|
tseaver/Zope-RFA
|
08634f39b0f8b56403a2a9daaa6ee4479ef0c625
|
[
"ZPL-2.1"
] | 2
|
2015-12-21T10:34:56.000Z
|
2017-09-24T11:07:58.000Z
|
src/webdav/Lockable.py
|
MatthewWilkes/Zope
|
740f934fc9409ae0062e8f0cd6dcfd8b2df00376
|
[
"ZPL-2.1"
] | null | null | null |
src/webdav/Lockable.py
|
MatthewWilkes/Zope
|
740f934fc9409ae0062e8f0cd6dcfd8b2df00376
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""WebDAV support - lockable item.
"""
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import aq_base
from Persistence import PersistentMapping
from zope.interface import implements
from webdav.EtagSupport import EtagSupport
from webdav.interfaces import ILockItem
from webdav.interfaces import IWriteLock
class ResourceLockedError(Exception):
pass
class LockableItem(EtagSupport):
"""Implements the WriteLock interface.
This class is inherited by Resource which is then inherited by the
majority of Zope objects.
"""
implements(IWriteLock)
# Protect methods using declarative security
security = ClassSecurityInfo()
security.declarePrivate('wl_lockmapping')
security.declarePublic('wl_isLocked', 'wl_getLock', 'wl_isLockedByUser',
'wl_lockItems', 'wl_lockValues', 'wl_lockTokens',)
security.declareProtected('WebDAV Lock items', 'wl_setLock')
security.declareProtected('WebDAV Unlock items', 'wl_delLock')
security.declareProtected('Manage WebDAV Locks', 'wl_clearLocks')
    # Setting default roles for permissions - we want owners of content
# to be able to lock.
security.setPermissionDefault('WebDAV Lock items', ('Manager', 'Owner',))
security.setPermissionDefault('WebDAV Unlock items',('Manager','Owner',))
def wl_lockmapping(self, killinvalids=0, create=0):
""" if 'killinvalids' is 1, locks who are no longer valid
will be deleted """
try: locks = getattr(self, '_dav_writelocks', None)
except: locks = None
if locks is None:
if create:
locks = self._dav_writelocks = PersistentMapping()
else:
# Don't generate a side effect transaction.
locks = {}
return locks
elif killinvalids:
# Delete invalid locks
for token, lock in locks.items():
if not lock.isValid():
del locks[token]
if (not locks) and hasattr(aq_base(self),
'__no_valid_write_locks__'):
self.__no_valid_write_locks__()
return locks
else:
return locks
def wl_lockItems(self, killinvalids=0):
return self.wl_lockmapping(killinvalids).items()
def wl_lockValues(self, killinvalids=0):
return self.wl_lockmapping(killinvalids).values()
def wl_lockTokens(self, killinvalids=0):
return self.wl_lockmapping(killinvalids).keys()
def wl_hasLock(self, token, killinvalids=0):
if not token: return 0
return token in self.wl_lockmapping(killinvalids).keys()
def wl_isLocked(self):
# returns true if 'self' is locked at all
        # We set 'killinvalids' to 1 to delete all locks that are no longer
# valid (timeout has been exceeded)
locks = self.wl_lockmapping(killinvalids=1)
if locks.keys(): return 1
else: return 0
def wl_setLock(self, locktoken, lock):
locks = self.wl_lockmapping(create=1)
if ILockItem.providedBy(lock):
if locktoken == lock.getLockToken():
locks[locktoken] = lock
else:
raise ValueError, 'Lock tokens do not match'
else:
raise ValueError, 'Lock does not implement the LockItem Interface'
def wl_getLock(self, locktoken):
locks = self.wl_lockmapping(killinvalids=1)
return locks.get(locktoken, None)
def wl_delLock(self, locktoken):
locks = self.wl_lockmapping()
if locks.has_key(locktoken):
del locks[locktoken]
def wl_clearLocks(self):
# Called by lock management machinery to quickly and effectively
# destroy all locks.
try:
locks = self.wl_lockmapping()
locks.clear()
except:
# The locks may be totally messed up, so we'll just delete
# and replace.
if hasattr(self, '_dav_writelocks'):
del self._dav_writelocks
if IWriteLock.providedBy(self):
self._dav_writelocks = PersistentMapping()
# Call into a special hook used by LockNullResources to delete
# themselves. Could be used by other objects who want to deal
# with the state of empty locks.
if hasattr(aq_base(self), '__no_valid_write_locks__'):
self.__no_valid_write_locks__()
InitializeClass(LockableItem)
### Utility functions
def wl_isLocked(ob):
""" Returns true if the object is locked, returns 0 if the object
is not locked or does not implement the WriteLockInterface """
return wl_isLockable(ob) and ob.wl_isLocked()
def wl_isLockable(ob):
return IWriteLock.providedBy(ob)
| 35.577922
| 78
| 0.641175
|
7fb30423d009ca2113f6a0dacfeffe5a60a90b24
| 1,800
|
py
|
Python
|
haar_cascade/arac_plakasi_algilama/resimden_plaka_okuma.py
|
malidrsn/OpenCV-Tutorial-w-Basic-Examples
|
70e117ba4f3b46b937a92e35f7ddaedb46f8b7be
|
[
"MIT"
] | null | null | null |
haar_cascade/arac_plakasi_algilama/resimden_plaka_okuma.py
|
malidrsn/OpenCV-Tutorial-w-Basic-Examples
|
70e117ba4f3b46b937a92e35f7ddaedb46f8b7be
|
[
"MIT"
] | null | null | null |
haar_cascade/arac_plakasi_algilama/resimden_plaka_okuma.py
|
malidrsn/OpenCV-Tutorial-w-Basic-Examples
|
70e117ba4f3b46b937a92e35f7ddaedb46f8b7be
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import pytesseract
import imutils
img = cv2.imread("licence_plate.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# filtering operations to remove noise
filtered = cv2.bilateralFilter(gray, 7, 250, 250)
# edge detection algorithm
edges = cv2.Canny(filtered, 30, 200)
# we will search for the contours
contours = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print("Kontur ", contours)
cnts = imutils.grab_contours(contours)
# print(" Grab cnts : ", cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]  # take the first 10 values (largest contours first)
# print("Sıralanmış Cnts:", cnts)
screen = None
for c in cnts:
    epsilon = 0.018 * cv2.arcLength(c, True)  # empirically chosen approximation factor, to straighten the rectangles
    approx = cv2.approxPolyDP(c, epsilon, True)  # approximates the contour to a simpler polygon
    if len(approx) == 4:  # i.e. the polygon has 4 corners
screen = approx
break
# we will apply a mask
mask = np.zeros(gray.shape, np.uint8)
# print(screen)
new_img = cv2.drawContours(mask, [screen], 0, (255, 255, 255), -1)
new_img = cv2.bitwise_and(img, img, mask=mask)
# cropping
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
crop = gray[topx:bottomx + 1, topy:bottomy + 1]
print(np.where(mask == 255))
# text reading (OCR)
text = pytesseract.image_to_string(crop, lang="eng")
print("Detected Plate is :", text)
# display on screen
cv2.imshow("Licence Plate Original", img)
cv2.imshow("Licence Plate Gray", gray)
cv2.imshow("Licence Plate Filtered", filtered)
cv2.imshow("Licence Plate Edged", edges)
cv2.imshow("Licence Plate Masked", mask)
cv2.imshow("Licence Plate New image", new_img)
cv2.imshow("Licence Plate New Cropped", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 26.086957
| 101
| 0.713333
|
4772d3ee13a6abeda6eb319fc67d1892775f2a89
| 3,950
|
py
|
Python
|
parser/fase2/team03/parse/functions/functions_string.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 1
|
2021-01-09T09:39:57.000Z
|
2021-01-09T09:39:57.000Z
|
parser/fase2/team03/parse/functions/functions_string.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | null | null | null |
parser/fase2/team03/parse/functions/functions_string.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 4
|
2020-12-19T17:12:13.000Z
|
2021-01-07T20:29:53.000Z
|
from hashlib import md5, sha256
from parse.ast_node import ASTNode
# From here on, classes describing string functions
# TODO: Convert, SetByte, Substr
class Convert(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return True
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Decode(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return self.exp.decode('base64', 'strict')
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Encode(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return self.exp.encode('base64', 'strict')
def generate(self, table, tree):
super().generate(table, tree)
return ''
class GetByte(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return bytes(self.exp, 'utf-8')
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Length(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return len(self.exp)
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Md5(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return md5(self.exp.encode())
def generate(self, table, tree):
super().generate(table, tree)
return ''
class SetByte(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return True
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Sha256(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return sha256(self.exp)
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Substr(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return len(self.exp)
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Substring(ASTNode):
def __init__(self, exp, start, end, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
self.start = start
self.end = end
def execute(self, table, tree):
super().execute(table, tree)
return self.exp[self.start: self.end]
def generate(self, table, tree):
super().generate(table, tree)
return ''
class Trim(ASTNode):
def __init__(self, exp, line, column):
ASTNode.__init__(self, line, column)
self.exp = exp
def execute(self, table, tree):
super().execute(table, tree)
return self.exp.strip()
def generate(self, table, tree):
super().generate(table, tree)
return ''
| 24.534161
| 54
| 0.600759
|
14ef855f6636cbc3639feecca56621446ef4c505
| 4,371
|
py
|
Python
|
lib/streamlit/ReportThread.py
|
brandonJY/streamlit
|
afb51bdef42df3b9f4f1dfc23dc749974a5e4fc6
|
[
"Apache-2.0"
] | 1
|
2020-04-01T19:53:28.000Z
|
2020-04-01T19:53:28.000Z
|
lib/streamlit/ReportThread.py
|
sudachen/streamlit
|
f5326d68eb914eb5bb49da01b7f406ba4f5845d0
|
[
"Apache-2.0"
] | null | null | null |
lib/streamlit/ReportThread.py
|
sudachen/streamlit
|
f5326d68eb914eb5bb49da01b7f406ba4f5845d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from collections import namedtuple
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
class ReportContext(object):
def __init__(self, enqueue, widgets, widget_ids_this_run, uploaded_file_mgr):
# (dict) Mapping of container (type str or BlockPath) to top-level
# cursor (type AbstractCursor).
self.cursors = {}
# (callable) Function that enqueues ForwardMsg protos in the websocket.
self.enqueue = enqueue
# (Widgets) The Widgets state object for the report
self.widgets = widgets
# (_WidgetIDSet) The set of widget IDs that have been assigned in the
# current report run. This set is cleared at the start of each run.
self.widget_ids_this_run = widget_ids_this_run
# (UploadedFileManager) Object that manages files uploaded by this user.
self.uploaded_file_mgr = uploaded_file_mgr
def reset(self):
self.cursors = {}
self.widget_ids_this_run.clear()
class _WidgetIDSet(object):
"""Stores a set of widget IDs. Safe to mutate from multiple threads."""
def __init__(self):
self._lock = threading.Lock()
self._items = set()
def clear(self):
"""Clears all items in the set."""
with self._lock:
self._items.clear()
def add(self, item):
"""Adds an item to the set.
Parameters
----------
item : Any
The item to add.
Returns
-------
bool
True if the item was added, and False if it was already in
the set.
"""
with self._lock:
if item in self._items:
return False
self._items.add(item)
return True
REPORT_CONTEXT_ATTR_NAME = "streamlit_report_ctx"
class ReportThread(threading.Thread):
"""Extends threading.Thread with a ReportContext member"""
def __init__(
self, enqueue, widgets, target=None, name=None, uploaded_file_mgr=None,
):
super(ReportThread, self).__init__(target=target, name=name)
self.streamlit_report_ctx = ReportContext(
enqueue, widgets, _WidgetIDSet(), uploaded_file_mgr
)
def add_report_ctx(thread=None, ctx=None):
"""Adds the current ReportContext to a newly-created thread.
This should be called from this thread's parent thread,
before the new thread starts.
Parameters
----------
thread : threading.Thread
The thread to attach the current ReportContext to.
ctx : ReportContext or None
The ReportContext to add, or None to use the current thread's
ReportContext.
Returns
-------
threading.Thread
The same thread that was passed in, for chaining.
"""
if thread is None:
thread = threading.current_thread()
if ctx is None:
ctx = get_report_ctx()
if ctx is not None:
setattr(thread, REPORT_CONTEXT_ATTR_NAME, ctx)
return thread
def get_report_ctx():
"""
Returns
-------
ReportContext | None
The current thread's ReportContext, or None if it doesn't have one.
"""
thread = threading.current_thread()
ctx = getattr(thread, REPORT_CONTEXT_ATTR_NAME, None)
if ctx is None and streamlit._is_running_with_streamlit:
# Only warn about a missing ReportContext if we were started
# via `streamlit run`. Otherwise, the user is likely running a
# script "bare", and doesn't need to be warned about streamlit
# bits that are irrelevant when not connected to a report.
LOGGER.warning("Thread '%s': missing ReportContext" % thread.name)
return ctx
# Avoid circular dependencies in Python 2
import streamlit
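# A minimal, hedged usage sketch (illustrative only; "do_work" is a hypothetical user
# function). Inside a running Streamlit report, a worker thread started by user code
# does not inherit the ReportContext automatically, so the parent thread attaches it
# explicitly before starting the worker:
#
#   import threading
#   worker = threading.Thread(target=do_work)
#   add_report_ctx(worker)  # copies the current thread's ReportContext onto the worker
#   worker.start()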
| 30.566434
| 81
| 0.658888
|
04b230c08a3896d3794806e023a1cef80b0a9b2a
| 1,815
|
py
|
Python
|
data_structures/hash-table/hash_table.py
|
katcosgrove/data-structures-and-algorithms
|
8683268183e79c6abeeb58187101cb140c65568d
|
[
"MIT"
] | 1
|
2021-04-02T12:12:20.000Z
|
2021-04-02T12:12:20.000Z
|
data_structures/hash-table/hash_table.py
|
katcosgrove/data-structures-and-algorithms
|
8683268183e79c6abeeb58187101cb140c65568d
|
[
"MIT"
] | 2
|
2018-03-21T17:34:34.000Z
|
2018-03-26T16:46:07.000Z
|
data_structures/hash-table/hash_table.py
|
katcosgrove/data-structures-and-algorithms
|
8683268183e79c6abeeb58187101cb140c65568d
|
[
"MIT"
] | 2
|
2018-08-29T18:59:59.000Z
|
2019-04-12T21:16:14.000Z
|
from linked_list import LinkedList
# from functools import reduce
class HashTable:
"""Class for building a hash table."""
def __init__(self, max_size=1024):
self.max_size = max_size
self.buckets = [LinkedList() for _ in range(max_size)]
def __iter__(self):
keys = []
for bucket in self.buckets:
current = bucket.head
while current:
for key in current.val.keys():
keys.append(key)
current = current._next
return iter(keys)
def hash_key(self, key):
"""Get a hashed key for adding to the table."""
if type(key) is not str:
raise TypeError
return sum(map(lambda x: ord(x), key)) % self.max_size
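        # Worked example: hash_key("ab") == (ord("a") + ord("b")) % 1024
        #                               == (97 + 98) % 1024 == 195,
        # so the key "ab" always maps to bucket 195.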
def set(self, key, val):
"""Insert a key/value pair into the hash table."""
return self.buckets[self.hash_key(key)].insert({key: val})
def get(self, key):
"""Return value at given key."""
current = self.buckets[self.hash_key(key)].head
while current:
if key in current.val.keys():
return current.val[key]
current = current._next
def remove(self, key):
"""Remove value at given key."""
bucket = self.buckets[self.hash_key(key)]
current = bucket.head # lol buckethead
last = current
while current:
if key in current.val.keys():
if last is not current:
last._next = current._next
else:
bucket.head = current._next
try:
return current.val[key]
except KeyError:
raise KeyError('That value is not in the table!')
last = current
current = current._next
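if __name__ == "__main__":
    # A minimal usage sketch (illustrative only); it assumes the companion
    # linked_list module from this repository is importable.
    table = HashTable()
    table.set("spam", 1)
    table.set("eggs", 2)
    print(table.get("spam"))     # -> 1
    print(table.remove("eggs"))  # -> 2
    print(sorted(table))         # remaining keys, i.e. ['spam']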
| 29.274194
| 66
| 0.544353
|
bd9941c6022598e11ecbd41e95b3f08074e5cc74
| 4,409
|
py
|
Python
|
utils/extra/common.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 864
|
2020-09-22T18:52:27.000Z
|
2022-03-28T19:57:25.000Z
|
utils/extra/common.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 13
|
2020-09-24T10:42:21.000Z
|
2021-12-20T14:44:36.000Z
|
utils/extra/common.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 55
|
2020-09-22T19:01:19.000Z
|
2022-03-20T09:15:45.000Z
|
#!/usr/bin/env python3
# common functions
# Ange Albertini 2020
import random
import re
from string import punctuation, digits, ascii_letters
def randblock(l):
return bytes([random.randrange(255) for i in range(l)])
# Cosmetic functions ###########################################################
ASCII = (punctuation + digits + ascii_letters + " ").encode()
def hexii(c):
#replace 00 by empty char
if c == b"\0":
return b" "
#replace printable char by .<char>
if c in ASCII:
return b" " + bytes([c])
if c == 0x0a:
return b"\n"
if c == b"\r":
return b"\\r"
#otherwise, return hex
return b"%02X" % c
def hexiis(s):
return repr(b" ".join([hexii(c) for c in s]))[2:-1]
def showsplit(d, i):
WIDTH = 8
return "%s | %s" % (hexiis(d[i-WIDTH:i]), hexiis(d[i:i+WIDTH]))
# 'GCM' functions ##############################################################
def cut3(data, a):
# skip 0:a[0] -- not needed ?
return data[a[0]:a[1]], data[a[1]:a[2]], data[a[2]:]
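# Worked example: cut3(b"ABCDEF", [1, 3, 5]) returns (b"BC", b"DE", b"F");
# note that data[0:a[0]] (here b"A") is dropped.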
def mixfiles(d1, d2, cuts):
"""mixing data with exclusive parts of each data"""
assert len(d1) == len(d2)
d = b""
start = 0
keep = d1
skip = d2
for end in cuts:
d += keep[start:end]
start = end
keep, skip = skip, keep
d += keep[start:]
return d
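# Worked example: with two same-length inputs and cuts at offsets 2 and 4, the
# output alternates exclusive slices of each input:
#   mixfiles(b"AAAAAA", b"BBBBBB", [2, 4]) == b"AABBAA"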
def splitfile(data, cuts):
p1 = b""
p2 = b""
start = 0
count = 0
for end in cuts:
count += 1
p1 += data[start:end]
p2 += randblock(end-start)
start = end
p1, p2 = p2, p1
p1 += data[end:]
p2 += randblock(len(data)-end)
assert len(p1) == len(p2)
if count % 2 == 1:
p1, p2 = p2, p1
return p1, p2
# PDF functions ################################################################
def EnclosedStringS(d, starts, ends):
off = d.find(starts)
return d[off:d.find(ends, off + len(starts))]
def EnclosedString(d, starts, ends):
off = d.find(starts) + len(starts)
return d[off:d.find(ends, off)]
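# Worked example on a PDF-style dictionary:
#   EnclosedString(b"<</Count 3/Kids[4 0 R]>>", b"/Count ", b"/") == b"3"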
def getCount(d):
s = EnclosedString(d, b"/Count ", b"/")
count = int(s)
return count
def getObjDecl(d, s):
val = EnclosedString(d, s, b"0 R")
val = val.strip()
if val.decode().isnumeric():
return b"%s %s 0 R" % (s, val)
else:
return b""
def getValDecl(d, s):
"""locates declaration such as '/PageMode /UseOutlines' """
off = d.find(s) + len(s)
if off == -1:
return b""
match = re.match(b" *\/[A-Za-z0-9]*", d[off:])
if match is None:
return b""
else:
return b"%s %s" % (s, match[0])
def adjustToC(toc):
"""increasing page numbers of each ToC entry"""
for entry in toc:
d = entry[3]
if d["kind"] == 1:
d["page"] += 1
entry[2] += 1
return toc
def adjustPDF(contents):
startSig = contents.find(b"%PDF") # relative to file start
startXREF = contents.find(b"\nxref\n0 ") + 1
endXREF = contents.find(b" \n\n", startXREF) + 1
origXref = contents[startXREF:endXREF]
objCount = int(origXref.splitlines()[1].split(b" ")[1])
xrefLines = [
b"xref",
b"0 %i" % objCount,
# mutool declare its first xref like this
b"0000000000 00001 f "
]
i = 1
while i < objCount:
# only very standard object declarations
off = contents.find(b"\n%i 0 obj\n" % i) + 1
xrefLines.append(b"%010i 00000 n " % (off - startSig))
i += 1
xref = b"\n".join(xrefLines)
# XREF length should be unchanged
try:
assert len(xref) == len(origXref)
except AssertionError:
print("<:", repr(origXref))
print(">:", repr(xref))
contents = contents[:startXREF] + xref + contents[endXREF:]
startStartXref = contents.find(b"\nstartxref\n", endXREF) + len(b"\nstartxref\n")
endStartXref = contents.find(b"\n%%EOF", startStartXref)
contents = contents[:startStartXref] + b"%08i" % (startXREF - startSig) + contents[endStartXref:]
return contents
template = b"""%%PDF-1.3
%%\xC2\xB5\xC2\xB6
1 0 obj
<</Length 2 0 R>>
stream
%(payload)s
endstream
endobj
2 0 obj
%(payload_l)i
endobj
3 0 obj
<<
/Type /Catalog
/Pages 4 0 R
/Payload 1 0 R %% to prevent garbage collection
%(extra)s %% optional: Names + OpenAction + Outlines + PageMode
>>
endobj
4 0 obj
<</Type/Pages/Count %(count)i/Kids[%(kids)s]>>
endobj
"""
# a compact dummy PDF declaring an empty page
dummy = b"""%PDF-1.5
1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj
2 0 obj<</Kids[3 0 R]/Type/Pages/Count 1>>endobj
3 0 obj<</Type/Page/Contents 4 0 R>>endobj
4 0 obj<<>>endobj
xref
0 5
0000000000 65536 f
0000000009 00000 n
0000000052 00000 n
0000000101 00000 n
0000000143 00000 n
trailer<</Size 5/Root 1 0 R>>
startxref
163
%%EOF
"""
| 19.595556
| 98
| 0.607621
|
2613b9d56184e4bb217f29cbb5246e3f2438da60
| 19,421
|
py
|
Python
|
lib/python2.7/site-packages/mpl_toolkits/mplot3d/axis3d.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 1
|
2017-12-05T15:35:47.000Z
|
2017-12-05T15:35:47.000Z
|
lib/python2.7/site-packages/mpl_toolkits/mplot3d/axis3d.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 10
|
2017-07-13T00:24:03.000Z
|
2017-07-17T07:39:03.000Z
|
lib/python2.7/site-packages/mpl_toolkits/mplot3d/axis3d.py
|
wfehrnstrom/harmonize
|
e5661d24b2021739e8ac4bf1d3a530eda4e155b3
|
[
"MIT"
] | 7
|
2017-08-01T04:02:07.000Z
|
2018-10-06T21:07:20.000Z
|
# axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
from matplotlib import rcParams
from . import art3d
from . import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
if coord[index] == mins[index]:
return maxs[index]
else:
return mins[index]
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
'''Return a coordinate that is moved by "deltas" away from the center.'''
coord = copy.copy(coord)
#print coord, centers, deltas, axmask
for i in range(3):
if not axmask[i]:
continue
if coord[i] < centers[i]:
coord[i] -= deltas[i]
else:
coord[i] += deltas[i]
return coord
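    # Worked example: every component of [1, 2, 3] lies at or above the center
    # [0, 0, 0], so each one is pushed outward by its delta:
    #   move_from_center([1, 2, 3], [0, 0, 0], [0.1, 0.1, 0.1]) -> [1.1, 2.1, 3.1]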
def tick_update_position(tick, tickxs, tickys, labelpos):
'''Update tick line and label position and style.'''
for (label, on) in ((tick.label1, tick.label1On), \
(tick.label2, tick.label2On)):
if on:
label.set_position(labelpos)
tick.tick1On, tick.tick2On = True, False
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
# adir identifies which axes this is
self.adir = adir
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
if rcParams['_internal.classic_mode']:
self._axinfo.update({'label':
{'va': 'center',
'ha': 'center'},
'tick':
{'inward_factor': 0.2,
'outward_factor': 0.1,
'linewidth': rcParams['lines.linewidth'],
'color': 'k'},
'axisline':
{'linewidth': 0.75,
'color': (0, 0, 0, 1)},
'grid' :
{'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0,
'linestyle': '-'},
})
else:
self._axinfo.update({'label' :
{'va': 'center',
'ha': 'center'},
'tick' :
{'inward_factor': 0.2,
'outward_factor': 0.1,
'linewidth': rcParams.get(
adir + 'tick.major.width',
rcParams['xtick.major.width']),
'color': rcParams.get(
adir + 'tick.color',
rcParams['xtick.color'])},
'axisline':
{'linewidth': rcParams['axes.linewidth'],
'color': rcParams['axes.edgecolor']},
'grid' :
{'color': rcParams['grid.color'],
'linewidth': rcParams['grid.linewidth'],
'linestyle': rcParams['grid.linestyle']},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
self.set_rotate_label(kwargs.get('rotate_label', None))
def init3d(self):
self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True,
)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
closed=False,
alpha=0.8,
facecolor=(1,1,1,0),
edgecolor=(1,1,1,0))
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([], )
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_tick_positions(self):
majorLocs = self.major.locator()
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
return majorLabels, majorLocs
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
t.tick1line.set_transform(self.axes.transData)
t.tick2line.set_transform(self.axes.transData)
t.gridline.set_transform(self.axes.transData)
t.label1.set_transform(self.axes.transData)
t.label2.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:,:2]
self.pane.xy = xys
self.stale = True
def set_pane_color(self, color):
'''Set pane color to a RGBA tuple'''
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
self.stale = True
def set_rotate_label(self, val):
'''
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
'''
self._rotate_label = val
self.stale = True
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
if minx > maxx:
minx, maxx = maxx, minx
if miny > maxy:
miny, maxy = maxy, miny
if minz > maxz:
minz, maxz = maxz, minz
mins = np.array((minx, miny, minz))
maxs = np.array((maxx, maxy, maxz))
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
p1, p2, p3, p4 in self._PLANES]
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d')
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d')
# code from XAxis
majorTicks = self.get_major_ticks()
majorLocs = self.major.locator()
info = self._axinfo
index = info['i']
# filter locations here so that no extra grid lines are drawn
locmin, locmax = self.get_view_interval()
if locmin > locmax:
locmin, locmax = locmax, locmin
# Rudimentary clipping
majorLocs = [loc for loc in majorLocs if
locmin <= loc <= locmax]
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
edgep2 = edgep1.copy()
edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = []
for val in majorLocs:
coord = minmax.copy()
coord[index] = val
xyz0.append(coord)
# Draw labels
peparray = np.asanyarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform([peparray[0:2, 1]]) -
self.axes.transAxes.transform([peparray[0:2, 0]]))[0]
lxyz = 0.5*(edgep1 + edgep2)
# A rough estimate; points are ambiguous since 3D plots rotate
ax_scale = self.axes.bbox.size / self.figure.bbox.size
ax_inches = np.multiply(ax_scale, self.figure.get_size_inches())
ax_points_estimate = sum(72. * ax_inches)
deltas_per_point = 48. / ax_points_estimate
default_offset = 21.
labeldeltas = (self.labelpad + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2 :
outeredgep = edgep1
outerindex = 0
else :
outeredgep = edgep2
outerindex = 1
pos = copy.copy(outeredgep)
pos = move_from_center(pos, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.set_position( (olx, oly) )
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#-----------------------------------------------------------------------
        # Note: the following code determines the proper alignment of the
        # offset text. This was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but
# this seems to be more of a geometry issue (in other words, I
# might be using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
# centpt[index] <= peparray[index, outerindex])
#
        # Three-letter codes (e.g., TFT, FTT) are shorthand for the array
# of bools from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
# if FT and if highs has an even number of Trues
if (centpt[index] <= peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually, this means align right, except for the FTT case,
                # in which the offsets for axes 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2) :
align = 'left'
else :
align = 'right'
else :
# The FF case
align = 'left'
else :
# if TF and if highs has an even number of Trues
if (centpt[index] > peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
                # Usually this means align left, except if it is axis 2
if index == 2 :
align = 'right'
else :
align = 'left'
else :
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
# Draw grid lines
if len(xyz0) > 0:
# Grid points at end of one plane
xyz1 = copy.deepcopy(xyz0)
newindex = (index + 1) % 3
newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz1[i][newindex] = newval
# Grid points at end of the other plane
xyz2 = copy.deepcopy(xyz0)
newindex = (index + 2) % 3
newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz2[i][newindex] = newval
lines = list(zip(xyz1, xyz0, xyz2))
if self.axes._draw_grid:
self.gridlines.set_segments(lines)
self.gridlines.set_color([info['grid']['color']] * len(lines))
self.gridlines.set_linewidth(
[info['grid']['linewidth']] * len(lines))
self.gridlines.set_linestyle(
[info['grid']['linestyle']] * len(lines))
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
if tick is None:
continue
# Get tick line positions
pos = copy.copy(edgep1)
pos[index] = loc
pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
ticksign * tickdelta
x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
ticksign * tickdelta
x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
# Get position of label
default_offset = 8. # A rough estimate
labeldeltas = (tick.get_pad() + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.tick1line.set_linewidth(info['tick']['linewidth'])
tick.tick1line.set_color(info['tick']['color'])
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
renderer.close_group('axis3d')
self.stale = False
def get_view_interval(self):
"""return the Interval instance for this 3d axis view limits"""
return self.v_interval
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.v_interval = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
# TODO: Get this to work properly when mplot3d supports
# the transforms framework.
def get_tightbbox(self, renderer) :
# Currently returns None so that Axis.get_tightbbox
# doesn't return junk info.
return None
# Use classes to look at different data limits
class XAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.zz_dataLim.intervalx
| 39.393509
| 87
| 0.530045
|
18857771b04e321d3dd8ce1933ef2e836bca59e7
| 19,703
|
py
|
Python
|
dist_matrix/njit_dist_matrix_full.py
|
sparks-baird/wasserstein-distance
|
a37b5c2a0f7322cd8edb1432148445cf5de468a2
|
[
"MIT"
] | 5
|
2021-12-10T15:48:55.000Z
|
2022-03-29T15:28:06.000Z
|
dist_matrix/njit_dist_matrix_full.py
|
sparks-baird/wasserstein-distance
|
a37b5c2a0f7322cd8edb1432148445cf5de468a2
|
[
"MIT"
] | null | null | null |
dist_matrix/njit_dist_matrix_full.py
|
sparks-baird/wasserstein-distance
|
a37b5c2a0f7322cd8edb1432148445cf5de468a2
|
[
"MIT"
] | null | null | null |
"""Nopython version of dist_matrix."""
import os
import numpy as np
from math import sqrt
from numba import prange, njit
from numba.types import int32, float32, int64, float64
from dist_matrix.utils import cpu_helper as hp
# settings
inline = os.environ.get("INLINE", "never")
# interpret the flags as "1" == enabled; note that bool() on a non-empty string
# such as "0" would always be True, so compare against the string explicitly
fastmath = os.environ.get("FASTMATH", "1") == "1"
parallel = os.environ.get("PARALLEL", "1") == "1"
debug = os.environ.get("DEBUG", "0") == "1"
def dist_matrix(
U,
V=None,
U_weights=None,
V_weights=None,
pairs=None,
metric="euclidean",
USE_64=False,
):
"""
Compute pairwise distances using Numba/CUDA.
Parameters
----------
    U : 2D array
        First set of vectors for which to compute pairwise distances.
    V : 2D array, optional
        Second set of vectors for which to compute pairwise distances. If not
        specified, distances are computed within U itself.
    U_weights, V_weights : 2D array, optional
        Weights associated with the rows of U and V (used by the Wasserstein
        metric).
    pairs : array, optional
        List of 2-tuples which contain the indices for which to compute distances.
        If V was specified, then the second index accesses V instead of U.
        If not specified, then the pairs are auto-generated. If V was specified,
        all combinations of the two vector sets are used. If V isn't specified,
        then only the upper triangle (minus diagonal) pairs are computed.
metric : str, optional
Possible options are 'euclidean', 'wasserstein'.
Defaults to Euclidean distance. These are converted to integers internally
due to Numba's lack of support for string arguments (2021-08-14).
See compute_distance() for other keys. For example, 0 corresponds to Euclidean
distance and 1 corresponds to Wasserstein distance.
Returns
-------
out : array
A pairwise distance matrix, or if pairs are specified, then a vector of
distances corresponding to the pairs.
"""
cols = U.shape[1]
# %% Metrics specific to njit / CPU implementations
cols_plus_1 = cols + 1
tot_cols = cols * 2
tot_cols_minus_1 = tot_cols - 1
if USE_64:
bits = 64
bytes = 8
nb_float = float64
nb_int = int64
np_float = np.float64
np_int = np.int64
else:
bits = 32
bytes = 4
nb_float = float32
nb_int = int32
np_float = np.float32
np_int = np.int32
# @njit(fastmath=fastmath, debug=debug)
@njit(
"f{0}(f{0}[:], f{0}[:], f{0}[:], f{0}[:], i{0}, b1, b1, b1)".format(bytes),
fastmath=fastmath,
debug=debug,
)
def cdf_distance(
u, v, u_weights, v_weights, p, presorted, cumweighted, prepended
): # noqa
r"""# noqa
Compute distance between two 1D distributions :math:`u` and :math:`v`.
The respective CDFs are :math:`U` and :math:`V`, and the
statistical distance is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
p is a positive parameter; p = 1 gives the Wasserstein distance,
p = 2 gives the energy distance.
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like
Weight for each value.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from
1, it must still be positive and finite so that the weights can
be normalized to sum to 1.
p : scalar
positive parameter that determines the type of cdf distance.
presorted : bool
Whether u and v have been sorted already *and* u_weights and
v_weights have been sorted using the same indices used to sort
u and v, respectively.
cumweighted : bool
Whether u_weights and v_weights have been converted to their
cumulative weights via e.g. np.cumsum().
prepended : bool
            Whether a zero has been prepended to the accumulated, sorted
            u_weights and v_weights.
        By setting presorted, cumweighted, *and* prepended to False, the
        computation proceeds in the same fashion as _cdf_distance
from scipy.stats.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from
samples whose values are effectively inputs of the function, or
they can be seen as generalized functions, in which case they are
weighted sums of Dirac delta functions located at the specified
values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan,
Hoyer, Munos "The Cramer Distance as a Solution to Biased
Wasserstein Gradients" (2017). :arXiv:`1705.10743`.
"""
# allocate local float arrays
# combined vector
uv = np.zeros(tot_cols, dtype=np_float)
uv_deltas = np.zeros(tot_cols_minus_1, dtype=np_float)
# CDFs
u_cdf = np.zeros(tot_cols_minus_1, dtype=np_float)
v_cdf = np.zeros(tot_cols_minus_1, dtype=np_float)
# allocate local int arrays
# CDF indices via binary search
u_cdf_indices = np.zeros(tot_cols_minus_1, dtype=np_int)
v_cdf_indices = np.zeros(tot_cols_minus_1, dtype=np_int)
u_cdf_sorted_cumweights = np.zeros(tot_cols_minus_1, dtype=np_float)
v_cdf_sorted_cumweights = np.zeros(tot_cols_minus_1, dtype=np_float)
# short-circuit
if presorted and cumweighted and prepended:
u_sorted = u
v_sorted = v
u_0_cumweights = u_weights
v_0_cumweights = v_weights
# sorting, accumulating, and prepending (for compatibility)
else:
# check arguments
if not presorted and (cumweighted or prepended):
raise ValueError(
"if cumweighted or prepended are True, then presorted cannot be False"
) # noqa
if (not presorted or not cumweighted) and prepended:
raise ValueError(
"if prepended is True, then presorted and cumweighted must both be True"
) # noqa
# sorting
if not presorted:
# local arrays
u_sorted = np.zeros(cols, dtype=np_float)
v_sorted = np.zeros(cols, dtype=np_float)
u_sorter = np.zeros(cols, dtype=np_int)
v_sorter = np.zeros(cols, dtype=np_int)
u_sorted_weights = np.zeros(cols, dtype=np_float)
v_sorted_weights = np.zeros(cols, dtype=np_float)
# local copy since quickArgSortIterative sorts in-place
hp.copy(u, u_sorted)
hp.copy(v, v_sorted)
# sorting
hp.insertionArgSort(u_sorted, u_sorter)
hp.insertionArgSort(v_sorted, v_sorter)
# inplace to avoid extra cuda local array
hp.sort_by_indices(u_weights, u_sorter, u_sorted_weights)
hp.sort_by_indices(v_weights, v_sorter, v_sorted_weights)
# cumulative weights
if not cumweighted:
# local arrays
u_cumweights = np.zeros(cols, dtype=np_float)
v_cumweights = np.zeros(cols, dtype=np_float)
# accumulate
hp.cumsum(u_sorted_weights, u_cumweights)
hp.cumsum(v_sorted_weights, v_cumweights)
# prepend weights with zero
if not prepended:
zero = np.zeros(1, dtype=np_float)
u_0_cumweights = np.zeros(cols_plus_1, dtype=np_float)
v_0_cumweights = np.zeros(cols_plus_1, dtype=np_float)
hp.concatenate(zero, u_cumweights, u_0_cumweights)
hp.concatenate(zero, v_cumweights, v_0_cumweights)
# concatenate u and v into uv
hp.concatenate(u_sorted, v_sorted, uv)
# sorting
# quickSortIterative(uv, uv_stack)
hp.insertionSort(uv)
# Get the respective positions of the values of u and v among the
# values of both distributions. See also np.searchsorted
hp.bisect_right(u_sorted, uv[:-1], u_cdf_indices)
hp.bisect_right(v_sorted, uv[:-1], v_cdf_indices)
# empirical CDFs
hp.sort_by_indices(u_0_cumweights, u_cdf_indices, u_cdf_sorted_cumweights)
hp.divide(u_cdf_sorted_cumweights, u_0_cumweights[-1], u_cdf)
hp.sort_by_indices(v_0_cumweights, v_cdf_indices, v_cdf_sorted_cumweights)
hp.divide(v_cdf_sorted_cumweights, v_0_cumweights[-1], v_cdf)
# # Integration
hp.diff(uv, uv_deltas) # See also np.diff
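        # hp.integrate is assumed to evaluate the discrete form of the CDF
        # distance over the merged support points, i.e. roughly
        # (sum_i |u_cdf[i] - v_cdf[i]|**p * uv_deltas[i]) ** (1/p),
        # in the same fashion as scipy's _cdf_distance.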
out = hp.integrate(u_cdf, v_cdf, uv_deltas, p)
return out
# @njit(fastmath=fastmath, debug=debug)
@njit(
"f{0}(f{0}[:], f{0}[:], f{0}[:], f{0}[:], b1, b1, b1)".format(bytes),
fastmath=fastmath,
debug=debug,
)
def wasserstein_distance(
u, v, u_weights, v_weights, presorted, cumweighted, prepended
): # noqa
r"""
Compute the first Wasserstein distance between two 1D distributions.
This distance is also known as the earth mover's distance, since it can be
seen as the minimum amount of "work" required to transform :math:`u` into
:math:`v`, where "work" is measured as the amount of distribution weight
that must be moved, multiplied by the distance it has to be moved.
Source
------
https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/stats/stats.py#L8245-L8319 # noqa
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
return cdf_distance(
u, v, u_weights, v_weights, np_int(1), presorted, cumweighted, prepended
) # noqa
# @njit(fastmath=fastmath, debug=debug)
@njit(
"f{0}(f{0}[:], f{0}[:])".format(bytes),
fastmath=fastmath,
debug=debug,
)
def euclidean_distance(a, b):
"""
Calculate Euclidean distance between vectors a and b.
Parameters
----------
a : 1D array
First vector.
b : 1D array
Second vector.
Returns
-------
d : numeric scalar
Euclidean distance between vectors a and b.
"""
d = 0
for i in range(len(a)):
d += (b[i] - a[i]) ** 2
d = sqrt(d)
return d
# %% Top-level distance calculation functions
# @njit(fastmath=fastmath, debug=debug)
@njit(
"f{0}(f{0}[:], f{0}[:], f{0}[:], f{0}[:], i{0})".format(bytes),
fastmath=fastmath,
debug=debug,
)
def compute_distance(u, v, u_weights, v_weights, metric_num):
"""
Calculate weighted distance between two vectors, u and v.
Parameters
----------
u : 1D array of float
First vector.
v : 1D array of float
Second vector.
u_weights : 1D array of float
Weights for u.
v_weights : 1D array of float
Weights for v.
metric_num : int
Which metric to use (0 == "euclidean", 1=="wasserstein").
Raises
------
NotImplementedError
"Specified metric is mispelled or has not been implemented yet.
If not implemented, consider submitting a pull request."
Returns
-------
d : float
Weighted distance between u and v.
"""
if metric_num == 0:
# d = np.linalg.norm(vec - vec2)
d = euclidean_distance(u, v)
elif metric_num == 1:
# d = my_wasserstein_distance(vec, vec2)
# d = wasserstein_distance(
# u, v, u_weights=u_weights, v_weights=v_weights, p=1, presorted=True
# )
d = wasserstein_distance(u, v, u_weights, v_weights, True, True, True)
else:
raise NotImplementedError(
"Specified metric is mispelled or has not been implemented yet. \
If not implemented, consider submitting a pull request."
)
return d
# @njit(fastmath=fastmath, parallel=parallel, debug=debug)
@njit(
"void(f{0}[:,:], f{0}[:,:], f{0}[:,:], f{0}[:,:], i{0}[:,:], f{0}[:], i{0})".format(
bytes
),
fastmath=fastmath,
parallel=parallel,
debug=debug,
)
def sparse_distance_matrix(U, V, U_weights, V_weights, pairs, out, metric_num):
"""
Calculate sparse pairwise distances between two sets of vectors for pairs.
Parameters
----------
        U : 2D array of float
            First set of vectors between which to compute pairwise distances.
        V : 2D array of float
            Second set of vectors between which to compute pairwise distances.
        pairs : 2D array of int
            All (i, j) index pairs for which distances are to be computed.
        out : 1D array of float
            The initialized array which will be populated with distances.
Raises
------
ValueError
Both matrices should have the same number of columns.
Returns
-------
None.
"""
npairs = pairs.shape[0]
for k in prange(npairs):
pair = pairs[k]
i, j = pair
u = U[i]
v = V[j]
uw = U_weights[i]
vw = V_weights[j]
d = compute_distance(u, v, uw, vw, metric_num)
out[k] = d
# @njit(fastmath=fastmath, parallel=parallel, debug=debug)
@njit(
"void(f{0}[:,:], f{0}[:,:], f{0}[:,:], i{0})".format(bytes),
fastmath=fastmath,
parallel=parallel,
debug=debug,
)
def one_set_distance_matrix(U, U_weights, out, metric_num):
"""
Calculate pairwise distances within single set of vectors.
Parameters
----------
U : 2D array of float
Vertically stacked vectors.
U_weights : 2D array of float
Vertically stacked weight vectors.
out : 2D array of float
Initialized matrix to populate with pairwise distances.
metric_num : int
Which metric to use (0 == "euclidean", 1=="wasserstein").
Returns
-------
None.
"""
dm_rows = U.shape[0]
dm_cols = U.shape[0]
for i in prange(dm_rows):
for j in range(dm_cols):
if i < j:
u = U[i]
v = U[j]
uw = U_weights[i]
vw = U_weights[j]
d = compute_distance(u, v, uw, vw, metric_num)
out[i, j] = d
out[j, i] = d
# faster compilation *and* runtimes with explicit signature (tested on cuda.jit)
# @njit(fastmath=fastmath, parallel=parallel, debug=debug)
@njit(
"void(f{0}[:,:], f{0}[:,:], f{0}[:,:], f{0}[:,:], f{0}[:,:], i{0})".format(
bytes
),
fastmath=fastmath,
parallel=parallel,
debug=debug,
)
def two_set_distance_matrix(U, V, U_weights, V_weights, out, metric_num):
"""Calculate distance matrix between two sets of vectors."""
dm_rows = U.shape[0]
dm_cols = V.shape[0]
for i in prange(dm_rows):
for j in range(dm_cols):
u = U[i]
v = V[j]
uw = U_weights[i]
vw = V_weights[j]
d = compute_distance(u, v, uw, vw, metric_num)
out[i, j] = d
# is it distance matrix between two sets of vectors rather than within a single set?
isXY = V is not None
# were pairs specified? (useful for sparse matrix generation)
pairQ = pairs is not None
# assign metric_num based on specified metric (Numba doesn't support strings)
metric_dict = {"euclidean": np_int(0), "wasserstein": np_int(1)}
metric_num = metric_dict[metric]
m = U.shape[0]
if isXY:
m2 = V.shape[0]
else:
m2 = m
if pairQ:
npairs = pairs.shape[0]
shape = (npairs,)
else:
shape = (m, m2)
# sorting and cumulative weights
if metric == "wasserstein":
# presort values (and weights by sorted value indices)
U_sorter = np.argsort(U)
U = np.take_along_axis(U, U_sorter, axis=-1)
U_weights = np.take_along_axis(U_weights, U_sorter, axis=-1)
# calculate cumulative weights
U_weights = np.cumsum(U_weights, axis=1)
# prepend a column of zeros
zero = np.zeros((U_weights.shape[0], 1))
U_weights = np.column_stack((zero, U_weights))
# do the same for V and V_weights
if isXY:
V_sorter = np.argsort(V)
V = np.take_along_axis(V, V_sorter, axis=-1)
V_weights = np.take_along_axis(V_weights, V_sorter, axis=-1)
V_weights = np.cumsum(V_weights, axis=1)
            # size the zero column for V, which may have a different number of
            # rows than U
            zero_v = np.zeros((V_weights.shape[0], 1))
            V_weights = np.column_stack((zero_v, V_weights))
out = np.zeros(shape, dtype=np_float)
U = U.astype(np_float)
if V is not None:
V = V.astype(np_float)
if U_weights is not None:
U_weights = U_weights.astype(np_float)
if V_weights is not None:
V_weights = V_weights.astype(np_float)
if pairs is not None:
pairs = pairs.astype(np_int)
if isXY and not pairQ:
# distance matrix between two sets of vectors
two_set_distance_matrix(U, V, U_weights, V_weights, out, metric_num)
elif not isXY and pairQ:
# specified pairwise distances within single set of vectors
sparse_distance_matrix(U, U, U_weights, U_weights, pairs, out, metric_num)
elif not isXY and not pairQ:
# distance matrix within single set of vectors
one_set_distance_matrix(U, U_weights, out, metric_num)
elif isXY and pairQ:
# specified pairwise distances between two sets of vectors
sparse_distance_matrix(U, V, U_weights, V_weights, pairs, out, metric_num)
return out
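# A minimal usage sketch of dist_matrix for the Wasserstein metric, assuming
# small random inputs; the shapes and values below are hypothetical and chosen
# purely for illustration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    U = rng.random((4, 10))    # 4 distributions with 10 support points each
    U_w = rng.random((4, 10))  # positive weights for each support point
    # 4 x 4 symmetric matrix of pairwise Wasserstein distances within U
    d = dist_matrix(U, U_weights=U_w, metric="wasserstein")
    print(d.shape)  # expected: (4, 4)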
| 34.266087
| 124
| 0.583667
|
c68d83c5b7bb5794171d3a0a8f5ba2dc82c3db25
| 10,628
|
py
|
Python
|
nova/network/quantum/melange_ipam_lib.py
|
bopopescu/trusted-nova
|
b440afb89f6f170c0831f5d6318a08ec41bc8c0a
|
[
"Apache-2.0"
] | 1
|
2015-07-15T08:51:16.000Z
|
2015-07-15T08:51:16.000Z
|
nova/network/quantum/melange_ipam_lib.py
|
bopopescu/trusted-nova
|
b440afb89f6f170c0831f5d6318a08ec41bc8c0a
|
[
"Apache-2.0"
] | null | null | null |
nova/network/quantum/melange_ipam_lib.py
|
bopopescu/trusted-nova
|
b440afb89f6f170c0831f5d6318a08ec41bc8c0a
|
[
"Apache-2.0"
] | 2
|
2019-06-12T00:52:15.000Z
|
2020-07-24T10:35:29.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.network.quantum import melange_connection
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def get_ipam_lib(net_man):
return QuantumMelangeIPAMLib()
class QuantumMelangeIPAMLib(object):
"""Implements Quantum IP Address Management (IPAM) interface
    using the Melange service, which is accessed using the Melange
web services API.
"""
def __init__(self):
"""Initialize class used to connect to Melange server"""
self.m_conn = melange_connection.MelangeConnection()
def create_subnet(self, context, label, project_id,
quantum_net_id, priority, cidr=None,
gateway=None, gateway_v6=None, cidr_v6=None,
dns1=None, dns2=None):
"""Contact Melange and create a subnet for any non-NULL
IPv4 or IPv6 subnets.
        Also create an entry in the Nova networks DB, but only
to store values not represented in Melange or to
temporarily provide compatibility with Nova code that
accesses IPAM data directly via the DB (e.g., nova-api)
"""
tenant_id = project_id or FLAGS.quantum_default_tenant_id
if cidr:
self.m_conn.create_block(quantum_net_id, cidr,
project_id=tenant_id,
gateway=gateway,
dns1=dns1, dns2=dns2)
if cidr_v6:
self.m_conn.create_block(quantum_net_id, cidr_v6,
project_id=tenant_id,
gateway=gateway_v6,
dns1=dns1, dns2=dns2)
net = {"uuid": quantum_net_id,
"project_id": tenant_id,
"priority": priority,
"label": label}
if FLAGS.quantum_use_dhcp:
if cidr:
n = netaddr.IPNetwork(cidr)
net['dhcp_start'] = netaddr.IPAddress(n.first + 2)
else:
net['dhcp_start'] = None
admin_context = context.elevated()
network = db.network_create_safe(admin_context, net)
def allocate_fixed_ips(self, context, project_id, quantum_net_id,
network_tenant_id, vif_ref):
"""Pass call to allocate fixed IP on to Melange"""
ips = self.m_conn.allocate_ip(quantum_net_id, network_tenant_id,
vif_ref['uuid'], project_id,
vif_ref['address'])
return [ip['address'] for ip in ips]
def delete_subnets_by_net_id(self, context, net_id, project_id):
"""Find Melange block associated with the Quantum UUID,
then tell Melange to delete that block.
"""
admin_context = context.elevated()
tenant_id = project_id or FLAGS.quantum_default_tenant_id
all_blocks = self.m_conn.get_blocks(tenant_id)
for b in all_blocks['ip_blocks']:
if b['network_id'] == net_id:
self.m_conn.delete_block(b['id'], tenant_id)
network = db.network_get_by_uuid(admin_context, net_id)
db.network_delete_safe(context, network['id'])
def get_networks_by_tenant(self, admin_context, tenant_id):
nets = {}
blocks = self.m_conn.get_blocks(tenant_id)
for ip_block in blocks['ip_blocks']:
network_id = ip_block['network_id']
network = db.network_get_by_uuid(admin_context, network_id)
nets[network_id] = network
return nets.values()
def get_global_networks(self, admin_context):
return self.get_networks_by_tenant(admin_context,
FLAGS.quantum_default_tenant_id)
def get_project_networks(self, admin_context):
try:
nets = db.network_get_all(admin_context.elevated())
except exception.NoNetworksFound:
return []
# only return networks with a project_id set
return [net for net in nets if net['project_id']]
def get_project_and_global_net_ids(self, context, project_id):
"""Fetches all networks associated with this project, or
that are "global" (i.e., have no project set).
Returns list sorted by 'priority' (lowest integer value
is highest priority).
"""
if project_id is None:
raise Exception(_("get_project_and_global_net_ids must be called"
" with a non-null project_id"))
admin_context = context.elevated()
# Decorate with priority
priority_nets = []
for tenant_id in (project_id, FLAGS.quantum_default_tenant_id):
nets = self.get_networks_by_tenant(admin_context, tenant_id)
for network in nets:
priority = network['priority']
priority_nets.append((priority, network['uuid'], tenant_id))
# Sort by priority
priority_nets.sort()
# Undecorate
return [(network_id, tenant_id)
for priority, network_id, tenant_id in priority_nets]
def get_tenant_id_by_net_id(self, context, net_id, vif_id, project_id):
ipam_tenant_id = None
tenant_ids = [FLAGS.quantum_default_tenant_id, project_id, None]
        # This is confusing: if there are IPs for the given net/vif/tenant
        # trifecta, we assume that is the tenant for that network
for tid in tenant_ids:
try:
self.m_conn.get_allocated_ips(net_id, vif_id, tid)
except KeyError:
continue
ipam_tenant_id = tid
break
return ipam_tenant_id
# TODO(bgh): Rename this method .. it's now more of a
# "get_subnets_by_net_id_and_vif_id" method, but we could probably just
# call it "get_subnets".
def get_subnets_by_net_id(self, context, tenant_id, net_id, vif_id):
"""Returns information about the IPv4 and IPv6 subnets
associated with a Quantum Network UUID.
"""
subnets = []
ips = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)
for ip_address in ips:
block = ip_address['ip_block']
subnet = {'network_id': block['network_id'],
'id': block['id'],
'cidr': block['cidr'],
'gateway': block['gateway'],
'broadcast': block['broadcast'],
'netmask': block['netmask'],
'dns1': block['dns1'],
'dns2': block['dns2']}
if ip_address['version'] == 4:
subnet['version'] = 4
else:
subnet['version'] = 6
subnets.append(subnet)
return subnets
def get_routes_by_ip_block(self, context, block_id, project_id):
"""Returns the list of routes for the IP block"""
return self.m_conn.get_routes(block_id, project_id)
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list of IPv4 address strings associated with
the specified virtual interface.
"""
return self._get_ips_by_interface(context, net_id, vif_id,
project_id, 4)
def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list of IPv6 address strings associated with
the specified virtual interface.
"""
return self._get_ips_by_interface(context, net_id, vif_id,
project_id, 6)
def _get_ips_by_interface(self, context, net_id, vif_id, project_id,
ip_version):
"""Helper method to fetch v4 or v6 addresses for a particular
virtual interface.
"""
tenant_id = project_id or FLAGS.quantum_default_tenant_id
ip_list = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)
return [ip['address'] for ip in ip_list
if netaddr.IPNetwork(ip['address']).version == ip_version]
def verify_subnet_exists(self, context, project_id, quantum_net_id):
"""Confirms that a subnet exists that is associated with the
specified Quantum Network UUID.
"""
# TODO(bgh): Would be nice if we could just do something like:
# GET /ipam/tenants/{tenant_id}/networks/{network_id}/ instead
# of searching through all the blocks. Checking for a 404
# will then determine whether it exists.
tenant_id = project_id or FLAGS.quantum_default_tenant_id
all_blocks = self.m_conn.get_blocks(tenant_id)
for b in all_blocks['ip_blocks']:
if b['network_id'] == quantum_net_id:
return True
return False
def deallocate_ips_by_vif(self, context, project_id, net_id, vif_ref):
"""Deallocate all fixed IPs associated with the specified
virtual interface.
"""
tenant_id = project_id or FLAGS.quantum_default_tenant_id
self.m_conn.deallocate_ips(net_id, vif_ref['uuid'], tenant_id)
def get_allocated_ips(self, context, subnet_id, project_id):
ips = self.m_conn.get_allocated_ips_for_network(subnet_id, project_id)
return [(ip['address'], ip['interface_id']) for ip in ips]
def create_vif(self, vif_id, instance_id, project_id=None):
"""Create a new vif with the specified information.
"""
tenant_id = project_id or FLAGS.quantum_default_tenant_id
return self.m_conn.create_vif(vif_id, instance_id, tenant_id)
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""This call is not supported in quantum yet"""
return []
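# A rough usage sketch, assuming a nova request context, a Quantum network UUID
# and a virtual interface record (vif_ref) are already available; all concrete
# values below (project name, UUIDs, CIDR) are hypothetical.
#
#   ipam = get_ipam_lib(None)
#   ipam.create_subnet(context, "private", "my-project", "net-uuid",
#                      priority=1, cidr="10.0.0.0/24")
#   ips = ipam.allocate_fixed_ips(context, "my-project", "net-uuid",
#                                 "my-project", vif_ref)
#   subnets = ipam.get_subnets_by_net_id(context, "my-project", "net-uuid",
#                                        vif_ref["uuid"])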
| 41.354086
| 78
| 0.616014
|
a073705505eac8829812e10ea128977165683972
| 10,316
|
py
|
Python
|
src/twisted/python/test/test_sendmsg.py
|
apjanke/twisted
|
22f949f7ce187513f0c218b73186c8a73baa00b4
|
[
"Unlicense",
"MIT"
] | 1
|
2021-01-03T01:54:14.000Z
|
2021-01-03T01:54:14.000Z
|
src/twisted/python/test/test_sendmsg.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/twisted/python/test/test_sendmsg.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.sendmsg}.
"""
import os
import sys
import errno
import warnings
from os import pipe, read, close, pathsep
from struct import pack
from socket import SOL_SOCKET, AF_INET, AF_INET6, socket, error
try:
from socket import AF_UNIX, socketpair
except ImportError:
nonUNIXSkip = True
else:
nonUNIXSkip = False
from unittest import skipIf
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.error import ProcessDone
from twisted.internet.protocol import ProcessProtocol
from twisted.python.filepath import FilePath
from twisted.python.runtime import platform
from twisted.trial.unittest import TestCase
if platform.isLinux():
from socket import MSG_DONTWAIT
dontWaitSkip = False
else:
# It would be nice to be able to test flags on more platforms, but finding
# a flag that works *at all* is somewhat challenging.
dontWaitSkip = True
try:
from twisted.python.sendmsg import sendmsg, recvmsg
from twisted.python.sendmsg import SCM_RIGHTS, getSocketFamily
except ImportError:
doImportSkip = True
importSkipReason = "Platform doesn't support sendmsg."
else:
doImportSkip = False
importSkipReason = ""
class _FDHolder:
"""
    A wrapper around an FD that will remember if it has been closed or not.
"""
def __init__(self, fd):
self._fd = fd
def fileno(self):
"""
Return the fileno of this FD.
"""
return self._fd
def close(self):
"""
Close the FD. If it's already been closed, do nothing.
"""
if self._fd:
close(self._fd)
self._fd = None
def __del__(self):
"""
If C{self._fd} is unclosed, raise a warning.
"""
if self._fd:
warnings.warn("FD %s was not closed!" % (self._fd,), ResourceWarning)
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def _makePipe():
"""
Create a pipe, and return the two FDs wrapped in L{_FDHolders}.
"""
r, w = pipe()
return (_FDHolder(r), _FDHolder(w))
class ExitedWithStderr(Exception):
"""
A process exited with some stderr.
"""
def __str__(self) -> str:
"""
Dump the errors in a pretty way in the event of a subprocess traceback.
"""
result = b"\n".join([b""] + list(self.args))
return repr(result)
class StartStopProcessProtocol(ProcessProtocol):
"""
An L{IProcessProtocol} with a Deferred for events where the subprocess
starts and stops.
@ivar started: A L{Deferred} which fires with this protocol's
L{IProcessTransport} provider when it is connected to one.
@ivar stopped: A L{Deferred} which fires with the process output or a
failure if the process produces output on standard error.
@ivar output: A C{str} used to accumulate standard output.
@ivar errors: A C{str} used to accumulate standard error.
"""
def __init__(self):
self.started = Deferred()
self.stopped = Deferred()
self.output = b""
self.errors = b""
def connectionMade(self):
self.started.callback(self.transport)
def outReceived(self, data):
self.output += data
def errReceived(self, data):
self.errors += data
def processEnded(self, reason):
if reason.check(ProcessDone):
self.stopped.callback(self.output)
else:
self.stopped.errback(ExitedWithStderr(self.errors, self.output))
def _spawn(script, outputFD):
"""
Start a script that is a peer of this test as a subprocess.
@param script: the module name of the script in this directory (no
package prefix, no '.py')
@type script: C{str}
@rtype: L{StartStopProcessProtocol}
"""
pyExe = FilePath(sys.executable).asTextMode().path
env = dict(os.environ)
env["PYTHONPATH"] = FilePath(pathsep.join(sys.path)).asTextMode().path
sspp = StartStopProcessProtocol()
reactor.spawnProcess(
sspp,
pyExe,
[
pyExe,
FilePath(__file__).sibling(script + ".py").asTextMode().path,
b"%d" % (outputFD,),
],
env=env,
childFDs={0: "w", 1: "r", 2: "r", outputFD: outputFD},
)
return sspp
@skipIf(doImportSkip, importSkipReason)
class SendmsgTests(TestCase):
"""
Tests for the Python2/3 compatible L{sendmsg} interface.
"""
def setUp(self):
"""
Create a pair of UNIX sockets.
"""
self.input, self.output = socketpair(AF_UNIX)
def tearDown(self):
"""
Close the sockets opened by setUp.
"""
self.input.close()
self.output.close()
def test_syscallError(self):
"""
If the underlying C{sendmsg} call fails, L{send1msg} raises
L{socket.error} with its errno set to the underlying errno value.
"""
self.input.close()
exc = self.assertRaises(error, sendmsg, self.input, b"hello, world")
self.assertEqual(exc.args[0], errno.EBADF)
def test_syscallErrorWithControlMessage(self):
"""
The behavior when the underlying C{sendmsg} call fails is the same
whether L{sendmsg} is passed ancillary data or not.
"""
self.input.close()
exc = self.assertRaises(
error, sendmsg, self.input, b"hello, world", [(0, 0, b"0123")], 0
)
self.assertEqual(exc.args[0], errno.EBADF)
def test_roundtrip(self):
"""
L{recvmsg} will retrieve a message sent via L{sendmsg}.
"""
message = b"hello, world!"
self.assertEqual(len(message), sendmsg(self.input, message))
result = recvmsg(self.output)
self.assertEqual(result.data, b"hello, world!")
self.assertEqual(result.flags, 0)
self.assertEqual(result.ancillary, [])
def test_shortsend(self):
"""
L{sendmsg} returns the number of bytes which it was able to send.
"""
message = b"x" * 1024 * 1024 * 16
self.input.setblocking(False)
sent = sendmsg(self.input, message)
# Sanity check - make sure the amount of data we sent was less than the
# message, but not the whole message, as we should have filled the send
# buffer. This won't work if the send buffer is large enough for
        # the message, though.
self.assertTrue(sent < len(message))
received = recvmsg(self.output, len(message))
self.assertEqual(len(received[0]), sent)
def test_roundtripEmptyAncillary(self):
"""
L{sendmsg} treats an empty ancillary data list the same way it treats
receiving no argument for the ancillary parameter at all.
"""
sendmsg(self.input, b"hello, world!", [], 0)
result = recvmsg(self.output)
self.assertEqual(result, (b"hello, world!", [], 0))
@skipIf(dontWaitSkip, "MSG_DONTWAIT is only known to work as intended on Linux")
def test_flags(self):
"""
The C{flags} argument to L{sendmsg} is passed on to the underlying
C{sendmsg} call, to affect it in whatever way is defined by those
flags.
"""
# Just exercise one flag with simple, well-known behavior. MSG_DONTWAIT
# makes the send a non-blocking call, even if the socket is in blocking
# mode. See also test_flags in RecvmsgTests
for i in range(8 * 1024):
try:
sendmsg(self.input, b"x" * 1024, flags=MSG_DONTWAIT)
except error as e:
self.assertEqual(e.args[0], errno.EAGAIN)
break
else:
self.fail(
"Failed to fill up the send buffer, "
"or maybe send1msg blocked for a while"
)
@inlineCallbacks
def test_sendSubProcessFD(self):
"""
Calling L{sendmsg} with SOL_SOCKET, SCM_RIGHTS, and a platform-endian
packed file descriptor number should send that file descriptor to a
different process, where it can be retrieved by using L{recv1msg}.
"""
sspp = _spawn("pullpipe", self.output.fileno())
yield sspp.started
pipeOut, pipeIn = _makePipe()
self.addCleanup(pipeOut.close)
self.addCleanup(pipeIn.close)
with pipeIn:
sendmsg(
self.input,
b"blonk",
[(SOL_SOCKET, SCM_RIGHTS, pack("i", pipeIn.fileno()))],
)
yield sspp.stopped
self.assertEqual(read(pipeOut.fileno(), 1024), b"Test fixture data: blonk.\n")
# Make sure that the pipe is actually closed now.
self.assertEqual(read(pipeOut.fileno(), 1024), b"")
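# A condensed sketch of the FD-passing pattern exercised by test_sendSubProcessFD
# above, assuming an AF_UNIX socket pair inside a single process; the layout of
# each received ancillary item as a (level, type, packed bytes) triple is an
# assumption here, and `some_fd` is a hypothetical open file descriptor.
#
#   from socket import socketpair, AF_UNIX, SOL_SOCKET
#   from struct import pack, unpack
#   from twisted.python.sendmsg import sendmsg, recvmsg, SCM_RIGHTS
#
#   left, right = socketpair(AF_UNIX)
#   sendmsg(left, b"payload",
#           [(SOL_SOCKET, SCM_RIGHTS, pack("i", some_fd))])
#   data, ancillary, flags = recvmsg(right)
#   (received_fd,) = unpack("i", ancillary[0][2])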
@skipIf(doImportSkip, importSkipReason)
class GetSocketFamilyTests(TestCase):
"""
Tests for L{getSocketFamily}.
"""
def _socket(self, addressFamily):
"""
Create a new socket using the given address family and return that
socket's file descriptor. The socket will automatically be closed when
the test is torn down.
"""
s = socket(addressFamily)
self.addCleanup(s.close)
return s
def test_inet(self):
"""
When passed the file descriptor of a socket created with the C{AF_INET}
address family, L{getSocketFamily} returns C{AF_INET}.
"""
self.assertEqual(AF_INET, getSocketFamily(self._socket(AF_INET)))
def test_inet6(self):
"""
When passed the file descriptor of a socket created with the
C{AF_INET6} address family, L{getSocketFamily} returns C{AF_INET6}.
"""
self.assertEqual(AF_INET6, getSocketFamily(self._socket(AF_INET6)))
@skipIf(nonUNIXSkip, "Platform does not support AF_UNIX sockets")
def test_unix(self):
"""
When passed the file descriptor of a socket created with the C{AF_UNIX}
address family, L{getSocketFamily} returns C{AF_UNIX}.
"""
self.assertEqual(AF_UNIX, getSocketFamily(self._socket(AF_UNIX)))
| 30.341176
| 86
| 0.623691
|
f94508f58d5279d62b6ad98792f48a0b250b569c
| 9,517
|
py
|
Python
|
openstates/models/tests/test_models.py
|
openstates/openstates-core
|
5590e1fa5de7794325ca1666bac1015f0ce7b102
|
[
"MIT"
] | 9
|
2020-04-04T00:19:07.000Z
|
2022-02-27T02:24:12.000Z
|
openstates/models/tests/test_models.py
|
openstates/openstates-core
|
5590e1fa5de7794325ca1666bac1015f0ce7b102
|
[
"MIT"
] | 17
|
2020-03-31T18:19:59.000Z
|
2022-01-03T15:18:48.000Z
|
openstates/models/tests/test_models.py
|
openstates/openstates-core
|
5590e1fa5de7794325ca1666bac1015f0ce7b102
|
[
"MIT"
] | 19
|
2020-04-10T21:32:21.000Z
|
2022-03-02T20:23:21.000Z
|
import pytest # type: ignore
import datetime
from pydantic import ValidationError
from openstates.models.common import (
validate_fuzzy_date,
validate_ocd_jurisdiction,
validate_ocd_person,
validate_str_no_newline,
validate_url,
Link,
OtherName,
OtherIdentifier,
)
from openstates.models.people import (
Person,
Party,
RoleType,
OfficeType,
Office,
PersonIdBlock,
Role,
)
from openstates.models.committees import (
Membership,
ScrapeCommittee,
Committee,
)
VALID_PERSON_ID = "ocd-person/abcdef98-0123-7777-8888-1234567890ab"
VALID_ORG_ID = "ocd-organization/abcdef98-0123-7777-8888-1234567890ab"
VALID_JURISDICTION_ID = "ocd-jurisdiction/country:us/state:nc/government"
@pytest.mark.parametrize(
"validator,val,valid",
[
(validate_fuzzy_date, "2020", True),
(validate_fuzzy_date, "2020-01", True),
(validate_fuzzy_date, "2020-01-22", True),
(validate_fuzzy_date, datetime.date(2020, 1, 22), True),
(validate_fuzzy_date, "2020-1-22", False),
(validate_fuzzy_date, "2020/1/22", False),
(validate_fuzzy_date, "x", False),
(validate_ocd_person, VALID_PERSON_ID, True),
(validate_ocd_person, "abcdef98-0123-7777-8888-1234567890ab", False),
(validate_ocd_person, "ocd-person/abcdef980123777788881234567890ab", False),
(
validate_ocd_jurisdiction,
"ocd-jurisdiction/country:us/state:nc/government",
True,
),
(validate_ocd_jurisdiction, "ocd-jurisdiction/country:us/state:nc", False),
(
validate_ocd_jurisdiction,
"ocd-jurisdiction/country:us/state:xy/government",
False,
),
(
validate_ocd_jurisdiction,
"ocd-jurisdiction/country:us/state:nc/county:wake",
False,
),
(validate_str_no_newline, "long string with no breaks", True),
(validate_str_no_newline, "multi\nline", False),
(validate_url, "http://example.com", True),
(validate_url, "https://example.com", True),
(validate_url, "example.com", False),
],
)
def test_common_validators(validator, val, valid):
if valid:
assert validator(val) == val
else:
with pytest.raises(ValueError):
validator(val)
def test_link():
good = Link(url="https://example.com", note="simple note")
assert good.url and good.note
with pytest.raises(ValidationError):
Link(url="bad-url")
with pytest.raises(ValidationError):
Link(url="https://good.url", note="no \n newlines!")
with pytest.raises(ValidationError):
Link(note="missing URL!")
def test_other_name():
good = OtherName(name="fine", start_date="2021")
assert good.name
with pytest.raises(ValidationError):
OtherName(name="newline \n not allowed!")
with pytest.raises(ValidationError):
OtherName(name="bad date", start_date="2")
with pytest.raises(ValidationError):
OtherName(name="bad date", end_date="2")
with pytest.raises(ValidationError):
OtherName(start_date="2021")
def test_other_ids():
good = OtherIdentifier(identifier="fine", scheme="openstates", start_date="2021")
assert good.identifier
with pytest.raises(ValidationError):
OtherIdentifier(identifier="newline \n not allowed!", scheme="openstates")
with pytest.raises(ValidationError):
OtherIdentifier(identifier="no scheme")
with pytest.raises(ValidationError):
OtherIdentifier(identifier="bad date", scheme="openstates", start_date="x")
with pytest.raises(ValidationError):
OtherIdentifier(identifier="bad date", scheme="openstates", end_date="x")
def test_person_basics():
with pytest.raises(ValidationError):
Person(name="missing fields")
good = Person(
id="ocd-person/11111111-2222-3333-4444-555555555555",
name="Joan Jones",
party=[Party(name="Democratic")],
roles=[],
)
assert good.name
with pytest.raises(ValidationError):
good.death_date = "X"
with pytest.raises(ValidationError):
good.birth_date = "X"
with pytest.raises(ValidationError):
good.birth_date = "X"
with pytest.raises(ValidationError):
good.id = "123"
with pytest.raises(ValidationError):
good.image = "/fragment"
def test_person_commas():
with pytest.raises(ValidationError):
Person(
id="ocd-person/11111111-2222-3333-4444-555555555555",
name="Jones, Joan",
party=[Party(name="Democratic")],
roles=[],
)
good_comma = Person(
id="ocd-person/11111111-2222-3333-4444-555555555555",
name="Joan Jones, Jr.",
party=[Party(name="Democratic")],
roles=[],
)
assert good_comma.name
def test_party_cls():
party = Party(name="Democratic")
assert party.name
with pytest.raises(ValidationError):
party.end_date = "x"
def test_office():
# need at least one type
with pytest.raises(ValidationError):
Office(classification=OfficeType.DISTRICT)
cd = Office(classification=OfficeType.DISTRICT, address="123 Boogie Woogie Ave")
# no newline
with pytest.raises(ValidationError):
cd.address = "123 Boogie Woogie Avenue\nSpringfield, MA"
# phone number regex
with pytest.raises(ValidationError):
cd.voice = "911"
with pytest.raises(ValidationError):
cd.fax = "911"
cd.fax = "919-555-1234"
cd.voice = "1-123-555-6666 ext. 3333"
# no such field
with pytest.raises(ValueError):
cd.phone = "911"
def test_person_id_block():
assert PersonIdBlock(twitter="realFoolish")
with pytest.raises(ValidationError):
PersonIdBlock(twitter="@realFoolish")
with pytest.raises(ValidationError):
PersonIdBlock(youtube="https://youtube.com/test")
def test_role_basics():
with pytest.raises(ValidationError):
Role(type=RoleType.UPPER, jurisdiction="us")
with pytest.raises(ValidationError):
Role(
type=RoleType.UPPER,
jurisdiction=VALID_JURISDICTION_ID,
end_reason="stuff\nhere",
)
def test_role_conditional_requires():
assert Role(
type=RoleType.UPPER,
district=4,
end_date="2010",
jurisdiction=VALID_JURISDICTION_ID,
)
assert Role(
type=RoleType.GOVERNOR,
start_date="2010",
end_date="2016",
jurisdiction=VALID_JURISDICTION_ID,
)
with pytest.raises(ValidationError):
assert Role(
type=RoleType.UPPER, end_date="2010", jurisdiction=VALID_JURISDICTION_ID
)
with pytest.raises(ValidationError):
assert Role(
type=RoleType.GOVERNOR,
start_date="2010",
jurisdiction=VALID_JURISDICTION_ID,
)
def test_party_on_person():
p = Person(
id=VALID_PERSON_ID,
name="Tony Tigre",
party=[Party(name="Democratic")],
roles=[],
)
with pytest.raises(ValidationError):
# no such party
p.party = [Party(name="Vampire")]
def test_party_required_on_legislator():
p = Person(
id=VALID_PERSON_ID,
name="Tony Tigre",
party=[Party(name="Democratic")],
roles=[
Role(type=RoleType.UPPER, district=1, jurisdiction=VALID_JURISDICTION_ID)
],
)
with pytest.raises(ValidationError):
# no party!
p.party = []
def test_multiple_parties():
p = Person(
id=VALID_PERSON_ID,
name="Tony Tigre",
party=[Party(name="Democratic")],
roles=[],
)
with pytest.raises(ValidationError):
# can't have two active major parties
p.party = [Party(name="Democratic"), Party(name="Republican")]
# can be in multiple parties as long as one is non-major
p.party = [Party(name="Democratic"), Party(name="Green")]
# or if one is obsolete
p.party = [Party(name="Democratic", end_date="2010"), Party(name="Republican")]
def test_committee_membership():
assert Membership(name="Franz Ferdinand", role="member")
assert Membership(name="Franz Ferdinand", role="member", person_id=VALID_PERSON_ID)
with pytest.raises(ValidationError):
Membership(name="No Role", person_id=VALID_PERSON_ID)
with pytest.raises(ValidationError):
Membership(name="Bad ID", role="chair", person_id="123")
def test_scrapecommittee():
assert ScrapeCommittee(name="Health", chamber="upper")
with pytest.raises(ValidationError):
ScrapeCommittee(name="Health \n Roads", chamber="upper")
def test_committee():
assert Committee(
name="Health",
chamber="upper",
id=VALID_ORG_ID,
jurisdiction=VALID_JURISDICTION_ID,
)
with pytest.raises(ValidationError):
Committee(
name="Health", chamber="upper", id="123", jurisdiction=VALID_JURISDICTION_ID
)
with pytest.raises(ValidationError):
Committee(
name="Health", chamber="upper", id=VALID_ORG_ID, jurisdiction="canada"
)
def test_committee_dict_order():
c = Committee(
name="Health",
chamber="upper",
id=VALID_ORG_ID,
jurisdiction=VALID_JURISDICTION_ID,
)
assert list(c.to_dict().keys())[:4] == [
"id",
"jurisdiction",
"classification",
"name",
]
| 30.022082
| 88
| 0.641168
|
f551486beaf40cb106a3d431658a394b4631732b
| 203
|
py
|
Python
|
src/icotools/icosoc/mod_pmodssd/mod_pmodssd.py
|
dm7h/fpga-event-recorder
|
4e53babbbb514ee375f4b5585b1d24e5b40f8df7
|
[
"0BSD"
] | null | null | null |
src/icotools/icosoc/mod_pmodssd/mod_pmodssd.py
|
dm7h/fpga-event-recorder
|
4e53babbbb514ee375f4b5585b1d24e5b40f8df7
|
[
"0BSD"
] | null | null | null |
src/icotools/icosoc/mod_pmodssd/mod_pmodssd.py
|
dm7h/fpga-event-recorder
|
4e53babbbb514ee375f4b5585b1d24e5b40f8df7
|
[
"0BSD"
] | null | null | null |
def generate_c_code(icosoc_h, icosoc_c, mod):
icosoc_h.append("""
static inline void icosoc_%s_set(uint8_t x)
{
*(uint32_t*)(0x20000000 + %s * 0x10000) = x;
}
""" % (mod["name"], mod["addr"]))
| 20.3
| 48
| 0.630542
|
df72a83158a4b5c6bbe449a19505acaeeb7f77bd
| 3,563
|
py
|
Python
|
chatsite/settings.py
|
Twicefan98/django-chatting-app
|
d77aad66f3a401384bb766dd6fb5d2246e6712a1
|
[
"BSD-3-Clause"
] | null | null | null |
chatsite/settings.py
|
Twicefan98/django-chatting-app
|
d77aad66f3a401384bb766dd6fb5d2246e6712a1
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T15:16:03.000Z
|
2022-02-21T15:16:03.000Z
|
chatsite/settings.py
|
Twicefan98/django-chatting-app
|
d77aad66f3a401384bb766dd6fb5d2246e6712a1
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Django settings for chatsite project.
Generated by 'django-admin startproject' using Django 3.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-u$x9s17*))qcr!nbd#o6iq77hd_6l7peafw5t^(ej__v=@ubat'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chat',
'channels'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Set Auth User Model
AUTH_USER_MODEL = 'chat.User'
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Channels
ASGI_APPLICATION = 'chatsite.asgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
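# The Redis-backed channel layer above assumes a Redis server reachable at
# 127.0.0.1:6379 and the channels_redis package installed. For local development
# without Redis, one possible alternative (a sketch, not suitable for production)
# is the in-memory layer:
#
# CHANNEL_LAYERS = {
#     'default': {
#         'BACKEND': 'channels.layers.InMemoryChannelLayer',
#     },
# }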
| 25.45
| 91
| 0.691271
|