Dataset schema (29 columns):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1 – 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 (nullable) |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1 – 116k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 (nullable) |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1 – 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 (nullable) |
| content | string | lengths 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
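Each record below is one row of this table: the repository metadata fields, then the `content` cell (the file itself), then the per-file statistics (`avg_line_length`, `max_line_length`, `alphanum_fraction`). As a minimal, hedged sketch of how rows with this schema could be inspected (the `rows.parquet` export and the pandas workflow are assumptions, not something this dump specifies):

```python
import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("rows.parquet")

# Keep MIT-licensed files that have at least one recorded star event.
mit = df[df["max_stars_repo_licenses"].apply(lambda licenses: "MIT" in licenses)]
starred = mit[mit["max_stars_count"].notna()]

# Spot-check a few columns against the ranges listed in the schema above.
print(starred[["max_stars_repo_name", "size", "max_line_length", "alphanum_fraction"]].head())
assert starred["alphanum_fraction"].between(0, 1).all()
```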
hexsha: 84f2947f375499147d95a18c35c81380c7dc0bb9 | size: 11,963 | ext: py | lang: Python
max_stars: radionets/dl_framework/architectures/res_exp.py | Kevin2/radionets | 44e10a85a096f5cea8e9d83f96db65bdd4df9517 | ["MIT"] | count: null | events: null – null
max_issues: radionets/dl_framework/architectures/res_exp.py | Kevin2/radionets | 44e10a85a096f5cea8e9d83f96db65bdd4df9517 | ["MIT"] | count: 16 | events: 2019-10-09T12:30:27.000Z – 2020-12-09T14:03:03.000Z
max_forks: radionets/dl_framework/architectures/res_exp.py | Kevin2/radionets | 44e10a85a096f5cea8e9d83f96db65bdd4df9517 | ["MIT"] | count: 3 | events: 2020-01-08T09:01:09.000Z – 2020-10-19T18:53:13.000Z
content:
import torch
from torch import nn
from radionets.dl_framework.model import (
SRBlock,
Lambda,
symmetry,
GeneralELU,
)
from functools import partial
from math import pi
class SRResNet_shuffle(nn.Module):
def __init__(self):
super().__init__()
self.preBlock = nn.Sequential(
nn.Conv2d(2, 64, 9, stride=1, padding=4, groups=2), nn.PReLU()
)
# ResBlock 14
self.blocks = nn.Sequential(
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
)
self.postBlock = nn.Sequential(
nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False), nn.BatchNorm2d(64)
)
self.shuffle = nn.Sequential(
nn.Conv2d(64, 252, 3, stride=1, padding=1, bias=True),
nn.PixelShuffle(3),
nn.PReLU(),
)
self.final = nn.Sequential(
nn.Conv2d(28, 2, 9, stride=1, padding=4, groups=2),
)
self.symmetry_amp = Lambda(partial(symmetry, mode="real"))
self.symmetry_imag = Lambda(partial(symmetry, mode="imag"))
self.hardtanh = nn.Hardtanh(-pi, pi)
def forward(self, x):
x = self.preBlock(x)
x = x + self.postBlock(self.blocks(x))
x = self.shuffle(x)
x = self.final(x)
s = x.shape[-1]
x0 = self.symmetry_amp(x[:, 0]).reshape(-1, 1, s, s)
x1 = self.symmetry_imag(x[:, 1]).reshape(-1, 1, s, s)
x1 = self.hardtanh(x1)
return torch.cat([x0, x1], dim=1)
class SRResNet_bigger(nn.Module):
def __init__(self):
super().__init__()
self.preBlock = nn.Sequential(
nn.Conv2d(2, 64, 9, stride=1, padding=4, groups=2), nn.PReLU()
)
# ResBlock 8
self.blocks = nn.Sequential(
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
)
self.postBlock = nn.Sequential(
nn.Conv2d(64, 64, 3, stride=1, padding=1), nn.BatchNorm2d(64)
)
self.final = nn.Sequential(
nn.Conv2d(64, 2, 9, stride=1, padding=4, groups=2),
)
self.symmetry_amp = Lambda(partial(symmetry, mode="real"))
self.symmetry_imag = Lambda(partial(symmetry, mode="imag"))
self.hardtanh = nn.Hardtanh(-pi, pi)
def forward(self, x):
s = x.shape[-1]
x = self.preBlock(x)
x = x + self.postBlock(self.blocks(x))
x = self.final(x)
x0 = self.symmetry_amp(x[:, 0]).reshape(-1, 1, s, s)
x1 = self.hardtanh(x[:, 1]).reshape(-1, 1, s, s)
x1 = self.symmetry_imag(x1).reshape(-1, 1, s, s)
return torch.cat([x0, x1], dim=1)
class SRResNet_bigger_16(nn.Module):
def __init__(self):
super().__init__()
self.preBlock = nn.Sequential(
nn.Conv2d(2, 64, 9, stride=1, padding=4, groups=2), nn.PReLU()
)
# ResBlock 16
self.blocks = nn.Sequential(
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
)
self.postBlock = nn.Sequential(
nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False), nn.BatchNorm2d(64)
)
self.final = nn.Sequential(nn.Conv2d(64, 2, 9, stride=1, padding=4, groups=2),)
self.symmetry_amp = Lambda(partial(symmetry, mode="real"))
self.symmetry_imag = Lambda(partial(symmetry, mode="imag"))
def forward(self, x):
s = x.shape[-1]
x = self.preBlock(x)
x = x + self.postBlock(self.blocks(x))
x = self.final(x)
x0 = self.symmetry_amp(x[:, 0]).reshape(-1, 1, s, s)
x1 = self.symmetry_imag(x[:, 1]).reshape(-1, 1, s, s)
return torch.cat([x0, x1], dim=1)
class SRResNet_amp(nn.Module):
def __init__(self):
super().__init__()
self.preBlock = nn.Sequential(
nn.Conv2d(2, 64, 9, stride=1, padding=4, groups=2), nn.PReLU()
)
# ResBlock 16
self.blocks = nn.Sequential(
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
)
self.postBlock = nn.Sequential(
nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False), nn.BatchNorm2d(64)
)
self.final = nn.Sequential(
nn.Conv2d(64, 1, 9, stride=1, padding=4, groups=1),
)
self.symmetry_amp = Lambda(partial(symmetry, mode="real"))
def forward(self, x):
s = x.shape[-1]
x = self.preBlock(x)
x = x + self.postBlock(self.blocks(x))
x = self.final(x)
x = self.symmetry_amp(x).reshape(-1, 1, s, s)
return x
class SRResNet_phase(nn.Module):
def __init__(self):
super().__init__()
self.preBlock = nn.Sequential(
nn.Conv2d(2, 64, 9, stride=1, padding=4, groups=2), nn.PReLU()
)
# ResBlock 16
self.blocks = nn.Sequential(
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
SRBlock(64, 64),
)
self.postBlock = nn.Sequential(
nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False), nn.BatchNorm2d(64)
)
self.final = nn.Sequential(
nn.Conv2d(64, 1, 9, stride=1, padding=4, groups=1),
)
self.symmetry_imag = Lambda(partial(symmetry, mode="imag"))
self.hardtanh = nn.Hardtanh(-pi, pi)
def forward(self, x):
s = x.shape[-1]
x = self.preBlock(x)
x = x + self.postBlock(self.blocks(x))
x = self.final(x)
x = self.hardtanh(x).reshape(-1, 1, s, s)
x = self.symmetry_imag(x).reshape(-1, 1, s, s)
return x
class SRResNet_unc(nn.Module):
def __init__(self):
super().__init__()
n_channel = 64
self.preBlock = nn.Sequential(
nn.Conv2d(2, n_channel, 9, stride=1, padding=4, groups=2), nn.PReLU()
)
# ResBlock 8
self.blocks = nn.Sequential(
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
)
self.postBlock = nn.Sequential(
nn.Conv2d(n_channel, n_channel, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(n_channel),
)
self.final = nn.Sequential(
nn.Conv2d(n_channel, 4, 9, stride=1, padding=4, groups=2),
)
self.symmetry_amp = Lambda(partial(symmetry, mode="real"))
self.symmetry_imag = Lambda(partial(symmetry, mode="imag"))
self.elu = GeneralELU(add=+(1 + 1e-10))
def forward(self, x):
s = x.shape[-1]
x = self.preBlock(x)
x = x + self.postBlock(self.blocks(x))
x = self.final(x)
x0 = self.symmetry_amp(x[:, 0]).reshape(-1, 1, s, s)
x0_unc = self.symmetry_amp(x[:, 1]).reshape(-1, 1, s, s)
x0_unc = self.elu(x0_unc)
x1 = self.symmetry_imag(x[:, 2]).reshape(-1, 1, s, s)
x1_unc = self.symmetry_amp(x[:, 3]).reshape(-1, 1, s, s)
x1_unc = self.elu(x1_unc)
return torch.cat([x0, x0_unc, x1, x1_unc], dim=1)
class SRResNet_unc_amp(nn.Module):
def __init__(self):
super().__init__()
n_channel = 56
self.preBlock = nn.Sequential(
nn.Conv2d(1, n_channel, 9, stride=1, padding=4, groups=1), nn.PReLU()
)
# ResBlock 8
self.blocks = nn.Sequential(
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
)
self.postBlock = nn.Sequential(
nn.Conv2d(n_channel, n_channel, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(n_channel),
)
self.final = nn.Sequential(
nn.Conv2d(n_channel, 2, 9, stride=1, padding=4, groups=1),
)
self.symmetry_amp = Lambda(partial(symmetry, mode="real"))
self.symmetry_imag = Lambda(partial(symmetry, mode="imag"))
self.elu = GeneralELU(add=+(1 + 1e-5))
def forward(self, x):
s = x.shape[-1]
x = self.preBlock(x[:, 0].unsqueeze(1))
x = x + self.postBlock(self.blocks(x))
x = self.final(x)
x0 = self.symmetry_amp(x[:, 0]).reshape(-1, 1, s, s)
x0_unc = self.symmetry_amp(x[:, 1]).reshape(-1, 1, s, s)
x0_unc = self.elu(x0_unc)
return torch.cat([x0, x0_unc], dim=1)
class SRResNet_unc_phase(nn.Module):
def __init__(self):
super().__init__()
n_channel = 56
self.preBlock = nn.Sequential(
nn.Conv2d(1, n_channel, 9, stride=1, padding=4, groups=1), nn.PReLU()
)
# ResBlock 8
self.blocks = nn.Sequential(
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
SRBlock(n_channel, n_channel),
)
self.postBlock = nn.Sequential(
nn.Conv2d(n_channel, n_channel, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(n_channel),
)
self.final = nn.Sequential(
nn.Conv2d(n_channel, 2, 9, stride=1, padding=4, groups=1),
)
self.symmetry_amp = Lambda(partial(symmetry, mode="real"))
self.symmetry_imag = Lambda(partial(symmetry, mode="imag"))
self.elu = GeneralELU(add=+(1 + 1e-10))
def forward(self, x):
s = x.shape[-1]
x = self.preBlock(x[:, 1].unsqueeze(1))
x = x + self.postBlock(self.blocks(x))
x = self.final(x)
x0 = self.symmetry_imag(x[:, 0]).reshape(-1, 1, s, s)
x0_unc = self.symmetry_amp(x[:, 1]).reshape(-1, 1, s, s)
x0_unc = self.elu(x0_unc)
return torch.cat([x0, x0_unc], dim=1)
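# Editor's sketch (not part of the original res_exp.py): a quick check of the
# PixelShuffle bookkeeping in SRResNet_shuffle above. The 64 -> 252 convolution
# feeds nn.PixelShuffle(3), which trades 252 / 3**2 = 28 channels for a 3x larger
# feature map, matching the Conv2d(28, 2, ...) in self.final.
if __name__ == "__main__":
    x = torch.randn(1, 252, 16, 16)  # stand-in for the output of the 64 -> 252 conv
    y = nn.PixelShuffle(3)(x)
    print(y.shape)  # torch.Size([1, 28, 48, 48])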
avg_line_length: 27.438073 | max_line_length: 87 | alphanum_fraction: 0.52119

hexsha: 2cf7405709b632550c026effeb77b6d7168d4731 | size: 2,116 | ext: py | lang: Python
max_stars: Variational/sbm_run.py | vveitch/community_detection | 6342a6c1aa9424f0a73b6027d971a0c646980f0c | ["MIT"] | count: 4 | events: 2018-01-24T08:11:21.000Z – 2022-03-27T12:21:32.000Z
max_issues: Variational/sbm_run.py | vveitch/community_detection | 6342a6c1aa9424f0a73b6027d971a0c646980f0c | ["MIT"] | count: 1 | events: 2020-07-07T06:32:30.000Z – 2020-07-07T06:32:30.000Z
max_forks: Variational/sbm_run.py | vveitch/community_detection | 6342a6c1aa9424f0a73b6027d971a0c646980f0c | ["MIT"] | count: 2 | events: 2019-02-16T12:16:13.000Z – 2021-05-01T02:09:35.000Z
content:
import edward as ed
import tensorflow as tf
from edward.models import Dirichlet, Categorical, Gamma
# debugging
import sys
sys.path.append("/home/victor/Documents/community_detection/Variational")
from sbm import SBM
# SBM parameters
n_vert = 100
n_comm = 3
# fake a dataset
# sort the ground truth community identities to make it easy to parse them
z_gt = tf.Variable(tf.nn.top_k(Categorical(p=tf.ones([n_vert, n_comm])/n_comm).sample(),k=n_vert).values)
eta_gt = tf.Variable(Gamma(tf.ones([n_comm, n_comm]), tf.ones([n_comm, n_comm])).sample())
g=SBM(zs = z_gt, eta = eta_gt, n_comm = n_comm)
data = SBM(zs = z_gt, eta = eta_gt, n_comm = n_comm).sample()
with tf.Session() as sess:
init = tf.global_variables_initializer()
init.run()
dataset = data.eval()
z_gt = z_gt.eval()
eta_gt = eta_gt.eval()
# Model
# higher level parameters
# alpha = tf.Variable(3.0,dtype=tf.float32)
# lam = tf.Variable(1,dtype=tf.float32)
# kap = tf.Variable(1,dtype=tf.float32)
# communities
# pi = Dirichlet(alpha=alpha*tf.ones([n_comm]))
# z = Categorical(p=tf.ones([n_vert, n_comm]) * pi)
z = Categorical(p=tf.ones([n_vert, n_comm]) / 3. ) # z.sample().eval()
# comm-comm weights
# eta = Gamma(lam*tf.ones([n_comm, n_comm]), kap*tf.ones([n_comm, n_comm]))
eta = Gamma(tf.ones([n_comm, n_comm]), tf.ones([n_comm, n_comm]))
g = SBM(zs=z,eta=eta,n_comm=n_comm)
# Variational posterior
# qpi = Dirichlet( alpha = tf.Variable(tf.ones([n_comm])) )
qz = Categorical( tf.Variable(tf.ones([n_vert, n_comm])))
qeta = Gamma(tf.Variable(tf.ones([n_comm, n_comm])), tf.Variable(tf.ones([n_comm, n_comm])))
with tf.Session() as sess:
init = tf.global_variables_initializer()
init.run()
print g.log_prob(dataset).eval()
# Inference
inference = ed.KLqp({z: qz}, data={g: dataset, eta: eta_gt})
inference.initialize(n_samples=20, n_iter=10000)
tf.global_variables_initializer().run()
for _ in range(inference.n_iter):
info_dict = inference.update()
inference.print_progress(info_dict)
# sess = tf.InteractiveSession()
# init = tf.global_variables_initializer()
# init.run()
# dataset.eval()
avg_line_length: 28.594595 | max_line_length: 105 | alphanum_fraction: 0.705104

hexsha: 3c7c9cc63d8ba1036e902dac09c2689a26552d75 | size: 294 | ext: py | lang: Python
max_stars: ch_3/hw_func.py | ProhardONE/python_primer | 211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0 | ["MIT"] | count: 51 | events: 2016-04-05T16:56:11.000Z – 2022-02-08T00:08:47.000Z
max_issues: ch_3/hw_func.py | zhangxiao921207/python_primer | 211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0 | ["MIT"] | count: null | events: null – null
max_forks: ch_3/hw_func.py | zhangxiao921207/python_primer | 211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0 | ["MIT"] | count: 47 | events: 2016-05-02T07:51:37.000Z – 2022-02-08T01:28:15.000Z
content:
# Exercise 3.12
# Author: Noah Waterfield Price
def hw1():
return 'Hello, World!'
def hw2():
print 'Hello, World!'
def hw3(s1, s2):
print s1 + ', ' + s2
print hw1()
hw2()
hw3('Hello', 'World!')
"""
Sample run:
python hw_func.py
Hello, World!
Hello, World!
Hello, World!
"""
avg_line_length: 10.888889 | max_line_length: 31 | alphanum_fraction: 0.598639

hexsha: d60f48f78d0d95d14ee3469127e37a48dff2e1bc | size: 7,160 | ext: py | lang: Python
max_stars: test/programytest/services/test_pannous.py | ItsPhant/program-y | c2b211fcaf8cedc7d6d95a8ea9470a913efa1622 | ["MIT"] | count: null | events: null – null
max_issues: test/programytest/services/test_pannous.py | ItsPhant/program-y | c2b211fcaf8cedc7d6d95a8ea9470a913efa1622 | ["MIT"] | count: null | events: null – null
max_forks: test/programytest/services/test_pannous.py | ItsPhant/program-y | c2b211fcaf8cedc7d6d95a8ea9470a913efa1622 | ["MIT"] | count: 1 | events: 2020-02-21T17:58:05.000Z – 2020-02-21T17:58:05.000Z
content:
import unittest
import os
import json
from programy.utils.license.keys import LicenseKeys
from programy.services.pannous import PannousService, PannousAPI
from programy.services.service import BrainServiceConfiguration
from programytest.services.mock_requests import MockRequestsAPI
class PannousAPITests(unittest.TestCase):
def test_ask_question_valid_json(self):
request_api = MockRequestsAPI()
pannous_api = PannousAPI(request_api=request_api)
request_api._response = json.loads("""
{
"output": [
{ "actions": { "say": {"text": "Hello"} } }
]
}
""")
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEquals(response, "Hello")
def test_ask_question_no_response(self):
with self.assertRaises(Exception) as raised:
request_api = MockRequestsAPI(response=None)
pannous_api = PannousAPI(request_api=request_api)
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEqual(raised.exception.args[0], "No response from pannous service")
def test_ask_question_missing_text(self):
with self.assertRaises(Exception) as raised:
request_api = MockRequestsAPI()
pannous_api = PannousAPI(request_api=request_api)
request_api._response = json.loads("""
{
"output": [
{ "actions": { "say": {"response": "Hello"} } }
]
}
""")
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEqual(raised.exception.args[0], "'text' section missing from output[0]['actions']['say'] in pannous json_data")
def test_ask_question_missing_say(self):
with self.assertRaises(Exception) as raised:
request_api = MockRequestsAPI()
pannous_api = PannousAPI(request_api=request_api)
request_api._response = json.loads("""
{
"output": [
{ "actions": { "said": {"response": "Hello"} } }
]
}
""")
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEqual(raised.exception.args[0], "'say' section missing from output[0]['actions'] in pannous json_data")
def test_ask_question_missing_actions(self):
with self.assertRaises(Exception) as raised:
request_api = MockRequestsAPI()
pannous_api = PannousAPI(request_api=request_api)
request_api._response = json.loads("""
{
"output": [
{ "items": { "say": {"response": "Hello"} } }
]
}
""")
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEqual(raised.exception.args[0], "'actions' section in output[0] in pannous json_data")
def test_ask_question_empty_output(self):
with self.assertRaises(Exception) as raised:
request_api = MockRequestsAPI()
pannous_api = PannousAPI(request_api=request_api)
request_api._response = json.loads("""
{
"output": []
}
""")
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEqual(raised.exception.args[0], "'output' section has no elements in pannous json_data")
with self.assertRaises(Exception) as raised:
request_api = MockRequestsAPI()
pannous_api = PannousAPI(request_api=request_api)
request_api._response = json.loads("""
{
"output": null
}
""")
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEqual(raised.exception.args[0], "'output' section has no elements in pannous json_data")
def test_ask_question_missing_output(self):
with self.assertRaises(Exception) as raised:
request_api = MockRequestsAPI()
pannous_api = PannousAPI(request_api=request_api)
request_api._response = json.loads("""
{
"result": [
{ "items": { "say": {"response": "Hello"} } }
]
}
""")
response = pannous_api.ask_question("http://testurl", "Hello", "testid")
self.assertEqual(raised.exception.args[0], "'output' section missing from pannous json_data")
class TestBot:
def __init__(self):
self.license_keys = None
class MockPannousAPI(object):
def __init__(self, response=None, throw_exception=False):
self._throw_exception = throw_exception
self._response = response
def ask_question(self, url, question, login):
if self._throw_exception is True:
raise Exception(self._response)
else:
return self._response
class PannousServiceTests(unittest.TestCase):
def setUp(self):
self.bot = TestBot()
self.bot.license_keys = LicenseKeys()
self.bot.license_keys.load_license_key_file(os.path.dirname(__file__)+ os.sep + "test.keys")
def test_ask_question(self):
config = BrainServiceConfiguration("pannous")
config._url = "http://test.pandora.url"
service = PannousService(config=config, api=MockPannousAPI(response="Test pannous response"))
self.assertIsNotNone(service)
response = service.ask_question(self.bot, "testid", "what is a cat")
self.assertEquals("Test pannous response", response)
def test_ask_question_no_url(self):
config = BrainServiceConfiguration("pannous")
with self.assertRaises(Exception) as raised:
service = PannousService(config=config, api=MockPannousAPI(response="Test pannous response"))
self.assertIsNotNone(service)
response = service.ask_question(self.bot, "testid", "what is a cat")
self.assertEquals("", response)
self.assertEqual(raised.exception.args[0], "Undefined url parameter")
def test_ask_question_no_license_key(self):
self.bot = TestBot()
self.bot.license_keys = LicenseKeys()
config = BrainServiceConfiguration("pannous")
config._url = "http://test.pandora.url"
service = PannousService(config=config, api=MockPannousAPI(response="Test pannous response"))
self.assertIsNotNone(service)
response = service.ask_question(self.bot, "testid", "what is a cat")
self.assertEquals("", response)
def test_ask_question_with_exception(self):
config = BrainServiceConfiguration("pannous")
config._url = "http://test.pandora.url"
        service = PannousService(config=config, api=MockPannousAPI(response="Some weird error", throw_exception=True))
self.assertIsNotNone(service)
response = service.ask_question(self.bot, "testid", "what is a cat")
self.assertEquals("", response)
avg_line_length: 37.098446 | max_line_length: 130 | alphanum_fraction: 0.619553

hexsha: 0f197fdbfc77e5863d08383d327748f485b4ba2b | size: 69 | ext: py | lang: Python
max_stars: Testcases/testcase_1.py | sanatb97/Python-Compiler | 5228a36c9a881e21f9a6419315dd2f52eb2d5165 | ["MIT"] | count: 2 | events: 2021-10-01T22:11:46.000Z – 2021-10-02T19:44:57.000Z
max_issues: Testcases/testcase_1.py | vogiralshivani/Python-Compiler | 5228a36c9a881e21f9a6419315dd2f52eb2d5165 | ["MIT"] | count: null | events: null – null
max_forks: Testcases/testcase_1.py | vogiralshivani/Python-Compiler | 5228a36c9a881e21f9a6419315dd2f52eb2d5165 | ["MIT"] | count: 3 | events: 2019-02-04T03:49:08.000Z – 2019-10-16T13:15:13.000Z
content:
x=1
s=2
if(x==1):
y=4
z=3
if(s==2):
z=1;
else:
print("hi");
avg_line_length: 6.9 | max_line_length: 13 | alphanum_fraction: 0.42029

hexsha: 3a958e23807415a8b141b31130337770daa90a75 | size: 2,578 | ext: py | lang: Python
max_stars: src/options.py | Arkazix/PyAutoDock | cbc3ffbc2c40c4fa881b663c4d9a81f08ad388f3 | ["MIT"] | count: null | events: null – null
max_issues: src/options.py | Arkazix/PyAutoDock | cbc3ffbc2c40c4fa881b663c4d9a81f08ad388f3 | ["MIT"] | count: null | events: null – null
max_forks: src/options.py | Arkazix/PyAutoDock | cbc3ffbc2c40c4fa881b663c4d9a81f08ad388f3 | ["MIT"] | count: null | events: null – null
content:
from typing import List
from os import path
ERROR_ARGUMENT = 1
SUCCESS = 0
class Options:
def __init__(self, argv: List[str]) -> None:
self.argv = argv
self.argc = len(argv)
self.options = ".md"
self.path: str
# COMMANDES
def process_argument(self) -> bool:
"""
Process command line argument and return 0
if success a positive value else.
"""
for arg in self.argv[1:]:
if self.is_option(arg):
self.options = "." + arg[1:]
elif self.is_file(arg) or self.is_dir(arg):
self.path = arg
else:
self.__error_argument(arg)
return ERROR_ARGUMENT
return SUCCESS
# REQUETES
def get_path(self) -> str:
"""Return the path of the file or dir."""
return self.path
def get_option(self) -> str:
"""Return the current file format for the documentation."""
return self.options
def get_argc(self) -> int:
"""Return the number of argument on the command line."""
return self.argc
def is_in_argv(self, s: str) -> bool:
"""Return if s is in command line arguments."""
for arg in self.argv:
if arg == s:
return True
return False
def is_option(self, s: str) -> bool:
"""Return if s is a valid option."""
opts = ["-html", "-pdf", "-md"]
return s in opts
def is_file(self, s: str) -> bool:
"""Return if s is a valid file."""
return path.isfile(s)
def is_dir(self, s: str) -> bool:
"""Return if s is a valid dir."""
return path.isdir(s)
# OUTPUT
@staticmethod
def unexpected_argument() -> None:
"""Print unexpected information."""
print("pad: At least one path is expected.")
print("Try 'pad --help' for more information.")
@staticmethod
def help__() -> None:
"""Print help option."""
opt_desc = {
"-html": "Convert output to a html file",
"-pdf": "Convert output to a pdf file.",
"-md": "Convert output to a markdown file."
}
print("Usage: pad [OPTION]... PATH\n")
print("Make a documentation by default a markdown"
" of a python file or directory.\n")
print("Option:")
for name, desc in opt_desc.items():
print("\t" + name + "\t" + desc)
@staticmethod
def __error_argument(s: str) -> None:
print(f"pad: Argument error '{s}'")
avg_line_length: 28.32967 | max_line_length: 67 | alphanum_fraction: 0.53879

hexsha: 61828c0b86f44efff27eba4f661a265ae6356a55 | size: 680 | ext: py | lang: Python
max_stars: puma/primitives/high_precision_auto_reset_event.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | ["Apache-2.0"] | count: null | events: null – null
max_issues: puma/primitives/high_precision_auto_reset_event.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | ["Apache-2.0"] | count: 13 | events: 2020-05-04T14:14:58.000Z – 2020-07-29T16:37:03.000Z
max_forks: puma/primitives/high_precision_auto_reset_event.py | gift-surg/puma | 58beae3459a0c8d96adfe9af323e26868428df4d | ["Apache-2.0"] | count: null | events: null – null
content:
import sys
from puma.primitives import AutoResetEvent, HighPrecisionCondition
if sys.platform == "win32":
WINDOWS_WAIT_PRECISION = 0.016
WINDOWS_BUSY_INTERVAL = 0.001
# noqa: E303
class HighPrecisionAutoResetEvent(AutoResetEvent): # noqa: E301
# On windows, the timeout on event.wait() tends to wait for intervals that are a multiple of 15 or 16 milliseconds.
# To get better timing performance, we have to use a more busy wait.
def __init__(self) -> None:
super().__init__()
self._cond = HighPrecisionCondition() # replace the condition with our version
else:
HighPrecisionAutoResetEvent = AutoResetEvent
avg_line_length: 35.789474 | max_line_length: 123 | alphanum_fraction: 0.708824

hexsha: 2c2b4a1127a0b7b7e7cd77d41d23e907f9a7b1ee | size: 552 | ext: py | lang: Python
max_stars: app/core/tests/test_admin.py | boploi/recipe_app_api | 760b3696f27ea56d2db171413d9921404369ca71 | ["MIT"] | count: null | events: null – null
max_issues: app/core/tests/test_admin.py | boploi/recipe_app_api | 760b3696f27ea56d2db171413d9921404369ca71 | ["MIT"] | count: null | events: null – null
max_forks: app/core/tests/test_admin.py | boploi/recipe_app_api | 760b3696f27ea56d2db171413d9921404369ca71 | ["MIT"] | count: null | events: null – null
content:
from django.test import TestCase, client
from django.contrib.auth import get_user_model
from django.urls import reverse
"""Import the test client that allows making test requests to the
application in unit tests"""
class AdminSiteTest(TestCase):
    def setUp(self):
        """Create a setup function
        that runs before every test we run"""
# Create test client
        # Add new user, make sure the user is logged into client
        """Create a regular user that is not authenticated and can be
        listed in the admin page"""
avg_line_length: 34.5 | max_line_length: 70 | alphanum_fraction: 0.70471

hexsha: 58793b231d26c33ba258aef31c6a91bc61cd82f6 | size: 4,238 | ext: py | lang: Python
max_stars: pdseg/export_serving_model.py | windstamp/PaddleSeg | 828808ea306adf2e8b94c291b77e7b7cf558bc2a | ["ECL-2.0", "Apache-2.0"] | count: 3 | events: 2021-01-18T06:37:40.000Z – 2021-03-11T07:47:47.000Z
max_issues: pdseg/export_serving_model.py | windstamp/PaddleSeg | 828808ea306adf2e8b94c291b77e7b7cf558bc2a | ["ECL-2.0", "Apache-2.0"] | count: null | events: null – null
max_forks: pdseg/export_serving_model.py | windstamp/PaddleSeg | 828808ea306adf2e8b94c291b77e7b7cf558bc2a | ["ECL-2.0", "Apache-2.0"] | count: 1 | events: 2021-10-30T10:07:21.000Z – 2021-10-30T10:07:21.000Z
content:
# coding: utf8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import pprint
import cv2
import argparse
import numpy as np
import paddle.fluid as fluid
from utils.config import cfg
from models.model_builder import build_model
from models.model_builder import ModelPhase
def parse_args():
parser = argparse.ArgumentParser(
description='PaddleSeg Inference Model Exporter')
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file for training (and optionally testing)',
default=None,
type=str)
parser.add_argument('opts',
help='See utils/config.py for all options',
default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def export_inference_config():
deploy_cfg = '''DEPLOY:
USE_GPU : 1
USE_PR : 0
MODEL_PATH : "%s"
MODEL_FILENAME : "%s"
PARAMS_FILENAME : "%s"
EVAL_CROP_SIZE : %s
MEAN : %s
STD : %s
IMAGE_TYPE : "%s"
NUM_CLASSES : %d
CHANNELS : %d
PRE_PROCESSOR : "SegPreProcessor"
PREDICTOR_MODE : "ANALYSIS"
BATCH_SIZE : 1
''' % (cfg.FREEZE.SAVE_DIR, cfg.FREEZE.MODEL_FILENAME,
cfg.FREEZE.PARAMS_FILENAME, cfg.EVAL_CROP_SIZE, cfg.MEAN, cfg.STD,
cfg.DATASET.IMAGE_TYPE, cfg.DATASET.NUM_CLASSES, len(cfg.STD))
if not os.path.exists(cfg.FREEZE.SAVE_DIR):
os.mkdir(cfg.FREEZE.SAVE_DIR)
yaml_path = os.path.join(cfg.FREEZE.SAVE_DIR, 'deploy.yaml')
with open(yaml_path, "w") as fp:
fp.write(deploy_cfg)
return yaml_path
def export_serving_model(args):
"""
    Export PaddlePaddle inference model for prediction deployment and serving.
"""
print("Exporting serving model...")
startup_prog = fluid.Program()
infer_prog = fluid.Program()
image, logit_out = build_model(infer_prog,
startup_prog,
phase=ModelPhase.PREDICT)
# Use CPU for exporting inference model instead of GPU
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
infer_prog = infer_prog.clone(for_test=True)
if os.path.exists(cfg.TEST.TEST_MODEL):
print('load test model:', cfg.TEST.TEST_MODEL)
try:
fluid.load(infer_prog, os.path.join(cfg.TEST.TEST_MODEL, 'model'),
exe)
except:
fluid.io.load_params(exe,
cfg.TEST.TEST_MODEL,
main_program=infer_prog)
else:
        print("TEST.TEST_MODEL directory is empty!")
exit(-1)
from paddle_serving_client.io import save_model
save_model(
cfg.FREEZE.SAVE_DIR + "/serving_server",
cfg.FREEZE.SAVE_DIR + "/serving_client",
{image.name: image},
{logit_out.name: logit_out},
infer_prog,
)
print("Serving model exported!")
print("Exporting serving model config...")
deploy_cfg_path = export_inference_config()
print("Serving model saved : [%s]" % (deploy_cfg_path))
def main():
args = parse_args()
if args.cfg_file is not None:
cfg.update_from_file(args.cfg_file)
if args.opts:
cfg.update_from_list(args.opts)
cfg.check_and_infer()
print(pprint.pformat(cfg))
export_serving_model(args)
if __name__ == '__main__':
main()
avg_line_length: 31.161765 | max_line_length: 78 | alphanum_fraction: 0.640396

hexsha: a615fc220d545336ea1bb7e3254303b0f79a27d8 | size: 2,661 | ext: py | lang: Python
max_stars: departure-server-sdl/departure/renderer/sdl/cli.py | spujadas/departure-board-servers-python | fb43768b4ddc122c860ac1329dc28410d21576c6 | ["MIT"] | count: 2 | events: 2020-11-27T15:27:13.000Z – 2021-10-01T20:20:38.000Z
max_issues: departure-server-sdl/departure/renderer/sdl/cli.py | spujadas/departure-board-servers-python | fb43768b4ddc122c860ac1329dc28410d21576c6 | ["MIT"] | count: null | events: null – null
max_forks: departure-server-sdl/departure/renderer/sdl/cli.py | spujadas/departure-board-servers-python | fb43768b4ddc122c860ac1329dc28410d21576c6 | ["MIT"] | count: 1 | events: 2021-11-19T10:38:46.000Z – 2021-11-19T10:38:46.000Z
content:
from concurrent import futures
import threading
import ctypes
import logging
import sdl2
import grpc
import click
import departure.board.animator as animator
import departure.board.board_updater as board_updater
import departure.board.board as board
import departure.board.departure_pb2_grpc as departure_pb2_grpc
from . import renderer
COMMAND = "sdl"
logger = logging.getLogger(__name__)
class BoardManagerServicer(departure_pb2_grpc.BoardManagerServicer):
def __init__(
self,
target_board_updater: board_updater.BoardUpdater_192_32_3_Rows_From_ProtocolBuffers,
):
self.target_board_updater = target_board_updater
def BoardSectionsUpdate(self, request, context):
return self.target_board_updater.update(request)
@click.command(name="sdl")
@click.option("--small", is_flag=True, help="Render a small departure board.")
def run(small=False):
"""SDL2 back end."""
target_board = board.Board(192, 32)
# initialise renderer
if small:
target_renderer = renderer.SdlRendererActualSize()
else:
target_renderer = renderer.SdlRendererLarge()
target_renderer.initialise((192, 32))
board_lock = threading.RLock()
end_event = threading.Event()
# initialise board animator
animator_thread = animator.BoardAnimator(
board=target_board,
renderer=target_renderer,
time_step_in_s=0.05,
board_lock=board_lock,
end_event=end_event,
)
# initialise board updater (also initialises board with 3 rows)
target_board_updater = (
board_updater.BoardUpdater_192_32_3_Rows_From_ProtocolBuffers(
target_board=target_board, board_lock=board_lock
)
)
# initialise gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=3))
departure_pb2_grpc.add_BoardManagerServicer_to_server(
BoardManagerServicer(target_board_updater), server
)
server.add_insecure_port("[::]:50051")
# start gRPC server and board animator
animator_thread.start()
server.start()
running = True
event = sdl2.SDL_Event()
try:
while running:
if (
sdl2.SDL_PollEvent(ctypes.byref(event)) != 0
and event.type == sdl2.SDL_QUIT
):
logger.info("received SDL QUIT event")
running = False
break
end_event.wait(0.5)
except KeyboardInterrupt:
logger.info("received keyboard interrupt")
end_event.set()
server.stop(0)
animator_thread.join()
target_renderer.terminate()
if __name__ == "__main__":
run()
avg_line_length: 25.834951 | max_line_length: 92 | alphanum_fraction: 0.690342

hexsha: 8f5ae4b67c64f5aebab4a3881482dd30bde29201 | size: 7,946 | ext: py | lang: Python
max_stars: tools/ci/python_packages/gitlab_api.py | v1993/esp-idf | 6c17e3a64c02eff3a4f726ce4b7248ce11810833 | ["Apache-2.0"] | count: 1 | events: 2020-09-24T06:59:09.000Z – 2020-09-24T06:59:09.000Z
max_issues: tools/ci/python_packages/gitlab_api.py | v1993/esp-idf | 6c17e3a64c02eff3a4f726ce4b7248ce11810833 | ["Apache-2.0"] | count: null | events: null – null
max_forks: tools/ci/python_packages/gitlab_api.py | v1993/esp-idf | 6c17e3a64c02eff3a4f726ce4b7248ce11810833 | ["Apache-2.0"] | count: null | events: null – null
content:
import os
import re
import argparse
import tempfile
import tarfile
import zipfile
from functools import wraps
import gitlab
class Gitlab(object):
JOB_NAME_PATTERN = re.compile(r"(\w+)(\s+(\d+)/(\d+))?")
DOWNLOAD_ERROR_MAX_RETRIES = 3
def __init__(self, project_id=None):
config_data_from_env = os.getenv("PYTHON_GITLAB_CONFIG")
if config_data_from_env:
# prefer to load config from env variable
with tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
temp_file.write(config_data_from_env)
config_files = [temp_file.name]
else:
# otherwise try to use config file at local filesystem
config_files = None
self.gitlab_inst = gitlab.Gitlab.from_config(config_files=config_files)
self.gitlab_inst.auth()
if project_id:
self.project = self.gitlab_inst.projects.get(project_id)
else:
self.project = None
def get_project_id(self, name, namespace=None):
"""
search project ID by name
:param name: project name
        :param namespace: namespace to match when there are multiple projects with the same name
:return: project ID
"""
projects = self.gitlab_inst.projects.list(search=name)
for project in projects:
if namespace is None:
if len(projects) == 1:
project_id = project.id
break
if project.namespace["path"] == namespace:
project_id = project.id
break
else:
raise ValueError("Can't find project")
return project_id
def download_artifacts(self, job_id, destination):
"""
download full job artifacts and extract to destination.
:param job_id: Gitlab CI job ID
:param destination: extract artifacts to path.
"""
job = self.project.jobs.get(job_id)
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
job.artifacts(streamed=True, action=temp_file.write)
with zipfile.ZipFile(temp_file.name, "r") as archive_file:
archive_file.extractall(destination)
def retry_download(func):
"""
        This wrapper only catches IOError/EOFError and retries the whole function.
        So only use it with download functions that are atomic (e.g. with read()
        inside).
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
retried = 0
while True:
try:
res = func(self, *args, **kwargs)
except (IOError, EOFError) as e:
retried += 1
if retried > self.DOWNLOAD_ERROR_MAX_RETRIES:
raise e # get out of the loop
else:
print('Retried for the {} time'.format(retried))
continue
else:
break
return res
return wrapper
def download_artifact(self, job_id, artifact_path, destination=None):
"""
download specific path of job artifacts and extract to destination.
:param job_id: Gitlab CI job ID
:param artifact_path: list of path in artifacts (relative path to artifact root path)
:param destination: destination of artifact. Do not save to file if destination is None
:return: A list of artifact file raw data.
"""
job = self.project.jobs.get(job_id)
raw_data_list = []
for a_path in artifact_path:
try:
data = job.artifact(a_path)
except gitlab.GitlabGetError as e:
print("Failed to download '{}' from job {}".format(a_path, job_id))
raise e
raw_data_list.append(data)
if destination:
file_path = os.path.join(destination, a_path)
try:
os.makedirs(os.path.dirname(file_path))
except OSError:
# already exists
pass
with open(file_path, "wb") as f:
f.write(data)
return raw_data_list
def find_job_id(self, job_name, pipeline_id=None, job_status="success"):
"""
Get Job ID from job name of specific pipeline
:param job_name: job name
:param pipeline_id: If None, will get pipeline id from CI pre-defined variable.
:param job_status: status of job. One pipeline could have multiple jobs with same name after retry.
job_status is used to filter these jobs.
:return: a list of job IDs (parallel job will generate multiple jobs)
"""
job_id_list = []
if pipeline_id is None:
pipeline_id = os.getenv("CI_PIPELINE_ID")
pipeline = self.project.pipelines.get(pipeline_id)
jobs = pipeline.jobs.list(all=True)
for job in jobs:
match = self.JOB_NAME_PATTERN.match(job.name)
if match:
if match.group(1) == job_name and job.status == job_status:
job_id_list.append({"id": job.id, "parallel_num": match.group(3)})
return job_id_list
@retry_download
def download_archive(self, ref, destination, project_id=None):
"""
Download archive of certain commit of a repository and extract to destination path
:param ref: commit or branch name
:param destination: destination path of extracted archive file
:param project_id: download project of current instance if project_id is None
:return: root path name of archive file
"""
if project_id is None:
project = self.project
else:
project = self.gitlab_inst.projects.get(project_id)
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
try:
project.repository_archive(sha=ref, streamed=True, action=temp_file.write)
except gitlab.GitlabGetError as e:
print("Failed to archive from project {}".format(project_id))
raise e
print("archive size: {:.03f}MB".format(float(os.path.getsize(temp_file.name)) / (1024 * 1024)))
with tarfile.open(temp_file.name, "r") as archive_file:
root_name = archive_file.getnames()[0]
archive_file.extractall(destination)
return os.path.join(os.path.realpath(destination), root_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("action")
parser.add_argument("project_id", type=int)
parser.add_argument("--pipeline_id", "-i", type=int, default=None)
parser.add_argument("--ref", "-r", default="master")
parser.add_argument("--job_id", "-j", type=int, default=None)
parser.add_argument("--job_name", "-n", default=None)
parser.add_argument("--project_name", "-m", default=None)
parser.add_argument("--destination", "-d", default=None)
parser.add_argument("--artifact_path", "-a", nargs="*", default=None)
args = parser.parse_args()
gitlab_inst = Gitlab(args.project_id)
if args.action == "download_artifacts":
gitlab_inst.download_artifacts(args.job_id, args.destination)
if args.action == "download_artifact":
gitlab_inst.download_artifact(args.job_id, args.artifact_path, args.destination)
elif args.action == "find_job_id":
job_ids = gitlab_inst.find_job_id(args.job_name, args.pipeline_id)
print(";".join([",".join([str(j["id"]), j["parallel_num"]]) for j in job_ids]))
elif args.action == "download_archive":
gitlab_inst.download_archive(args.ref, args.destination)
elif args.action == "get_project_id":
ret = gitlab_inst.get_project_id(args.project_name)
print("project id: {}".format(ret))
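# Editor's note (not part of the original gitlab_api.py): illustrative invocations
# of the CLI defined above; the project, pipeline and job IDs are placeholders.
#   python gitlab_api.py download_artifacts 123 --job_id 4567 -d ./artifacts
#   python gitlab_api.py download_artifact 123 --job_id 4567 -a build/log.txt -d ./out
#   python gitlab_api.py find_job_id 123 --pipeline_id 890 -n build_docs
#   python gitlab_api.py download_archive 123 -r master -d ./src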
avg_line_length: 38.572816 | max_line_length: 107 | alphanum_fraction: 0.606972

hexsha: ca8dcc1d75ee96fa32309f5a57b8f0f8563b356c | size: 5,779 | ext: py | lang: Python
max_stars: example.py | bq/sorolla | f9fc2f35a673f2f11d370975be4e06c520341d88 | ["Apache-2.0"] | count: 16 | events: 2015-04-22T09:17:17.000Z – 2015-12-05T17:17:22.000Z
max_issues: example.py | bq/sorolla | f9fc2f35a673f2f11d370975be4e06c520341d88 | ["Apache-2.0"] | count: null | events: null – null
max_forks: example.py | bq/sorolla | f9fc2f35a673f2f11d370975be4e06c520341d88 | ["Apache-2.0"] | count: null | events: null – null
content:
#!/usr/bin/env python
"""
Script that colorizes, scales & tints Android resources, returning a
ready-to-copy resources folder.
Relies on ImageMagick commands, so ImageMagick needs to be installed for this
script to work.
It will also need the GhostScript libs installed to work with .pdf files.
For more information on ImageMagick color mods,
check: http://www.imagemagick.org/Usage/color_mods/
To use the script, use the following syntax:
script.py source_res_folder destination_res_folder rgb_hex_color_without_#
The source resource directory must contain the following folders:
- drawable -> Folder that contains the assets that must be scaled & colorized
with solid color. It can also include nine-patches whose file name follows
the convention: filename.9.ext where 'ext' is your file extension: pdf,
svg...
- drawable-<dpi_string> -> Folder that contains assets that must be tinted,
but are already scaled (for our use case, gray-scaled .9.png)
The preferred way to work with scalable files is to export them as PDF or SVG
files. The PDF or SVG file must be exported from a canvas with 72 dpi & for
mdpi screen density.
"""
import os
import re
import sys
from sorolla import Sorolla
"""
Available resolution dirs & scales; we'll use mdpi as the base resolution.
xxxdpi is not included yet, as it's just needed for launcher icon:
http://developer.android.com/guide/practices/screens_support.html#xxxhdpi-note
"""
AVAILABLE_RESOLUTION_SCALES = {
'ldpi': 0.75,
'mdpi': 1,
'hdpi': 1.5,
'xhdpi': 2,
'xxhdpi': 3
}
def _create_folders_if_needed(destination_dir):
"""
Creates needed folders for the script to work
"""
for res, scale in AVAILABLE_RESOLUTION_SCALES.iteritems():
folder = os.path.join(destination_dir,
"drawable-{0}".format(res))
if not os.path.isdir(folder):
os.makedirs(folder)
def _scale_and_color_resources(source_dir, destination_dir, fill_color):
"""
Scans through the drawable folder of 'source_dir' and tries to color
and scale every resource found there with the given color
"""
drawable_res_dir = os.path.join(source_dir, "drawable")
if os.path.isdir(drawable_res_dir):
for filename in os.listdir(drawable_res_dir):
filename_without_ext = filename[:-4]
for res, scale in AVAILABLE_RESOLUTION_SCALES.iteritems():
original_file = os.path.join(drawable_res_dir, filename)
scaled_file = os.path.join(
destination_dir, "drawable-{0}".format(res),
"{0}_scaled.png".format(filename_without_ext)
)
# Replace badly formatted nine-patch name so Imagemagick can
# properly convert the resource to PNG
if ".9_scaled" in scaled_file:
scaled_file = scaled_file.replace(".9_scaled", "_scaled.9")
generated_file = os.path.join(
destination_dir, "drawable-{0}".format(res),
"{0}.png".format(filename_without_ext)
)
Sorolla.scale_resource(
original_file, scaled_file, scale)
Sorolla.color_resource(
scaled_file, generated_file, fill_color)
os.remove(scaled_file)
else:
print("No drawable folder in {0}. Skipping...".format(source_dir))
def _tint_resources(source_dir, destination_dir, tint_color):
"""
Scans through the 'drawable-<dpi_string>' folders of 'source_dir' and tries
to tint gray-scaled resources
"""
for res, scale in AVAILABLE_RESOLUTION_SCALES.iteritems():
drawable_res_dir = os.path.join(
source_dir, "drawable-{0}".format(res))
if os.path.isdir(drawable_res_dir):
for filename in os.listdir(drawable_res_dir):
filename_without_ext = filename[:-4]
original_file = os.path.join(drawable_res_dir, filename)
generated_file = os.path.join(
destination_dir, "drawable-{0}".format(res),
"{0}.png".format(filename_without_ext)
)
Sorolla.tint_resource(
original_file, generated_file, tint_color)
else:
print "No drawable-{0} folder in {1}. Skipping...".format(
res, source_dir)
def _check_args(source_dir, base_color):
"""
Checks if the needed arguments are valid
"""
# Check input parameters
if not os.path.isdir(source_dir):
print "The source dir is not valid or it doesn't exist"
return False
elif not re.match("[0-9,a-f,A-F]{6}", base_color):
print "The color string must have the following format: RRGGBB"
return False
return True
def main(args):
"""
Main method. It receives three arguments: source folder, destination folder
& RGB hex color (without #)
"""
source_dir = os.path.abspath(args[0])
destination_dir = os.path.abspath(args[1])
base_color = args[2]
if not _check_args(source_dir, base_color):
sys.exit(1)
if _create_folders_if_needed(destination_dir):
print "Can't create destination folder. Is the path valid?"
sys.exit(1)
print "Scaling & coloring 'drawable' resources..."
_scale_and_color_resources(source_dir, destination_dir, base_color)
print "Tinting 'drawable-<dpi_string>' resources..."
_tint_resources(source_dir, destination_dir, base_color)
if __name__ == "__main__":
if ((len(sys.argv) > 1) and (len(sys.argv[1:]) == 3)):
main(sys.argv[1:])
else:
print 'usage: {0} res_src_dir res_dest_dir rgb_hex_color'.format(
sys.argv[0])
avg_line_length: 35.89441 | max_line_length: 79 | alphanum_fraction: 0.649939

hexsha: c1ce564ba81b0765d22cf4440b0fbc92bfd9f00d | size: 5,175 | ext: py | lang: Python
max_stars: tools/nni_trial_tool/base_channel.py | hwpengms/nni | 461ef242d2efe915ac58cbea27049abdd4d6dc73 | ["MIT"] | count: 3 | events: 2021-01-05T07:41:58.000Z – 2021-01-11T02:08:01.000Z
max_issues: tools/nni_trial_tool/base_channel.py | mstfbl/nni | 92149c58834ccf24d82a15f756decd0d1e613ed3 | ["MIT"] | count: 21 | events: 2020-11-13T19:01:01.000Z – 2022-02-27T09:12:51.000Z
max_forks: tools/nni_trial_tool/base_channel.py | mstfbl/nni | 92149c58834ccf24d82a15f756decd0d1e613ed3 | ["MIT"] | count: 3 | events: 2019-01-02T06:15:50.000Z – 2019-01-30T14:31:20.000Z
content:
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import threading
import time
from abc import ABC, abstractmethod
from queue import Empty, Queue
from .log_utils import LogType, nni_log
from .commands import CommandType
INTERVAL_SECONDS = 0.5
class BaseChannel(ABC):
def __init__(self, args):
self.is_keep_parsed = args.node_count > 1
self.args = args
self.node_id = self.args.node_id
@abstractmethod
def _inner_send(self, message):
pass
@abstractmethod
def _inner_receive(self):
return []
@abstractmethod
def _inner_open(self):
pass
@abstractmethod
def _inner_close(self):
pass
def open(self):
# initialize receive, send threads.
self.is_running = True
self.receive_queue = Queue()
self.receive_thread = threading.Thread(target=self._receive_loop)
self.receive_thread.start()
self.send_queue = Queue()
self.send_thread = threading.Thread(target=self._send_loop)
self.send_thread.start()
self._inner_open()
client_info = {
"isReady": True,
"runnerId": self.args.runner_id,
"expId": self.args.exp_id,
}
nni_log(LogType.Info, 'Channel: send ready information %s' % client_info)
self.send(CommandType.Initialized, client_info)
def close(self):
self.is_running = False
self._inner_close()
def send(self, command, data):
"""Send command to Training Service.
command: CommandType object.
        data: dict payload (serialized to JSON before sending).
        The message is queued and sent in order by the background send thread.
"""
data["node"] = self.node_id
data = json.dumps(data)
data = data.encode('utf8')
message = b'%b%014d%b' % (command.value, len(data), data)
self.send_queue.put(message)
def sent(self):
return self.send_queue.qsize() == 0
def received(self):
return self.receive_queue.qsize() > 0
def receive(self):
"""Receive a command from Training Service.
Returns a tuple of command (CommandType) and payload (str)
"""
command = None
data = None
try:
command_content = self.receive_queue.get(False)
if command_content is not None:
if (len(command_content) < 16):
# invalid header
                    nni_log(LogType.Error, 'incorrect command found, a command must be at least 16 bytes!')
return None, None
header = command_content[:16]
command = CommandType(header[:2])
length = int(header[2:])
if (len(command_content)-16 != length):
nni_log(LogType.Error, 'incorrect command length, length {}, actual data length is {}, header {}.'
.format(length, len(command_content)-16, header))
return None, None
data = command_content[16:16+length]
data = json.loads(data.decode('utf8'))
if self.node_id is None:
nni_log(LogType.Info, 'Received command, header: [%s], data: [%s]' % (header, data))
else:
nni_log(LogType.Info, 'Received command(%s), header: [%s], data: [%s]' % (self.node_id, header, data))
except Empty:
# do nothing, if no command received.
pass
except Exception as identifier:
nni_log(LogType.Error, 'meet unhandled exception in base_channel: %s' % identifier)
return command, data
def _fetch_message(self, buffer, has_new_line=False):
messages = []
while(len(buffer)) >= 16:
header = buffer[:16]
length = int(header[2:])
message_length = length+16
total_length = message_length
if has_new_line:
total_length += 1
# break, if buffer is too short.
if len(buffer) < total_length:
break
data = buffer[16:message_length]
if has_new_line and 10 != buffer[total_length-1]:
                nni_log(LogType.Error, 'end of message should be \\n, but got {}'.format(buffer[total_length-1]))
buffer = buffer[total_length:]
messages.append(header + data)
return messages, buffer
def _receive_loop(self):
while (self.is_running):
messages = self._inner_receive()
if messages is not None:
for message in messages:
self.receive_queue.put(message)
time.sleep(INTERVAL_SECONDS)
def _send_loop(self):
while (self.is_running):
message = None
try:
# no sleep, since it's a block call with INTERVAL_SECONDS second timeout
message = self.send_queue.get(True, INTERVAL_SECONDS)
except Empty:
# do nothing, if no command received.
pass
if message is not None:
self._inner_send(message)
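# Editor's sketch (not part of the original base_channel.py): the framing used by
# send()/receive() above is a 2-byte command code, a zero-padded 14-digit decimal
# payload length, and then the UTF-8 JSON payload (16-byte header in total).
# The command code b'XX' below is illustrative, not a real NNI CommandType value.
def _demo_pack(command, data):
    payload = json.dumps(data).encode('utf8')
    return b'%b%014d%b' % (command, len(payload), payload)

def _demo_unpack(message):
    header = message[:16]
    return header[:2], json.loads(message[16:16 + int(header[2:])].decode('utf8'))

if __name__ == "__main__":
    framed = _demo_pack(b'XX', {"node": None, "isReady": True})
    print(_demo_unpack(framed))  # (b'XX', {'node': None, 'isReady': True})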
avg_line_length: 33.387097 | max_line_length: 122 | alphanum_fraction: 0.575266

hexsha: eda1570efb45b45e5624828e0d3b2108d3aca1fb | size: 1,068 | ext: py | lang: Python
max_stars: src/inference.py | jasmcaus/aws-disaster-response-ml | 5b0c3feedfa848e99d3487cb9024589e17d49af1 | ["MIT"] | count: 1 | events: 2022-02-21T19:33:02.000Z – 2022-02-21T19:33:02.000Z
max_issues: src/inference.py | jasmcaus/aws-disaster-response-ml | 5b0c3feedfa848e99d3487cb9024589e17d49af1 | ["MIT"] | count: null | events: null – null
max_forks: src/inference.py | jasmcaus/aws-disaster-response-ml | 5b0c3feedfa848e99d3487cb9024589e17d49af1 | ["MIT"] | count: null | events: null – null
content:
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img
from skimage import transform
from PIL import Image
import numpy as np
import caer
from .config import *
from .loss import *
model = load_model(
"./mass_roads_unet.h5",
custom_objects={
"soft_dice_loss": soft_dice_loss,
"iou_coef": iou_coef
}
)
model.evaluate(test_roads, test_masks)
predictions = model.predict(test_roads, verbose=1)
prediction_threshold = (predictions > THRESH_VAL).astype(np.uint8)
def predict(img_path):
global model
img = caer.imread(img_path, target_size=IMG_SIZE)
img = np.array(img).astype("float32") # (IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS)
img = np.expand_dims(img, axis=0) # (1, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS)
img -= MEAN
img /= STD
predictions = model.predict(img, batch_size=1, verbose=1)
mask = (predictions > THRESH_VAL).astype(np.uint8)
mask[mask == 1] = 255
return np.squeeze(mask[0][:,:,0])
avg_line_length: 28.105263 | max_line_length: 84 | alphanum_fraction: 0.674157

hexsha: 9bbcf444caba636bd1cf7eb835945e445770ee35 | size: 644 | ext: py | lang: Python
max_stars: out_to_in_op.py | kpister/biaxial-rnn-music-composition | f6feafad0fe1066dd957293803a86d6c584d9952 | ["BSD-2-Clause"] | count: null | events: null – null
max_issues: out_to_in_op.py | kpister/biaxial-rnn-music-composition | f6feafad0fe1066dd957293803a86d6c584d9952 | ["BSD-2-Clause"] | count: null | events: null – null
max_forks: out_to_in_op.py | kpister/biaxial-rnn-music-composition | f6feafad0fe1066dd957293803a86d6c584d9952 | ["BSD-2-Clause"] | count: null | events: null – null
content:
from aesara.graph import Op, Apply
from aesara import tensor as at
import numpy as np
from data import noteStateSingleToInputForm
class OutputFormToInputFormOp(Op):
# Properties attribute
__props__ = ()
def make_node(self, state, time):
state = at.as_tensor_variable(state)
time = at.as_tensor_variable(time)
return Apply(self, [state, time], [at.bmatrix()])
# Python implementation:
def perform(self, node, inputs_storage, output_storage):
state, time = inputs_storage
output_storage[0][0] = np.array(
noteStateSingleToInputForm(state, time), dtype="int8"
)
avg_line_length: 28 | max_line_length: 65 | alphanum_fraction: 0.681677

hexsha: c83b34c16d30adf80de4879d4f6dbe54d3e8a644 | size: 4,589 | ext: py | lang: Python
max_stars: framework/CsvLoader.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | ["Apache-2.0"] | count: 159 | events: 2017-03-24T21:07:06.000Z – 2022-03-20T13:44:40.000Z
max_issues: framework/CsvLoader.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | ["Apache-2.0"] | count: 1,667 | events: 2017-03-27T14:41:22.000Z – 2022-03-31T19:50:06.000Z
max_forks: framework/CsvLoader.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | ["Apache-2.0"] | count: 95 | events: 2017-03-24T21:05:03.000Z – 2022-03-08T17:30:22.000Z
content:
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Feb 7, 2013
@author: alfoa
This python module performs the loading of data from csv files
"""
import numpy as np
import pandas as pd
from BaseClasses import MessageUser
class CsvLoader(MessageUser):
"""
Class aimed to load the CSV files
"""
acceptableUtils = ['pandas', 'numpy']
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
self.type = 'CsvLoader' # naming type for this class
self.printTag = self.type # message handling representation
self.allOutParam = False # all output parameters?
self.allFieldNames = [] # "header" of the CSV file
def loadCsvFile(self, myFile, nullOK=None, utility='pandas'):
"""
Function to load a csv file into realization format
It also retrieves the headers
The format of the csv must be comma-separated (pandas readable)
@ In, myFile, string, Input file name (absolute path)
@ In, nullOK, bool, indicates if null values are acceptable
@ In, utility, str, indicates which utility should be used to load the csv
@ Out, loadCsvFile, pandas.DataFrame or numpy.ndarray, the loaded data
"""
if utility == 'pandas':
return self._loadCsvPandas(myFile, nullOK=nullOK)
elif utility == 'numpy':
return self._loadCsvNumpy(myFile, nullOK=nullOK)
else:
self.raiseAnError(RuntimeError, f'Unrecognized CSV loading utility: "{utility}"')
def _loadCsvPandas(self, myFile, nullOK=None):
"""
Function to load a csv file into realization format
It also retrieves the headers
The format of the csv must be comma-separated (pandas readable)
@ In, myFile, string, Input file name (absolute path)
@ In, nullOK, bool, indicates if null values are acceptable
@ Out, df, pandas.DataFrame, the loaded data
"""
# first try reading the file
try:
df = pd.read_csv(myFile)
except pd.errors.EmptyDataError:
# no data in file
self.raiseAWarning(f'Tried to read data from "{myFile}", but the file is empty!')
return
else:
self.raiseADebug(f'Reading data from "{myFile}"')
# check for NaN contents -> this isn't allowed in RAVEN currently, although we might need to change this for ND
if (not nullOK) and (pd.isnull(df).values.sum() != 0):
bad = pd.isnull(df).any(1).to_numpy().nonzero()[0][0]
self.raiseAnError(IOError, f'Invalid data in input file: row "{bad+1}" in "{myFile}"')
self.allFieldNames = list(df.columns)
return df
def _loadCsvNumpy(self, myFile, nullOK=None):
"""
Function to load a csv file into realization format
It also retrieves the headers
The format of the csv must be comma-separated with all floats after header row
@ In, myFile, string, Input file name (absolute path)
@ In, nullOK, bool, indicates if null values are acceptable
@ Out, data, np.ndarray, the loaded data
"""
with open(myFile, 'rb') as f:
head = f.readline().decode()
self.allFieldNames = list(x.strip() for x in head.split(','))
data = np.loadtxt(myFile, dtype=float, delimiter=',', ndmin=2, skiprows=1)
return data
def toRealization(self, data):
"""
Converts data from the "loadCsvFile" format to a realization-style format (dictionary
currently)
@ In, data, pandas.DataFrame or np.ndarray, result of loadCsvFile
@ Out, rlz, dict, realization
"""
rlz = {}
if isinstance(data, pd.DataFrame):
rlz = dict((header, np.array(data[header])) for header in self.allFieldNames)
elif isinstance(data, np.ndarray):
rlz = dict((header, entry) for header, entry in zip(self.allFieldNames, data.T))
return rlz
def getAllFieldNames(self):
"""
Function to get all field names found in the csv file
@ In, None
@ Out, allFieldNames, list, list of field names (headers)
"""
return self.allFieldNames
avg_line_length: 38.241667 | max_line_length: 115 | alphanum_fraction: 0.670081

hexsha: 56b8309d77aa3f0e14658cb4753a0e198d03e4aa | size: 4,921 | ext: py | lang: Python
max_stars: ioos_qc/argo.py | NOAA-PMEL/ioos_qc | bbe5a159275bd90f4b12b660776cf15557c10f0f | ["Apache-2.0"] | count: null | events: null – null
max_issues: ioos_qc/argo.py | NOAA-PMEL/ioos_qc | bbe5a159275bd90f4b12b660776cf15557c10f0f | ["Apache-2.0"] | count: null | events: null – null
max_forks: ioos_qc/argo.py | NOAA-PMEL/ioos_qc | bbe5a159275bd90f4b12b660776cf15557c10f0f | ["Apache-2.0"] | count: 1 | events: 2021-01-20T23:20:06.000Z – 2021-01-20T23:20:06.000Z
content:
#!/usr/bin/env python
# coding=utf-8
"""Tests based on the ARGO QC manual."""
import logging
import warnings
from numbers import Real as N
from typing import Sequence
import numpy as np
from ioos_qc.qartod import QartodFlags
from ioos_qc.utils import add_flag_metadata
from ioos_qc.utils import great_circle_distance
from ioos_qc.utils import mapdates
L = logging.getLogger(__name__) # noqa
@add_flag_metadata(standard_name='pressure_increasing_test_quality_flag',
long_name='Pressure Increasing Test Quality Flag')
def pressure_increasing_test(inp):
"""
Returns an array of flag values where each input is flagged with SUSPECT if
it does not monotonically increase
Ref: ARGO QC Manual: 8. Pressure increasing test
Args:
inp: Pressure values as a numeric numpy array or a list of numbers.
Returns:
A masked array of flag values equal in size to that of the input.
"""
delta = np.diff(inp)
flags = np.ones_like(inp, dtype='uint8') * QartodFlags.GOOD
# Correct for downcast vs upcast by flipping the sign if it's decreasing
sign = np.sign(np.mean(delta))
if sign < 0:
delta = sign * delta
flag_idx = np.where(delta <= 0)[0] + 1
flags[flag_idx] = QartodFlags.SUSPECT
return flags
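# Worked example (added for illustration; not part of the original module):
#   >>> pressure_increasing_test([0.0, 2.0, 4.0, 3.0, 6.0])
#   array([1, 1, 1, 3, 1], dtype=uint8)
# The single downward step (4.0 -> 3.0) marks sample index 3 as SUSPECT (3); all
# monotonically increasing samples keep GOOD (1).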
@add_flag_metadata(standard_name='speed_test_quality_flag',
long_name='Speed Test Quality Flag')
def speed_test(lon: Sequence[N],
lat: Sequence[N],
tinp: Sequence[N],
suspect_threshold: float,
fail_threshold: float
) -> np.ma.core.MaskedArray:
"""Checks that the calculated speed between two points is within reasonable bounds.
This test calculates a speed between subsequent points by
* using latitude and longitude to calculate the distance between points
* calculating the time difference between those points
* checking if distance/time_diff exceeds the given threshold(s)
Missing and masked data is flagged as UNKNOWN.
If this test fails, it typically means that either a position or time is bad data,
or that a platform is mislabeled.
Ref: ARGO QC Manual: 5. Impossible speed test
Args:
lon: Longitudes as a numeric numpy array or a list of numbers.
lat: Latitudes as a numeric numpy array or a list of numbers.
tinp: Time data as a sequence of datetime objects compatible with pandas DatetimeIndex.
This includes numpy datetime64, python datetime objects and pandas Timestamp object.
i.e. pd.DatetimeIndex([datetime.utcnow(), np.datetime64(), pd.Timestamp.now()])
If anything else is passed in the format is assumed to be seconds since the unix epoch.
suspect_threshold: A float value representing a speed, in meters per second.
Speeds exceeding this will be flagged as SUSPECT.
fail_threshold: A float value representing a speed, in meters per second.
Speeds exceeding this will be flagged as FAIL.
Returns:
A masked array of flag values equal in size to that of the input.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
lat = np.ma.masked_invalid(np.array(lat).astype(np.floating))
lon = np.ma.masked_invalid(np.array(lon).astype(np.floating))
tinp = mapdates(tinp)
if lon.shape != lat.shape or lon.shape != tinp.shape:
raise ValueError(f'Lon ({lon.shape}) and lat ({lat.shape}) and tinp ({tinp.shape}) must be the same shape')
# Save original shape
original_shape = lon.shape
lon = lon.flatten()
lat = lat.flatten()
tinp = tinp.flatten()
# If no data, return
if lon.size == 0:
return np.ma.masked_array([])
# Start with everything as passing
flag_arr = QartodFlags.GOOD * np.ma.ones(lon.size, dtype='uint8')
# If either lon or lat are masked we just set the flag to MISSING
mloc = lon.mask & lat.mask
flag_arr[mloc] = QartodFlags.MISSING
# If only one data point, return
if lon.size < 2:
flag_arr[0] = QartodFlags.UNKNOWN
return flag_arr.reshape(original_shape)
# Calculate the great_distance between each point
dist = great_circle_distance(lat, lon)
# calculate speed in m/s
speed = np.ma.zeros(tinp.size, dtype='float')
speed[1:] = np.abs(dist[1:] / np.diff(tinp).astype('timedelta64[s]').astype(float))
with np.errstate(invalid='ignore'):
flag_arr[speed > suspect_threshold] = QartodFlags.SUSPECT
with np.errstate(invalid='ignore'):
flag_arr[speed > fail_threshold] = QartodFlags.FAIL
# first value is unknown, since we have no speed data for the first point
flag_arr[0] = QartodFlags.UNKNOWN
# If the value is masked set the flag to MISSING
flag_arr[dist.mask] = QartodFlags.MISSING
return flag_arr.reshape(original_shape)
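# Minimal usage sketch (added for illustration; not part of the original module).
# The coordinates, times, and thresholds below are made up; mapdates() is assumed to
# accept a numpy datetime64 array, as the docstring above indicates.
if __name__ == '__main__':
    times = np.array(['2021-01-01T00', '2021-01-01T01', '2021-01-01T02', '2021-01-01T03'],
                     dtype='datetime64[h]')
    flags = speed_test(lon=[-71.0, -71.0, -71.1, -75.0],
                       lat=[40.0, 40.01, 40.02, 42.0],
                       tinp=times,
                       suspect_threshold=1.0,   # m/s
                       fail_threshold=3.0)      # m/s
    # Roughly: the ~0.3 m/s leg stays GOOD, the ~2.4 m/s leg is SUSPECT, and the final
    # ~400 km-in-one-hour jump is FAIL; the first flag has no previous point and comes out
    # UNKNOWN (or MISSING, depending on how great_circle_distance masks the leading element).
    print(flags)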
| 36.183824
| 115
| 0.68482
|
b06c3164c8c7660c9fcf674a3c7e1a36c3f60953
| 50,168
|
py
|
Python
|
cadnano25/cadnano/gui/ui/dialogs/dialogicons_rc.py
|
amylittleyang/OtraCAD
|
126360719704caf6850d42565fe96be53b66a22d
|
[
"MIT"
] | 1
|
2021-10-11T21:47:33.000Z
|
2021-10-11T21:47:33.000Z
|
cadnano25/cadnano/gui/ui/dialogs/dialogicons_rc.py
|
amylittleyang/OtraCAD
|
126360719704caf6850d42565fe96be53b66a22d
|
[
"MIT"
] | null | null | null |
cadnano25/cadnano/gui/ui/dialogs/dialogicons_rc.py
|
amylittleyang/OtraCAD
|
126360719704caf6850d42565fe96be53b66a22d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sun Oct 12 21:41:42 2014
# by: The Resource Compiler for PyQt (Qt v5.3.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x0b\x17\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x75\x74\x66\
\x2d\x38\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x47\x65\x6e\x65\x72\
\x61\x74\x6f\x72\x3a\x20\x41\x64\x6f\x62\x65\x20\x49\x6c\x6c\x75\
\x73\x74\x72\x61\x74\x6f\x72\x20\x31\x35\x2e\x30\x2e\x30\x2c\x20\
\x53\x56\x47\x20\x45\x78\x70\x6f\x72\x74\x20\x50\x6c\x75\x67\x2d\
\x49\x6e\x20\x2e\x20\x53\x56\x47\x20\x56\x65\x72\x73\x69\x6f\x6e\
\x3a\x20\x36\x2e\x30\x30\x20\x42\x75\x69\x6c\x64\x20\x30\x29\x20\
\x20\x2d\x2d\x3e\x0a\x3c\x21\x44\x4f\x43\x54\x59\x50\x45\x20\x73\
\x76\x67\x20\x50\x55\x42\x4c\x49\x43\x20\x22\x2d\x2f\x2f\x57\x33\
\x43\x2f\x2f\x44\x54\x44\x20\x53\x56\x47\x20\x31\x2e\x31\x2f\x2f\
\x45\x4e\x22\x20\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\
\x77\x33\x2e\x6f\x72\x67\x2f\x47\x72\x61\x70\x68\x69\x63\x73\x2f\
\x53\x56\x47\x2f\x31\x2e\x31\x2f\x44\x54\x44\x2f\x73\x76\x67\x31\
\x31\x2e\x64\x74\x64\x22\x3e\x0a\x3c\x73\x76\x67\x20\x76\x65\x72\
\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x0a\x09\x20\x69\x64\x3d\
\x22\x73\x76\x67\x31\x34\x33\x30\x22\x20\x78\x6d\x6c\x6e\x73\x3a\
\x73\x76\x67\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\
\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\
\x20\x78\x6d\x6c\x6e\x73\x3a\x64\x63\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x65\x6c\
\x65\x6d\x65\x6e\x74\x73\x2f\x31\x2e\x31\x2f\x22\x20\x73\x6f\x64\
\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x6e\x61\x6d\x65\x3d\x22\x61\
\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x73\x2d\x6d\x75\x6c\x74\
\x69\x6d\x65\x64\x69\x61\x2e\x73\x76\x67\x22\x20\x69\x6e\x6b\x73\
\x63\x61\x70\x65\x3a\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x30\x2e\
\x34\x33\x2b\x64\x65\x76\x65\x6c\x22\x20\x78\x6d\x6c\x6e\x73\x3a\
\x73\x6f\x64\x69\x70\x6f\x64\x69\x3d\x22\x68\x74\x74\x70\x3a\x2f\
\x2f\x73\x6f\x64\x69\x70\x6f\x64\x69\x2e\x73\x6f\x75\x72\x63\x65\
\x66\x6f\x72\x67\x65\x2e\x6e\x65\x74\x2f\x44\x54\x44\x2f\x73\x6f\
\x64\x69\x70\x6f\x64\x69\x2d\x30\x2e\x64\x74\x64\x22\x20\x73\x6f\
\x64\x69\x70\x6f\x64\x69\x3a\x64\x6f\x63\x62\x61\x73\x65\x3d\x22\
\x2f\x68\x6f\x6d\x65\x2f\x6a\x69\x6d\x6d\x61\x63\x2f\x73\x72\x63\
\x2f\x63\x76\x73\x2f\x74\x61\x6e\x67\x6f\x2d\x69\x63\x6f\x6e\x2d\
\x74\x68\x65\x6d\x65\x2f\x73\x63\x61\x6c\x61\x62\x6c\x65\x2f\x63\
\x61\x74\x65\x67\x6f\x72\x69\x65\x73\x22\x20\x73\x6f\x64\x69\x70\
\x6f\x64\x69\x3a\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x30\x2e\x33\
\x32\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\
\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\
\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\
\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x20\x78\x6d\x6c\x6e\x73\
\x3a\x63\x63\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x65\x62\x2e\
\x72\x65\x73\x6f\x75\x72\x63\x65\x2e\x6f\x72\x67\x2f\x63\x63\x2f\
\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x69\x6e\x6b\x73\x63\x61\x70\x65\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x6e\x61\x6d\x65\x73\x70\
\x61\x63\x65\x73\x2f\x69\x6e\x6b\x73\x63\x61\x70\x65\x22\x0a\x09\
\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\
\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\
\x76\x67\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6c\x69\x6e\x6b\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x31\x39\x39\x39\x2f\x78\x6c\x69\x6e\x6b\x22\x20\x78\
\x3d\x22\x30\x70\x78\x22\x20\x79\x3d\x22\x30\x70\x78\x22\x20\x77\
\x69\x64\x74\x68\x3d\x22\x34\x38\x70\x78\x22\x20\x68\x65\x69\x67\
\x68\x74\x3d\x22\x34\x38\x70\x78\x22\x0a\x09\x20\x76\x69\x65\x77\
\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\x22\x20\
\x65\x6e\x61\x62\x6c\x65\x2d\x62\x61\x63\x6b\x67\x72\x6f\x75\x6e\
\x64\x3d\x22\x6e\x65\x77\x20\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x20\x78\x6d\x6c\x3a\x73\x70\x61\x63\x65\x3d\x22\x70\x72\x65\
\x73\x65\x72\x76\x65\x22\x3e\x0a\x3c\x67\x3e\x0a\x09\x3c\x67\x3e\
\x0a\x09\x09\x3c\x67\x3e\x0a\x09\x09\x09\x3c\x70\x61\x74\x68\x20\
\x66\x69\x6c\x6c\x3d\x22\x23\x38\x30\x30\x30\x30\x30\x22\x20\x64\
\x3d\x22\x4d\x34\x32\x2e\x35\x30\x33\x2c\x33\x34\x2e\x39\x35\x31\
\x56\x33\x35\x63\x30\x2c\x34\x2e\x38\x39\x38\x2d\x33\x2e\x37\x34\
\x39\x2c\x39\x2e\x30\x34\x39\x2d\x39\x2e\x30\x32\x32\x2c\x39\x2e\
\x30\x34\x39\x63\x2d\x32\x2e\x36\x32\x35\x2c\x30\x2d\x34\x2e\x33\
\x37\x35\x2d\x30\x2e\x36\x32\x35\x2d\x35\x2e\x39\x37\x35\x2d\x31\
\x2e\x37\x37\x35\x0a\x09\x09\x09\x09\x63\x2d\x30\x2e\x32\x39\x39\
\x2d\x30\x2e\x32\x32\x35\x2d\x30\x2e\x36\x32\x34\x2d\x30\x2e\x36\
\x35\x2d\x30\x2e\x36\x32\x34\x2d\x31\x2e\x32\x35\x63\x30\x2d\x30\
\x2e\x38\x32\x34\x2c\x30\x2e\x36\x37\x35\x2d\x31\x2e\x35\x32\x33\
\x2c\x31\x2e\x35\x32\x34\x2d\x31\x2e\x35\x32\x33\x63\x30\x2e\x34\
\x2c\x30\x2c\x30\x2e\x36\x39\x39\x2c\x30\x2e\x31\x37\x34\x2c\x30\
\x2e\x39\x34\x39\x2c\x30\x2e\x33\x35\x0a\x09\x09\x09\x09\x63\x31\
\x2e\x31\x32\x35\x2c\x30\x2e\x38\x32\x34\x2c\x32\x2e\x33\x35\x2c\
\x31\x2e\x33\x37\x35\x2c\x34\x2e\x32\x35\x2c\x31\x2e\x33\x37\x35\
\x63\x33\x2e\x32\x32\x34\x2c\x30\x2c\x35\x2e\x36\x37\x34\x2d\x32\
\x2e\x38\x32\x34\x2c\x35\x2e\x36\x37\x34\x2d\x36\x2e\x31\x37\x34\
\x56\x33\x35\x63\x30\x2d\x33\x2e\x35\x39\x38\x2d\x32\x2e\x33\x37\
\x35\x2d\x36\x2e\x32\x34\x38\x2d\x35\x2e\x39\x34\x39\x2d\x36\x2e\
\x32\x34\x38\x0a\x09\x09\x09\x09\x63\x2d\x31\x2e\x36\x34\x39\x2c\
\x30\x2d\x33\x2e\x31\x34\x39\x2c\x30\x2e\x35\x32\x35\x2d\x34\x2e\
\x32\x32\x35\x2c\x31\x2e\x33\x32\x34\x76\x33\x2e\x32\x37\x35\x68\
\x33\x2e\x34\x34\x39\x63\x30\x2e\x37\x35\x2c\x30\x2c\x31\x2e\x33\
\x37\x35\x2c\x30\x2e\x35\x37\x34\x2c\x31\x2e\x33\x37\x35\x2c\x31\
\x2e\x33\x32\x34\x73\x2d\x30\x2e\x36\x32\x35\x2c\x31\x2e\x33\x35\
\x2d\x31\x2e\x33\x37\x35\x2c\x31\x2e\x33\x35\x68\x2d\x34\x2e\x38\
\x37\x33\x0a\x09\x09\x09\x09\x63\x2d\x30\x2e\x38\x37\x35\x2c\x30\
\x2d\x31\x2e\x35\x35\x2d\x30\x2e\x36\x37\x36\x2d\x31\x2e\x35\x35\
\x2d\x31\x2e\x35\x34\x39\x76\x2d\x34\x2e\x36\x37\x34\x63\x30\x2d\
\x30\x2e\x38\x37\x35\x2c\x30\x2e\x33\x35\x31\x2d\x31\x2e\x35\x2c\
\x31\x2e\x30\x37\x35\x2d\x31\x2e\x39\x35\x31\x63\x31\x2e\x34\x39\
\x39\x2d\x31\x2c\x33\x2e\x35\x39\x39\x2d\x31\x2e\x38\x39\x38\x2c\
\x36\x2e\x32\x32\x33\x2d\x31\x2e\x38\x39\x38\x0a\x09\x09\x09\x09\
\x43\x33\x38\x2e\x39\x30\x34\x2c\x32\x35\x2e\x39\x35\x33\x2c\x34\
\x32\x2e\x35\x30\x33\x2c\x32\x39\x2e\x38\x35\x32\x2c\x34\x32\x2e\
\x35\x30\x33\x2c\x33\x34\x2e\x39\x35\x31\x7a\x22\x2f\x3e\x0a\x09\
\x09\x3c\x2f\x67\x3e\x0a\x09\x3c\x2f\x67\x3e\x0a\x09\x3c\x67\x3e\
\x0a\x09\x09\x3c\x67\x3e\x0a\x09\x09\x09\x3c\x70\x61\x74\x68\x20\
\x66\x69\x6c\x6c\x3d\x22\x23\x38\x30\x30\x30\x30\x30\x22\x20\x64\
\x3d\x22\x4d\x31\x35\x2e\x36\x34\x35\x2c\x34\x30\x2e\x37\x37\x37\
\x68\x34\x2e\x32\x34\x39\x63\x30\x2e\x38\x2c\x30\x2c\x31\x2e\x34\
\x32\x34\x2c\x30\x2e\x36\x35\x2c\x31\x2e\x34\x32\x34\x2c\x31\x2e\
\x34\x32\x36\x63\x30\x2c\x30\x2e\x37\x37\x33\x2d\x30\x2e\x36\x32\
\x35\x2c\x31\x2e\x34\x32\x34\x2d\x31\x2e\x34\x32\x34\x2c\x31\x2e\
\x34\x32\x34\x48\x38\x2e\x32\x37\x31\x0a\x09\x09\x09\x09\x63\x2d\
\x30\x2e\x37\x37\x35\x2c\x30\x2d\x31\x2e\x34\x2d\x30\x2e\x36\x35\
\x2d\x31\x2e\x34\x2d\x31\x2e\x34\x32\x34\x63\x30\x2d\x30\x2e\x37\
\x37\x35\x2c\x30\x2e\x36\x32\x35\x2d\x31\x2e\x34\x32\x36\x2c\x31\
\x2e\x34\x2d\x31\x2e\x34\x32\x36\x68\x34\x2e\x32\x37\x34\x56\x32\
\x37\x2e\x35\x33\x31\x63\x30\x2d\x30\x2e\x38\x35\x2c\x30\x2e\x37\
\x2d\x31\x2e\x35\x32\x35\x2c\x31\x2e\x35\x34\x39\x2d\x31\x2e\x35\
\x32\x35\x0a\x09\x09\x09\x09\x63\x30\x2e\x38\x35\x2c\x30\x2c\x31\
\x2e\x35\x35\x2c\x30\x2e\x36\x37\x36\x2c\x31\x2e\x35\x35\x2c\x31\
\x2e\x35\x32\x35\x56\x34\x30\x2e\x37\x37\x37\x7a\x22\x2f\x3e\x0a\
\x09\x09\x3c\x2f\x67\x3e\x0a\x09\x3c\x2f\x67\x3e\x0a\x09\x3c\x67\
\x3e\x0a\x09\x09\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\
\x23\x30\x30\x33\x46\x38\x30\x22\x20\x64\x3d\x22\x4d\x35\x2e\x36\
\x39\x37\x2c\x32\x30\x2e\x36\x35\x33\x6c\x36\x2e\x34\x39\x39\x2d\
\x31\x34\x2e\x34\x39\x37\x63\x30\x2e\x33\x35\x2d\x30\x2e\x37\x37\
\x35\x2c\x30\x2e\x39\x37\x35\x2d\x31\x2e\x32\x35\x2c\x31\x2e\x38\
\x35\x2d\x31\x2e\x32\x35\x68\x30\x2e\x31\x35\x63\x30\x2e\x38\x37\
\x35\x2c\x30\x2c\x31\x2e\x34\x37\x35\x2c\x30\x2e\x34\x37\x35\x2c\
\x31\x2e\x38\x32\x35\x2c\x31\x2e\x32\x35\x0a\x09\x09\x09\x6c\x36\
\x2e\x34\x39\x39\x2c\x31\x34\x2e\x34\x39\x37\x63\x30\x2e\x31\x32\
\x35\x2c\x30\x2e\x32\x32\x35\x2c\x30\x2e\x31\x37\x35\x2c\x30\x2e\
\x34\x35\x2c\x30\x2e\x31\x37\x35\x2c\x30\x2e\x36\x35\x63\x30\x2c\
\x30\x2e\x38\x32\x35\x2d\x30\x2e\x36\x32\x35\x2c\x31\x2e\x34\x37\
\x35\x2d\x31\x2e\x34\x35\x2c\x31\x2e\x34\x37\x35\x63\x2d\x30\x2e\
\x37\x32\x35\x2c\x30\x2d\x31\x2e\x32\x32\x35\x2d\x30\x2e\x34\x32\
\x35\x2d\x31\x2e\x35\x2d\x31\x2e\x30\x37\x35\x6c\x2d\x31\x2e\x34\
\x32\x35\x2d\x33\x2e\x32\x37\x34\x0a\x09\x09\x09\x48\x39\x2e\x38\
\x32\x31\x6c\x2d\x31\x2e\x34\x37\x35\x2c\x33\x2e\x33\x37\x35\x63\
\x2d\x30\x2e\x32\x35\x2c\x30\x2e\x36\x32\x35\x2d\x30\x2e\x37\x37\
\x35\x2c\x30\x2e\x39\x37\x35\x2d\x31\x2e\x34\x32\x34\x2c\x30\x2e\
\x39\x37\x35\x63\x2d\x30\x2e\x38\x2c\x30\x2d\x31\x2e\x34\x32\x35\
\x2d\x30\x2e\x36\x32\x35\x2d\x31\x2e\x34\x32\x35\x2d\x31\x2e\x34\
\x32\x35\x43\x35\x2e\x34\x39\x37\x2c\x32\x31\x2e\x31\x32\x38\x2c\
\x35\x2e\x35\x37\x32\x2c\x32\x30\x2e\x39\x30\x33\x2c\x35\x2e\x36\
\x39\x37\x2c\x32\x30\x2e\x36\x35\x33\x0a\x09\x09\x09\x7a\x20\x4d\
\x31\x37\x2e\x31\x34\x34\x2c\x31\x35\x2e\x37\x30\x35\x4c\x31\x34\
\x2e\x30\x37\x2c\x38\x2e\x36\x33\x31\x6c\x2d\x33\x2e\x30\x37\x34\
\x2c\x37\x2e\x30\x37\x33\x48\x31\x37\x2e\x31\x34\x34\x7a\x22\x2f\
\x3e\x0a\x09\x3c\x2f\x67\x3e\x0a\x09\x3c\x67\x3e\x0a\x09\x09\x3c\
\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x30\x30\x33\x46\
\x38\x30\x22\x20\x64\x3d\x22\x4d\x32\x36\x2e\x34\x34\x34\x2c\x31\
\x33\x2e\x39\x35\x35\x76\x2d\x30\x2e\x30\x35\x63\x30\x2d\x34\x2e\
\x39\x37\x34\x2c\x33\x2e\x37\x32\x34\x2d\x39\x2e\x30\x34\x37\x2c\
\x39\x2e\x30\x32\x32\x2d\x39\x2e\x30\x34\x37\x63\x32\x2e\x36\x39\
\x39\x2c\x30\x2c\x34\x2e\x34\x37\x35\x2c\x30\x2e\x38\x2c\x35\x2e\
\x39\x39\x39\x2c\x31\x2e\x39\x37\x34\x0a\x09\x09\x09\x63\x30\x2e\
\x33\x32\x35\x2c\x30\x2e\x32\x35\x2c\x30\x2e\x36\x2c\x30\x2e\x36\
\x37\x35\x2c\x30\x2e\x36\x2c\x31\x2e\x32\x32\x35\x63\x30\x2c\x30\
\x2e\x38\x35\x2d\x30\x2e\x36\x37\x35\x2c\x31\x2e\x35\x2d\x31\x2e\
\x35\x32\x34\x2c\x31\x2e\x35\x63\x2d\x30\x2e\x34\x32\x35\x2c\x30\
\x2d\x30\x2e\x37\x32\x35\x2d\x30\x2e\x31\x35\x2d\x30\x2e\x39\x32\
\x35\x2d\x30\x2e\x33\x63\x2d\x31\x2e\x32\x2d\x30\x2e\x39\x37\x35\
\x2d\x32\x2e\x34\x39\x39\x2d\x31\x2e\x35\x37\x34\x2d\x34\x2e\x31\
\x37\x34\x2d\x31\x2e\x35\x37\x34\x0a\x09\x09\x09\x63\x2d\x33\x2e\
\x33\x32\x34\x2c\x30\x2d\x35\x2e\x37\x37\x34\x2c\x32\x2e\x37\x34\
\x39\x2d\x35\x2e\x37\x37\x34\x2c\x36\x2e\x31\x37\x33\x76\x30\x2e\
\x30\x35\x63\x30\x2c\x33\x2e\x34\x32\x34\x2c\x32\x2e\x34\x32\x35\
\x2c\x36\x2e\x31\x39\x39\x2c\x35\x2e\x37\x37\x34\x2c\x36\x2e\x31\
\x39\x39\x63\x31\x2e\x38\x34\x39\x2c\x30\x2c\x33\x2e\x30\x39\x39\
\x2d\x30\x2e\x36\x2c\x34\x2e\x33\x37\x34\x2d\x31\x2e\x36\x37\x35\
\x0a\x09\x09\x09\x63\x30\x2e\x32\x32\x35\x2d\x30\x2e\x32\x2c\x30\
\x2e\x35\x34\x39\x2d\x30\x2e\x33\x35\x2c\x30\x2e\x39\x32\x34\x2d\
\x30\x2e\x33\x35\x63\x30\x2e\x37\x37\x35\x2c\x30\x2c\x31\x2e\x34\
\x35\x2c\x30\x2e\x36\x35\x2c\x31\x2e\x34\x35\x2c\x31\x2e\x34\x32\
\x34\x63\x30\x2c\x30\x2e\x34\x37\x35\x2d\x30\x2e\x32\x32\x36\x2c\
\x30\x2e\x38\x35\x2d\x30\x2e\x35\x2c\x31\x2e\x31\x63\x2d\x31\x2e\
\x36\x34\x39\x2c\x31\x2e\x34\x34\x39\x2d\x33\x2e\x35\x32\x34\x2c\
\x32\x2e\x33\x34\x39\x2d\x36\x2e\x33\x34\x39\x2c\x32\x2e\x33\x34\
\x39\x0a\x09\x09\x09\x43\x33\x30\x2e\x32\x34\x34\x2c\x32\x32\x2e\
\x39\x35\x33\x2c\x32\x36\x2e\x34\x34\x34\x2c\x31\x38\x2e\x39\x37\
\x39\x2c\x32\x36\x2e\x34\x34\x34\x2c\x31\x33\x2e\x39\x35\x35\x7a\
\x22\x2f\x3e\x0a\x09\x3c\x2f\x67\x3e\x0a\x3c\x2f\x67\x3e\x0a\x3c\
\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x05\xd8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x05\x7a\x49\x44\x41\x54\x78\xda\xc4\
\x57\x5b\x6c\x14\x55\x18\xfe\xce\xcc\xec\xad\xdb\xed\x6c\x6f\xb4\
\xa5\x40\x0b\xbd\xa0\x51\x70\x2b\x8a\x20\x2a\x88\x04\x42\x7d\x80\
\x07\x9f\x24\xd8\x4d\xbc\x41\xc3\xcd\x08\x09\xa1\x31\xf1\x09\x1e\
\x35\xc1\x07\x4d\x8c\xee\x9a\x08\xbc\x88\xb6\x31\x26\x02\xc6\x35\
\x12\xd0\x26\x44\x44\xad\xe1\x16\x05\xba\xd5\x5e\x70\x2f\xdd\xcb\
\xec\x5c\xfd\x67\x2f\x65\x5c\x8a\xb4\x4b\xab\x93\xfc\x7b\xce\xce\
\xcc\x9e\xef\xfb\xbf\xff\x72\xce\x72\xf8\x9f\x2f\x61\xb6\x16\xfe\
\xfa\xe9\x27\x45\xde\xe9\xf4\xf1\x2e\xd7\xf9\x55\xc7\xfb\x62\x77\
\x7a\x8f\x9b\x0d\xf0\x53\x2b\x1e\xe9\x22\xf0\x68\x79\x5b\x7b\xc8\
\xdd\xd2\xfa\xfb\xb9\xee\x57\xba\xfe\x13\x02\x07\x0e\x1c\x10\x3f\
\x3e\x72\xf4\xd3\xe4\xfd\x0f\x04\x98\x20\x40\x70\xbb\xe1\x6a\x6c\
\xf4\x3a\xe7\x36\x06\xce\xbf\xbe\xa7\x6b\x56\x09\x74\x77\x77\x8b\
\x4b\x7d\x1d\xa1\xaa\xea\x9a\xcd\xb1\x95\xab\xa0\x72\x1c\xa4\xe1\
\x61\x68\x89\x24\x6c\x1e\x0f\xf8\xb2\xb2\xc0\x85\x9e\xfd\x5d\xb3\
\x42\x60\xcb\x96\x2d\xe2\xaa\x27\x9e\x0a\x39\x1d\x4e\x5f\x26\x93\
\x41\xcd\xfc\x26\xf0\x5b\xfd\x50\xe2\x31\x48\xa3\x23\x50\x93\x49\
\x52\xa3\x1c\x02\x91\xf8\xf9\xcd\x37\xba\x66\x94\x40\xf9\x8a\x51\
\x71\x44\xd8\x17\xf2\x56\x56\xfa\xec\x0e\x07\xda\xdb\x5a\xe0\x15\
\x3d\x78\x70\xed\x3a\x68\xcf\xac\x87\x7c\x73\x2c\xa7\x44\x3a\x05\
\x5d\x55\x41\xa1\x09\xfc\xb0\x67\x67\xd3\x8c\x10\xf0\xae\x8b\x8a\
\x9c\x9d\x0b\x9d\xb9\xd8\xe8\xfb\xee\x47\x15\x2e\xa7\x03\xd1\x48\
\x04\xad\x6d\x6d\x48\xa6\x92\x70\x2c\x5b\x8e\xc1\xa5\x0f\x23\x93\
\x4a\x41\x8e\xfc\x05\x3d\x9d\x06\x0c\x40\x93\xa4\xe6\x7b\x2e\xc3\
\xda\x4d\x09\x51\x53\xf4\x10\x4d\x7d\x60\xc0\x89\x4b\x2b\xb0\xaf\
\xdb\x06\x55\xd5\xb2\xe0\x83\x83\x61\x84\xc3\x43\xa8\x7a\x74\x39\
\xa4\x85\x0b\x21\x9f\xfe\x06\x15\x92\x94\xfd\x2d\xa9\x11\x2d\xac\
\xc3\x97\x02\xde\xf0\x7c\x46\x34\x18\x42\x8c\xc1\x47\x06\x9b\xdb\
\x86\x71\xde\x85\x9b\x11\x05\x4b\xe6\x45\x31\x3c\x3c\x9a\x25\x20\
\x50\x25\x2c\x6e\x6f\x87\xdd\x2b\xc2\xb3\xfc\x31\x0c\xc5\xa8\x1d\
\xdc\xb8\x1e\x58\xf9\x7e\xe0\xbd\x92\x15\x98\xff\x92\x46\x9e\x6b\
\x21\x9e\xf1\x3e\x9d\xd0\xed\xe5\x02\xf8\x72\x3b\x74\x45\xc7\xb1\
\xb3\x65\xa8\x76\x8e\xa0\xbd\xe2\x3a\xaa\xab\xab\xd0\xdc\x64\x86\
\xda\x40\x34\x16\x47\x46\x56\xa0\xb5\xb4\xe1\xdb\x44\x32\x60\x5d\
\x6f\x5a\x0a\xb4\xec\x34\x44\x1d\x39\xcf\x41\x1f\x15\x5e\x0e\xd5\
\xf5\x76\x98\x73\x8d\x24\x31\x43\xd1\x7f\x45\x44\x2a\xc3\x63\xf5\
\x12\x06\xc1\x26\xe0\xfa\x8d\x41\x08\x3c\x8f\x14\x25\x61\x38\x1c\
\xf6\xef\xde\xb5\xab\xb7\x24\x02\xcb\x7a\x0c\x91\x06\x8a\x39\xf3\
\x19\x84\x54\x5b\xc3\xd0\xb4\x80\x07\x95\xbb\x89\x0f\xf3\x9e\xa6\
\xb3\xec\xbb\xbf\x8d\x79\x71\xf9\xa2\x1d\x8f\x2f\xa5\x9c\x90\xa3\
\x94\x13\x69\x5c\xbd\x72\xc5\xbf\x7d\xdb\xab\xc1\xe2\x75\xa7\x44\
\x60\xed\x21\x43\x64\xa6\xe7\x5c\x2e\xe6\xf5\xb5\x0c\x8b\x17\xb1\
\x09\x70\x96\x73\x1e\x3a\xa9\xa0\x12\x09\x67\x2c\x81\xd1\x41\x05\
\x47\x3e\x07\x86\x86\x19\x1c\xda\x59\xff\x6b\x7b\x76\x07\x27\x5b\
\x9b\xdd\x0d\x7c\xf3\x61\x43\xcc\xc8\x08\x51\x08\x7d\x19\x05\xa8\
\xaa\x00\x16\xcd\x07\xc5\x14\x90\xc8\xd2\x94\xd8\x49\xaa\xae\x44\
\x0a\x88\x27\x00\x65\x28\x0e\x44\xd3\x50\x64\x03\x72\x86\x4c\x32\
\xfc\x37\x4e\x37\x06\x4b\xda\x0d\xb7\x7e\x60\x88\x04\x9c\x8d\xb9\
\xe9\x6d\x43\x2d\xb0\x90\xc0\x15\x22\x52\xf0\xde\xac\x6b\x83\x4c\
\xa7\xe4\xd0\x46\x12\x50\x74\x19\x8a\xc3\x7c\xa8\x9b\x4b\xf8\xaf\
\x9e\x9a\x1b\x2c\x69\x3b\xde\x7e\xcc\x02\x4e\x40\x75\xd5\x40\x73\
\x23\x40\x65\x0e\x2e\x2f\xbb\x79\x15\xc0\x53\x7f\x24\x61\x57\x64\
\x30\x07\xcb\xcb\xca\xf9\x2f\x1c\x9f\x13\x2c\xe9\x3c\xb0\xb7\xd7\
\x10\x15\xf5\x96\xe7\x35\x95\xc0\x82\x06\xf2\x50\xb7\x78\x9e\x07\
\x37\xef\xc5\xc3\x69\x70\x14\x1f\xfb\x04\x38\xfc\x67\x82\xb5\xc1\
\xa9\xe4\xd7\xa4\x04\xe6\x38\xc7\xfc\xa3\x99\x9a\x2c\x78\x25\xc5\
\xbc\x61\x4e\x0e\xac\x90\x6c\x56\xcf\x23\x61\x09\xba\xa4\xc2\x66\
\x37\xbb\x7a\x4e\xf6\x93\xef\x54\x07\xa7\x5a\x5d\x93\x56\xc1\x17\
\x7b\xd9\x7e\x99\xb9\xef\x83\xcb\x0b\x6f\x95\xe3\x16\x30\xbb\x15\
\x77\xf3\x8a\xfc\x99\x41\x3a\xae\x59\x73\xda\xdf\x7b\xa8\x32\x38\
\x9d\xde\x32\x79\x0e\xe8\x99\x35\x1b\x2b\xbf\xc4\xb0\x23\x82\x13\
\xd2\xb3\x90\xb5\x9c\x6f\x5c\x9e\x84\xa9\x4c\xfc\x26\x25\x5b\x5a\
\x27\xcf\x27\x0a\xc9\x7f\xb4\xa7\x22\x38\xdd\xce\x7a\xdb\x6e\xa8\
\xf4\xbf\xbc\x89\xfa\xaa\x17\x9c\x03\xf5\x5c\x18\xab\xdd\xfd\xa0\
\x56\x3f\x91\x78\xa6\x8d\x47\x54\xa8\x52\x0e\xdc\x66\x63\xe6\xe8\
\xff\x68\x9f\x27\x58\xca\xbe\x72\x1b\x01\x43\x97\xd7\x18\x06\xf9\
\xcb\x48\x1c\x7b\x25\x9a\x1d\x43\x58\xe9\xea\x87\x90\x7f\x33\x49\
\x92\xab\xb2\x4e\x6d\x36\x07\x2e\x10\xf8\xbb\xdb\xdd\xc1\x52\x77\
\x55\x6e\x12\xf9\x37\x83\xa7\xb8\x97\xcd\x03\xb3\x57\x99\x84\xd0\
\xcc\x7e\xc1\x06\xf7\x49\xd8\xe5\x71\x68\xaa\x91\x05\xcf\x19\xfc\
\x6f\xbd\xe0\x0a\xde\xcb\x99\xe2\x1f\x04\x3a\x3a\x3a\x9a\x2e\x0f\
\x4a\xcd\x5c\x79\x3b\x58\x79\x0b\x0c\x39\x42\x05\x3e\x08\x43\x89\
\xa1\x3e\xf3\x3d\x11\xf9\x09\xbc\x09\x2c\x64\x09\xf8\x0f\x3e\xe7\
\xbc\x27\xf0\xdb\xaa\xc0\xe3\xf1\xbc\x1d\x37\xea\x7d\x6b\x3b\xb7\
\x82\x93\xae\x41\x8f\x9c\x03\x9d\xa9\xa8\xe7\x0e\x9b\xc7\x18\x34\
\x0b\x17\x11\x37\x6a\x3e\x1b\xd3\x1b\xb6\xf5\xac\xb7\xf5\xce\xc4\
\x79\x72\x22\x85\x9d\x4e\xe7\x43\x75\x75\x75\xe7\x77\xec\xd8\x01\
\x4e\x4f\x60\x63\xeb\xaf\x68\xad\xa7\x88\x30\xdb\x35\x45\x13\x4e\
\x8f\xa7\xd1\x37\x77\x7d\xb0\x2f\xaf\x1a\x2b\xda\x47\x0c\x8b\xe9\
\xf9\x51\xb3\x7c\xbf\x3b\x01\x3a\xbd\x7c\xd8\xd9\xd9\xe9\x37\x4f\
\x31\x03\x03\x03\x70\xbb\xdd\x9f\x74\x6f\x40\xe0\xc5\x83\xe7\x2e\
\xd1\x63\x9b\xc5\x84\xbc\x72\x05\x22\x86\x05\xd0\x34\xc5\x62\x19\
\x32\x39\x3f\x16\x4c\xb5\x92\x9a\x20\xc0\xf3\x7c\x0d\x63\xec\x2b\
\x52\x62\x80\xe6\x87\x63\xb1\xd8\x18\xdd\x76\x58\xcc\x6e\x31\x21\
\x6f\xcc\x02\xae\xde\x01\x58\xb2\x8c\x69\xcb\x77\x63\xb2\xed\x98\
\xe5\xbd\x2b\x78\x5b\x00\x2c\xf6\x9e\x2f\x0a\x83\x9e\xb7\x02\x91\
\x62\x32\x85\xb9\x5a\x1c\x92\xe2\x4e\x68\x58\x16\x48\x4f\x42\x8e\
\xfd\xcb\x39\xc2\x28\xca\x87\x19\xff\x77\x3c\xad\x85\xa7\x7a\xfd\
\x2d\xc0\x00\xc0\x60\x2e\xfc\x68\xc5\xf7\xa6\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\xb1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x0b\x53\x49\x44\x41\x54\x78\xda\xec\
\x59\x69\x6c\x5c\xd5\x15\xfe\xee\x5b\x66\xf5\x8c\xc7\x63\x67\xbc\
\xc7\x63\x67\x21\x81\x84\x18\x92\x98\xa5\x6a\x65\x8a\xa8\xa0\xad\
\x68\x42\x5b\x84\x50\x2b\x05\xf5\x47\xab\x52\x89\xa4\x40\x05\x6a\
\x05\x84\x82\x2a\x95\x1f\x09\x52\x05\x94\xb6\x38\x11\x42\x10\x89\
\x8a\xa4\x42\x05\x0a\xd4\x21\x40\x80\xb0\x19\x1c\xc7\xd9\xbc\xc5\
\x89\x1d\x6f\xe3\x37\xfb\xf6\x96\x9e\x7b\xdf\x8c\x99\x86\x90\xba\
\xb1\x23\xfc\x23\x57\x3e\x7e\x33\x6f\xee\xdc\x77\xbe\x73\xbe\xb3\
\xdc\x3b\xc0\xc5\x71\x71\xcc\x69\xb0\x85\xa8\xd4\x92\xa6\x70\x2b\
\x5d\x1e\x24\x69\x2f\xdc\xda\x4d\xb2\xb5\x6f\x68\x70\x70\xc1\x03\
\x28\x28\xdf\xe9\x72\xb9\x02\x15\x15\x15\x30\x0c\x03\x9a\xa6\x21\
\x97\xcb\x69\x74\xff\x3a\x02\xd1\x55\x3a\x5f\x5e\x60\xca\x07\xb8\
\xf2\x65\x65\x65\x35\x97\xaf\x59\x03\xba\xc2\xe3\xf5\xd2\xd5\x8b\
\x68\x2c\xe6\x32\x4d\x73\xc5\x74\x54\xdb\x59\xfa\x1d\x69\x81\x39\
\x60\x9b\xa2\x28\xe1\xe5\x97\x5c\x82\x78\x32\x81\x48\x54\x43\x26\
\x9f\x87\x65\x59\xf0\xfb\x7c\x28\xa1\xd4\xcc\x50\x16\x90\xf5\x37\
\xd1\x65\x53\x53\x38\x0c\xa2\x0f\x26\xa6\x26\xa1\x9b\xa6\xa0\x90\
\x64\x11\xd7\xa5\xb3\xb3\x5d\x5a\x40\xbc\xdf\x56\x55\x55\x85\x9a\
\x9a\x1a\xe8\x7a\x1e\x2e\xa7\x0b\xa6\x6e\xc0\xc8\xeb\xf4\x5e\x47\
\x32\x91\xe4\x53\xb5\x05\x09\x80\x46\x07\x0f\xda\xa5\xcb\x96\x22\
\x4f\x94\xe1\xe2\x50\x55\xf8\x29\x06\x64\x49\x22\xe5\x13\xc8\x64\
\x32\x7c\xde\xd6\x05\x07\x80\xac\xbf\x8d\x2e\xad\x4b\x97\x2d\x83\
\x69\x5a\x33\x00\xb8\xe8\x24\x59\x52\x9c\x67\x21\x9e\x4a\x29\x03\
\x6d\x5f\x50\x00\x48\xf9\x0d\x74\xd9\xdc\xd8\xd8\x08\x8f\xc7\xf3\
\x25\xe5\xf9\x75\x6a\x6a\x8a\x80\x99\x1c\xc1\x1d\x67\x5b\x43\xf9\
\x9a\x53\x66\x87\xdf\xef\x47\x4d\x6d\x2d\xcf\xf3\x33\x4a\x73\xc9\
\x91\x68\x94\x85\xf8\x6b\x1a\x1b\xc9\xfa\xda\x82\x02\x40\xe3\x25\
\x59\x96\x03\x2d\x4b\x5a\xbe\x50\x5e\xe7\xca\xeb\x42\xe9\x54\x32\
\x89\x44\x3c\xc1\xe7\x6d\x27\xe5\xf7\x7e\xd5\x22\xf3\x56\xc8\x3e\
\xfd\xf4\xd3\x56\x45\x56\x6e\x8c\x8c\x4f\x3c\x18\x0c\x04\x5e\x20\
\x79\x88\x64\x33\xc9\x0a\x92\xb7\xa8\x00\x65\x4a\xac\xff\x10\x4f\
\x99\x61\x4a\x99\xaa\xea\x20\x85\x73\xff\x45\x1f\x0e\x48\x9b\x9e\
\xe6\xf9\xbf\x8b\x94\xdf\x78\xc1\x7b\xa1\xd1\xd3\x63\x1b\x7c\x65\
\xde\x97\x7a\x0f\x1f\xc6\xbb\xef\xbc\x8b\x57\x5e\x7e\x19\x79\x52\
\x22\x4f\xe9\x6f\x7c\x6c\x8c\xa7\x41\x5e\xfe\x1f\x27\xe1\xb4\xf9\
\x01\x2f\x48\x95\x94\x32\xab\xab\xab\x61\xe8\x76\x9a\xe4\xc2\xe7\
\x73\x4f\x94\xb4\x0e\x57\x9c\xad\xff\x99\x77\x00\x47\x8e\x1e\x9b\
\xf6\x7a\xbd\x81\x40\xb9\x1f\x43\x43\x27\xd0\x73\xe8\x10\xf6\x75\
\x76\x22\x42\x01\x98\xa0\x14\xd8\xdf\xd7\x87\x74\x3a\x2d\xe6\xf2\
\x22\x15\x0c\x06\xe1\x23\xee\x1b\x06\x57\xdc\x98\x01\xc0\xf3\x7f\
\x2a\x99\x42\x2a\x95\xe2\x53\xef\x20\xe5\x77\xfc\xaf\x67\xcf\x4b\
\x0c\x44\xa8\x6a\xf2\x8a\x69\x9a\x06\x78\x46\xf1\x7a\x3d\x70\x38\
\x1c\x02\xc4\x40\x7f\x3f\x96\x2c\x5d\x0a\x8b\xaa\xaa\xc9\x2b\x2b\
\xcd\x31\x0c\x13\xd9\x6c\xd6\xb6\x3e\x7d\xaf\xe8\x05\x4e\x9d\x02\
\xd0\xdd\xb3\x51\x7e\xde\x62\xe0\xbe\xfb\xee\x3b\x12\xd5\xb4\x1b\
\x49\x31\x97\x49\x7d\x4b\x79\x79\x39\xea\x6a\x6b\x50\x46\x56\xe6\
\x41\x39\x34\x38\xf0\x05\x45\x4a\x02\xb5\xf8\x5e\x2f\x06\x2e\x59\
\x9e\x78\xcf\x29\x73\x53\x69\xcc\x5c\x70\x00\xde\x4f\x9e\x77\x4d\
\x1c\xfe\x64\x53\xdd\xe5\xd7\xba\x72\x64\x5d\x3e\xdc\x2e\x37\x1a\
\x1b\xea\xe1\x72\xbb\xe1\x2f\x0f\xe0\xd8\xd1\xa3\xa2\x9a\xea\x45\
\xc5\x0b\xb9\xbe\xa8\x7c\x96\xac\x6f\x92\x37\x0a\x2d\xf3\xe0\x6c\
\x9f\x3d\x67\x00\xbf\xfd\x66\x88\x07\xe6\x4b\x29\x6d\x2a\x3c\x7d\
\xf0\x0d\x2c\x5a\xbe\x16\x79\xa6\x82\x31\x09\x94\x26\x11\x0e\x37\
\x51\xa6\x51\x10\xaa\xad\xc3\x91\xde\x5e\x4a\x8d\xf1\x19\x00\xa5\
\x99\xa7\xa0\xfc\x0e\x52\xfe\xcf\xff\xcf\xf3\xe7\x04\xe0\x81\xf6\
\xea\x00\x63\xe8\x94\x65\xa9\xb5\x32\xe0\x84\x4a\x6d\x63\xff\x81\
\xd7\xe0\x0d\xd6\x80\x79\x2b\x21\x49\xb2\x68\x85\x17\x2f\x5e\x4c\
\x1e\x71\xa2\x79\xc9\x32\x0c\x0c\xf4\x63\x7c\x7c\x7c\x06\x00\xa7\
\x16\x9f\x53\x18\x7b\x88\x3a\x7b\x2f\x38\x80\xb6\x3b\x4f\x05\xea\
\xdb\xee\xfe\x85\x21\x7b\xee\xaa\xcd\x1e\x68\xaf\x0e\xba\xa0\x90\
\x95\x33\x06\x03\x23\x5d\xa6\x8f\x7d\x00\x23\x11\x81\xb3\x6e\xa5\
\xf0\x04\xb7\x6e\x2d\x55\xdb\x32\xda\x9c\x84\x5b\x96\x60\x72\x62\
\x1c\x27\x86\x86\x4a\x15\x2f\x0e\x8d\x00\xec\xba\xa0\x00\x48\x79\
\xb1\xe5\x6b\xaa\x71\xdc\xa6\xa9\x2d\x2b\xbe\x81\x5d\xb0\x98\x8c\
\x34\x31\x40\xa6\xce\xca\xeb\x60\xe0\xad\x7b\x74\x64\x00\x89\x93\
\x47\xe0\x6e\xb8\x14\x4c\x71\x10\x08\x13\xc1\xca\x20\x82\x15\x01\
\xd4\xd6\xd7\xa3\x22\x58\x89\xee\xcf\xba\xce\x5c\x9e\x17\xbd\x21\
\x02\xd1\x75\x41\x00\x14\x95\xbf\x72\xb9\xa7\x66\xcb\x0f\xfd\xe8\
\x1a\x64\x68\x1e\x7f\x06\x19\x1d\x70\xc8\x0c\x5e\x15\xc8\x9b\x8c\
\xd2\xa5\x5d\x61\x32\xd3\xe3\x18\x3d\xf4\x3e\x5c\xd5\x2d\x90\xdd\
\x65\xe0\x06\xe7\xdb\xc4\x3a\xf2\x86\x93\xea\xc1\xa5\xab\x56\xe3\
\x93\x8f\x3f\x12\x74\x2a\x19\x1b\x0a\x95\x7b\x70\x5e\x01\x70\xe5\
\x19\x63\x9d\xad\xcb\xbc\x81\x9f\xde\xe0\xc5\xce\xd7\xd3\x38\xd2\
\x1f\xc1\xca\xe4\x73\x70\xab\x4c\x00\xc8\x92\x17\xb8\x92\x32\xb9\
\x40\x25\x6f\x70\x4f\x24\xa9\xa7\x19\xe9\x79\x1f\xce\xf2\x10\x14\
\x5f\x15\x51\x8a\x89\x5e\xbf\xa9\x69\xb1\xa0\xd6\xba\xab\xae\x46\
\x2f\x15\xbe\x78\x2c\x56\xfa\xb8\xf0\x99\x7b\xdf\x39\x01\x68\xfb\
\xe5\xc9\x56\x7a\x72\x67\xb8\xce\x1d\xb8\xfe\x4a\x17\x76\xbe\x99\
\xc1\x44\xd4\x44\x59\xa2\x1b\xad\xe6\xab\x42\xe1\x1c\xa7\x10\x57\
\x8e\x56\x54\x78\x93\x4e\xaf\x93\x05\xc3\xe6\x72\x79\x4c\x1c\xda\
\x4f\x0f\x33\xe1\x58\x14\xa6\xed\xa1\x2c\x5c\x14\x26\x10\xbc\x88\
\x2d\x5f\xb1\x12\xf1\x78\x0c\xa7\x4e\x9e\x9c\x01\x40\x5e\x78\x7c\
\x36\xb5\x40\x9e\x85\xf2\xf4\x44\xbc\xb7\x28\xe8\x0c\xac\x6e\x76\
\x62\xcf\xfb\x59\xc4\x92\x16\x55\x54\x0b\xaa\x9e\xc0\x6a\x7d\x0f\
\xbd\x26\x8b\xd3\x4a\x5c\x24\xce\x1d\xfa\x4b\x13\xad\x0c\x8b\x11\
\xa5\x04\x16\x78\x14\x86\xe8\x50\x0f\xe2\x63\x83\x70\x54\x2f\x85\
\xea\x74\x8b\xca\xdc\x40\xb5\xc2\xe9\x50\xd1\xd8\xd4\x0c\x37\x55\
\xf0\xde\x9e\x9e\xe2\xa3\x5f\x9b\x0d\x8d\xe4\x73\x2b\x3f\x4c\x39\
\x9e\xbd\xe2\xf7\xa9\xe1\xda\x4a\x27\x0e\x1c\x4e\x23\x93\x27\x8e\
\x13\x4d\x78\xfe\x88\x23\x88\x0c\xca\xe0\x91\x73\x18\x70\x7c\x0b\
\xdd\x8e\x8d\x78\xcb\xbd\x05\xab\x52\xbb\x28\x23\x51\x3c\x90\x98\
\x04\xc2\xa5\xd8\xe0\xa6\xdd\x6b\xf1\xba\xf9\x33\xe0\x60\x07\xca\
\xa9\x91\x53\x29\x2e\x78\x5b\x11\x0a\x85\xe0\xa3\xd8\x28\xf3\xf9\
\x51\xdf\xd0\x28\x28\x45\x71\xc1\x3d\x70\x7a\xae\xbd\x50\x07\x93\
\x58\xab\xd3\xa9\xe2\x60\x7f\xc2\xde\xc0\xd1\x1e\x95\x2b\xaf\x53\
\xca\x94\xe8\x75\xb7\xfb\x36\x1c\x64\xb7\xc1\x41\x16\x76\x3b\x29\
\x0e\xd2\x16\x51\x84\x7f\x6e\xd3\x48\x91\x2d\xb8\x28\x46\xfa\xdd\
\xdf\xc3\xc7\xd5\x0f\x88\xee\x71\x7a\xe0\x2f\xe8\xee\xb8\x1f\xb9\
\x5b\xb6\xa0\xee\xb2\xab\x09\xa4\x49\xd5\xda\x8f\xb5\x57\x5e\x01\
\x45\x91\x49\x94\xc1\x7b\xef\xbd\x7b\x56\x99\x48\x3a\x87\xf5\x79\
\xcf\xbe\x81\xc9\x2a\x46\xc7\xa3\x14\x9c\xa6\xdd\xbb\xf2\x2b\x45\
\x2a\xcf\xe1\x85\xae\x81\x94\xe4\x81\x09\x10\x4e\x34\x54\xd1\x92\
\xaa\x9f\x52\x2b\x13\xd3\x9d\xf4\xd9\x87\xe5\x5b\xf0\x79\xfd\x43\
\xf8\xdd\xed\x7e\x01\x3e\x88\x51\x28\x46\x0a\xc7\x76\x3d\x82\xd1\
\x8f\xfe\x89\x48\x24\x42\x2d\x74\x14\x12\xe5\xe1\xf5\xeb\xd6\xa2\
\xed\xaa\xb6\x59\x67\x46\xe9\x2b\x94\xe7\x07\x48\x0f\x9a\x64\xca\
\x6c\x6a\x1a\x36\x61\x60\x83\x10\x62\xcc\x00\xe1\x20\x72\x79\x0b\
\x29\x0a\xb7\x38\x59\xbf\x2e\x28\xc1\x68\xbe\x59\x64\xa0\xac\xb3\
\x0e\xff\x0e\x3d\x89\x53\xa1\xdb\xb1\xe9\x06\x0f\x06\x4e\x1b\x60\
\x89\x11\x02\x70\x5a\x7c\xee\xa7\xbc\x1b\xdf\xbf\x03\xfb\x3b\xfe\
\x80\xa9\xc9\x49\x44\xa3\x31\xea\x52\x73\x68\x5b\xbf\x6e\xeb\x79\
\xd7\x01\x9b\xf7\x78\xcf\x30\xf3\x2e\x23\x4b\x56\x91\x14\xa2\x84\
\x72\xce\x8d\x04\x87\xc7\xe3\xc2\x10\x98\x18\xd6\xb5\xb7\x23\xe2\
\x59\x8b\xcf\x2b\xb7\x80\xf9\xea\xf1\xed\x35\x0e\x91\xa9\xf6\x7e\
\x9e\x45\x78\xf4\x69\x84\xf4\x1e\xb8\x28\x5d\x35\xd6\x78\xd1\x7d\
\x2c\x82\xc9\x13\x7d\x9a\x1a\x08\x5d\xe3\xad\xaa\x1f\x93\x15\x65\
\x4f\x28\xb4\xe8\xa9\xf3\x3e\x9d\x26\x00\x9b\x2d\x33\xbf\x2d\x9f\
\x8d\x41\x52\x9c\x44\x63\xb9\x30\x8d\x91\xa2\x56\x41\x5d\xfb\xbd\
\xf8\x93\xa8\x71\x93\x9d\xe4\x4b\x45\xdc\xe3\xd5\xd8\xe7\x66\x08\
\xfa\x24\x11\x07\x8b\x17\xc9\xa8\xf0\x31\x8c\x46\x0c\x74\x1f\x89\
\xe0\x27\xd3\x1b\xe1\x91\x92\x68\xa9\xf7\xa1\xff\x64\x0c\xc7\x87\
\x63\x1c\xfc\x75\x8f\xee\x1b\xdf\x7b\x3e\x6d\xcd\xd9\x82\xb8\x8b\
\x2c\xae\x29\x8e\xb2\x80\x69\xe4\x49\x5d\xf3\x0c\x9c\x5c\x6b\x6b\
\xc6\xf4\x96\x91\x23\x36\x51\xc2\x27\xa0\x4c\x72\x40\x27\x7a\x69\
\xba\x4c\x54\x90\x51\x1f\x72\x22\x95\xb3\x30\x35\x62\x62\x78\x42\
\x47\x5b\xe6\xaf\xf0\xca\x49\x84\x1b\x2b\x90\xc9\xea\x18\x38\x15\
\x47\xc4\x77\xcd\xee\x0f\x9b\x9e\x68\x6f\x5b\x25\x02\xaa\xeb\xc0\
\x13\x8d\xda\x9c\x7f\x1f\x20\x2f\x84\x0b\xe7\xf3\x9b\x4c\x93\xb7\
\xba\x76\x45\xe2\x8d\x99\x48\xea\x85\xaf\x59\x02\x0c\x2b\xbc\x2b\
\x59\x8a\xee\x55\x06\x5c\xd4\x81\xaa\xc8\xea\x4c\xc4\x08\x12\xa7\
\xf0\xf3\xcc\xad\x68\x5c\x5c\x25\xf8\xbf\xf7\xdd\x7e\xf4\x79\xbe\
\x8b\xc1\x65\xbf\x47\x95\x5f\xa2\x2c\x66\xa1\x77\x48\xec\x83\xb7\
\x12\x88\xed\xf3\xf2\x03\x47\xa1\x88\xdd\x45\xd3\x08\x88\x4e\x1e\
\xc9\xd9\x5f\xe0\xb4\x62\xd2\x0c\x8d\xbe\x58\xc6\x06\xe4\xa2\x74\
\xe4\xa0\x5e\x22\x6f\xca\xd4\xfb\x5b\xa2\xbd\xb8\x19\x8f\xe1\xfb\
\xe1\x2e\x51\x79\xdf\xfb\x80\x94\xcf\x5f\x86\x0f\xc2\x4f\xe3\xd6\
\x76\x1f\x9a\x6b\x68\xbf\x10\x90\x84\x49\x1e\x7d\x76\x0a\xe3\x9a\
\x71\x07\x81\xd8\x31\x67\x00\xc5\xb1\xfe\xce\x91\x80\xf0\x08\xc3\
\x66\x53\xcf\x0a\xca\x70\xba\x40\x92\xcf\x50\x9e\xdf\x52\xa8\x95\
\x90\xc4\xeb\xbc\xce\x0a\x1e\xb3\xd0\xe4\x3c\x81\x94\x41\x0d\x5c\
\xe4\x29\x04\x26\xfe\x85\x37\x1a\x9e\xc3\xaa\xd5\x4b\x70\xfd\x15\
\x2e\x5e\x5a\xd0\x3f\x92\x13\x72\xa0\x37\x2d\x4e\xe1\x08\xc0\xee\
\x79\xff\x89\x69\xfd\xaf\x46\xa9\x1b\x65\x1d\xc4\xfb\x56\xd3\xc8\
\x40\x56\xdd\x85\xe0\x2e\x06\xb5\xed\x19\x11\xee\xa2\x6e\x48\x33\
\x5e\x61\x36\xb3\xe0\xd0\x35\xd4\xba\xc6\xb1\xa8\xe5\x32\xac\x68\
\x50\x84\x87\xf6\x1f\x4c\x61\x78\x2c\x4b\x8a\x5b\x8f\x93\x75\xb6\
\x1f\x78\x32\xac\x9d\x37\x85\xde\xd9\xf7\x36\x35\x56\x71\x46\xcd\
\x15\xe3\xd7\x44\x22\x29\x51\x47\x29\xa5\x53\x29\x39\x95\x4e\xcb\
\xc7\xb3\x57\x05\xc7\xd8\xd5\x7f\xd4\xa1\xde\x2a\x59\x69\xb1\xd3\
\xe2\xa9\x53\xa7\x7e\x48\xb7\x64\xd8\xc7\xf8\x16\x29\x66\xda\x31\
\x63\xa7\xaa\x99\xd8\x50\xa9\xb0\xf9\x3c\x12\xca\x5c\x14\x16\x29\
\x7e\x06\x94\x89\x9b\xd9\xc9\xbf\x65\x06\x5f\xfc\x53\xa6\xff\xf9\
\x29\x9a\xc5\x6b\xb8\x61\x09\x0b\x88\xd7\x66\x21\xf5\x09\xa1\x2d\
\xe7\x97\x01\xf0\x7d\x6a\x34\x1a\x65\x9a\xa6\x49\x5c\x62\xb1\x98\
\x12\x8f\xc5\x65\x02\xa1\x92\xf2\x6a\x32\x91\x74\xa4\x52\x29\x07\
\x01\x70\x66\xd2\x69\x07\x6d\xce\x5d\x87\x3d\xf7\x3c\x9c\x65\x81\
\xef\x94\xa9\x39\xea\x63\x54\xea\x91\x64\x91\x42\xf9\x8a\x26\x81\
\x99\x8a\xe5\x6d\xc5\x2d\xcb\x2e\x7c\xdc\x3b\x3c\xe5\x12\xed\x24\
\xae\x93\xa9\xc3\x88\x1d\x7f\x3b\x73\x7c\xe7\x33\xb9\xd1\xce\x01\
\x9a\x45\xdc\xb4\x78\xf7\x99\xe5\x0d\x2c\x01\xc8\xf1\x2b\x49\xbe\
\x20\x7a\x11\x5c\x11\x14\x07\x23\xd2\x28\xf5\x34\x4c\x34\xea\xa2\
\x32\x8b\xc4\xcf\xef\x53\x63\x40\xbb\x73\xea\x06\x4a\xc4\xc5\x0f\
\x1c\xb8\x34\x4f\x3f\xfa\xc4\x40\xf9\xbd\x8e\x04\x42\xed\xc9\xc9\
\x24\x9a\xaa\x5d\x28\x27\x20\x3a\x79\x83\x77\xa7\x53\x9a\x41\x3a\
\xc6\x45\xc5\x66\x8a\x8b\x44\xb1\xc9\x46\x89\xc0\x4c\x8f\xf6\x65\
\x8e\x3e\xfd\xf7\xdc\xe8\x9b\xc7\x49\x69\xae\x5c\x05\x49\xda\x7e\
\xa6\x90\x4c\xa1\x4b\x60\xe7\xa0\xb9\x00\x21\x00\x78\xbc\x5e\xeb\
\xe4\xf0\x70\xd1\x4d\x66\x21\xf9\x13\x62\xb1\x78\xae\xb0\x60\x71\
\x71\x47\x51\xea\xc6\x1f\xb9\x7f\xa4\xea\x9e\x1f\xe7\x1c\xe1\x5f\
\x0f\x4f\xa6\xe1\x25\x78\x8b\xab\x9d\x18\xd3\x68\x09\x8a\x11\xce\
\x7c\xe6\xf0\xd9\xc5\x8e\xab\x9f\x1a\x79\x35\x3f\xb6\xb7\x33\x73\
\xf4\xa9\x1e\xcb\x34\x8b\x16\xfe\x2a\x29\xb5\xfc\x99\xd6\x2f\x36\
\x00\x67\x47\xf7\x8f\x3d\x7b\x40\x54\x41\x2c\x16\x67\xa9\x54\x92\
\xc7\x01\xa3\x18\x90\x88\x42\x8c\xee\x4b\x44\x21\x39\x9b\xcd\x4a\
\x5c\xf2\xb9\x9c\x74\xba\xee\xe1\x6b\x0d\x35\xf4\x1b\xfa\xea\xb7\
\x2c\xd3\xb0\xd7\xe6\x85\xcd\xe6\xfe\x10\xf4\xe4\x0b\xc6\xf8\xbe\
\x17\xb3\x87\x1e\x3b\xc5\xb9\x6d\x92\xd0\x7f\xbb\x2b\xb7\x2c\xc1\
\x77\x4b\x18\x6e\x86\xf7\x56\x21\x06\xac\x12\xc1\x99\xfc\x9f\xf3\
\xd9\xe8\x8f\x36\xde\x22\x4e\xd6\x72\x59\x71\x90\xcb\xb4\x95\xcf\
\xae\xa1\x25\xdb\xed\x43\x5c\x7e\xc2\x66\x75\xe9\xfb\x6e\xfa\xcc\
\x2c\x1c\x2b\x72\xe1\x5d\x2c\x3f\xbd\x13\x30\xec\x0e\x11\x47\xfb\
\xfb\x70\x71\x5c\x1c\x5f\xd3\xf8\x8f\x00\x03\x00\x01\xe5\x02\x7c\
\xec\x49\x2a\x6f\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x02\xed\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x8f\x49\x44\x41\x54\x78\xda\xec\
\xd7\xcb\x6b\x13\x41\x1c\x07\xf0\xef\x6e\xd2\x36\x36\x49\xd3\x86\
\x54\x9b\xfa\xc8\x49\xb4\xda\xd2\x8a\x20\x78\x12\xf1\xe2\x49\x3d\
\x78\x50\x2f\x01\x9f\x90\x83\x54\xff\x01\x03\xe2\xc1\x4b\x5b\xaf\
\xa1\x60\xbc\x54\x7c\x40\x41\x5b\xf5\xa6\xc1\x43\xa9\xd2\x5a\xc4\
\x8b\xf4\xd2\x90\x68\x14\x14\xb6\x8f\x4d\xb3\x9b\x7d\x38\x33\xcd\
\x86\x35\x22\x4e\xdc\xdd\x5b\x06\x7e\xc9\xcc\x66\xd8\xdf\x67\x7f\
\x33\x9b\x64\x81\x56\xfb\x57\xbb\x6a\x8e\x93\x18\xf6\xea\xf4\x22\
\xc7\x9c\x51\x12\x6f\xbc\x42\xf0\x00\x70\x60\x87\xda\xed\x15\x82\
\x0b\x70\xf3\xd8\xba\x67\x08\x2e\x40\x24\x60\xe0\x49\xf2\x87\x27\
\x08\x91\x77\x62\x24\x60\xe2\x69\xf2\x27\x0e\xf6\x55\x5d\x45\xf0\
\x01\x04\x01\xa2\x28\xa2\x27\x28\x60\xfa\xa2\x44\x10\x9a\x6b\x08\
\x2e\x00\x4d\xee\xf3\xf9\x58\x50\xc4\xb3\x2b\x6b\x18\x8c\xbb\x83\
\xe0\x06\xd8\x11\xd1\x90\x88\x99\x6b\x32\x86\xe2\xba\x63\x04\x1f\
\x80\x2c\x81\x95\xdc\x8e\x98\x4d\x95\x31\xd4\xef\x0c\xc1\x05\x10\
\x6a\x15\xb0\x57\x81\x21\x82\x22\x5e\xa6\x2a\x8e\x10\x7c\x80\xda\
\x26\x6c\x04\x58\x7b\xe2\x45\x6a\xf3\xbf\x11\xdc\x00\x3b\xc2\x0a\
\xc3\x30\xa0\x28\x0a\xda\x0c\x19\x0f\x2f\x7c\xc3\xc0\x76\xa5\x69\
\x84\xd8\x8c\x56\xd7\x75\x68\x9a\xf6\x1b\xca\x8a\xc8\x36\x13\x53\
\xe7\x4b\xd8\xdf\x5b\x69\x0a\xc1\x05\x50\x2a\x15\xc8\xb2\x0c\x55\
\x55\x19\xc2\x0a\x5a\x01\xd3\x34\xeb\x11\xee\xd0\x91\x3d\x9b\xc7\
\xbe\xd8\x26\x37\x82\x0b\xa0\x56\xab\xec\xca\xad\x0a\x50\x08\x8d\
\x2a\x39\x6e\x41\xac\x08\xb5\x69\xc8\x9c\x5a\xc6\xde\x68\x99\x0b\
\xe1\xe7\x01\xd0\x13\xd3\x44\x34\x21\xbd\x52\xda\xa7\x8d\xf6\xa7\
\x16\xda\xf1\x68\x31\x8c\x82\xe4\x43\x71\xf5\x8f\xd3\x51\xc4\x04\
\x89\xe3\x8e\x00\xf6\xe4\xb7\x5e\x75\xe1\xe4\x80\x82\xa3\x09\x85\
\x8d\xe3\x41\x03\x73\xf9\x08\x9d\x96\xad\x45\x63\x5b\x71\x5c\x01\
\x5d\xdb\x02\xdc\x7f\xdf\x85\xc9\xf9\x10\x1e\x2f\x75\xe2\x6d\xaa\
\xc8\xd6\xfc\x70\x7f\x05\x47\x76\x85\xf1\xae\xd8\x39\x82\x8c\x90\
\xf3\xe4\x7b\x40\xd3\x35\x7c\x2a\xf9\x71\xe7\x75\x2f\x1d\x4e\xac\
\x29\xa2\x34\x96\x0b\xd7\xf7\xc2\xed\x13\x05\x7a\x7c\x84\xac\x77\
\xd2\x13\x80\x54\x36\x71\x7d\x66\x37\xd8\xa6\xca\x08\x37\xc8\x7b\
\xfa\xc1\x62\x0f\x3e\x7e\x15\x19\x20\x16\x90\x71\x6e\xf0\x3b\xc3\
\x11\x44\xc4\x75\xc0\xd8\xdc\x1e\x94\x36\x3a\x24\xd2\x3d\xc3\x0e\
\x64\x84\x7b\xe4\x75\xe9\x6e\xae\xaf\x7e\x37\x5c\x1a\x2e\x20\xd4\
\xce\x7e\x21\x47\x5d\x07\x90\xe4\x60\xc9\x33\xc2\xaa\xfd\xcf\xea\
\x42\x29\x8c\xe7\x9f\xa3\x6c\x40\xf7\xc3\xe5\x43\x5f\x68\x37\x4d\
\xaa\x90\x70\x13\x20\xb1\xd2\x36\x6e\xb0\xad\x71\x76\x7c\x3e\x81\
\x75\xc5\x87\xd9\xe5\x18\x26\x3f\xec\xb4\xe6\x77\xf3\x02\x04\x8e\
\xe7\x82\x04\x49\x96\xff\xcb\x67\x74\xbd\x57\x68\xe9\x37\x54\x3f\
\x6a\xf7\x7c\xba\xa1\x52\x9e\x3f\xb8\x9c\x26\x31\xdd\x4c\xd9\x5b\
\xad\xd5\xec\xed\x97\x00\x03\x00\x5d\x06\x45\x2c\x57\x01\x7f\xbf\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\xa3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x07\x45\x49\x44\x41\x54\x78\xda\xec\
\x59\x5d\x6c\x14\x55\x14\x3e\x85\x5a\x07\x44\x33\x88\xc2\x40\xd0\
\x0c\xa6\xd1\x8d\xda\x64\x14\x1e\x56\x43\xcc\x34\xa8\x2c\x0f\xd2\
\xc5\x98\xb0\x10\x13\xb6\x09\x44\x7c\xea\xf6\x45\x89\x3e\x94\xbe\
\x68\x48\x4c\x5a\x9e\x84\xf8\xd0\xf2\x22\xf8\xd4\xad\xc4\x50\x4c\
\x70\xa7\x31\xc2\x3e\x88\x1d\xc2\x8f\x4b\x03\x74\x08\x0d\x2c\x84\
\x86\xd1\x6a\x5d\x7e\xca\x7a\xce\x9d\x3b\x65\x76\xf7\xce\xcc\xee\
\xc2\x83\x26\x7b\x92\x9b\x99\xdd\x39\xfb\xdd\x73\xee\xbd\xe7\x9c\
\x6f\xce\x02\x34\xa4\x21\x0d\x69\xc8\xff\x41\x64\x1c\x29\x1c\x19\
\x1c\x63\x7c\x0c\xe1\x48\xfa\xe8\xab\x38\x06\xb8\xfe\x2d\x7e\x1d\
\xe0\xdf\x8b\x24\xc9\xf1\x08\x77\x82\xeb\xa7\xf8\xbc\x81\xd2\x54\
\x85\xf1\x1a\x81\xab\x6d\xba\xaa\xad\xdb\x06\xca\x2a\x0d\x0a\x7f\
\xdb\x60\xdf\xb0\xc0\xf8\xb6\x97\xae\x26\x3e\x6f\xc7\x61\x73\xfd\
\xdd\xd2\x13\x72\x8f\xb6\x2e\x09\x91\x68\x07\xe0\x3d\xd3\xcf\x65\
\x87\xc1\x3c\x36\x48\xf7\xbd\xa4\xe3\x59\x98\x8c\xbc\x54\xd5\xf4\
\xad\x3d\x80\x57\xf6\x25\x61\x9b\xc7\x0e\x80\x75\xda\xb0\xf0\xe3\
\x26\x1c\x66\xbd\x0e\x10\xe2\x58\x6c\x47\x9f\x1c\xdd\x98\x12\x2a\
\xa4\xfb\x3b\xc9\x30\xd7\x89\x14\x19\x9f\xfc\x22\x03\xca\x0b\x5a\
\x85\x6e\xfe\x92\x09\x83\x9f\xb5\xbb\x4e\xf4\x93\xf1\xe8\xa8\x16\
\x4f\x0d\x08\xb1\xb3\xdf\xf7\xc3\xc8\x37\xdd\xb4\x30\xaf\xe1\xb0\
\x44\x3a\xf3\x43\x1c\x18\x42\xc3\x23\xfa\xd6\xdd\xbe\x0a\x91\x68\
\x1c\xac\x33\xa3\x0a\xae\x9a\x42\x0e\x6c\xff\xea\x84\xd0\x78\x92\
\x45\x8b\x15\x68\x7d\x3d\x06\xbf\x8e\xec\xd7\xf1\xa3\x82\xbb\x1a\
\x4b\x7c\x3e\xe4\x8b\xbd\xf2\xa5\x28\x3a\xfb\x87\x34\x79\x3e\x4b\
\x80\x07\x44\x3a\xf3\x42\x56\x5f\xa7\xad\x0d\x92\xc2\x5d\x74\x62\
\x53\x1f\x3b\xc7\x74\x6c\xfc\x8c\x9f\x93\x67\x34\x50\xdf\x64\xa1\
\x93\x8c\x6d\xef\x0b\x3d\xbf\xea\x7a\x36\xbf\xee\x17\x3f\xcd\x41\
\xbf\xc5\x15\x62\x67\xb8\x5c\xec\x19\xdc\x5e\x0c\xb5\x5c\xde\xb9\
\x67\x61\xf2\xa4\xca\xce\xbc\x48\xac\x29\xd4\xbf\xe4\x5c\xc9\x61\
\x58\xd4\x81\xe7\xdd\x10\x3a\x4b\xcf\xcd\x2b\xce\xc8\xff\xc9\xc3\
\x64\x05\xda\x7f\xd5\x50\x45\xc7\xa8\x39\x64\x07\x2a\xc0\x8d\x71\
\xc7\x98\x0a\x41\x07\xca\x9d\x25\x83\x8d\xf3\xce\xb5\x44\x1e\x97\
\x41\x5e\x56\xb9\xa0\x2e\x36\x73\xd2\x2b\x2d\x32\xd4\xb3\x03\x96\
\xd7\x70\x5a\x71\x21\xb8\x2b\x37\x4d\x96\x3d\x02\x0d\x77\x65\xda\
\x62\x01\xed\x62\xd3\x4e\x92\xf1\xce\x6e\x0a\xe4\x8e\x0d\x7e\x41\
\xdc\x14\x92\xfb\x6f\x29\x3b\x27\x20\x7f\x5f\x0d\x3e\xa8\xd7\x0c\
\x80\xc3\xed\x20\xbf\x1c\x07\x78\x77\xc8\xdf\x10\x57\x7e\xc4\xcc\
\x68\xa5\x41\x4d\x66\xc0\x6a\xd1\x83\x75\xd1\x59\x38\xb8\x8a\xee\
\x16\x7b\x52\x75\x55\x41\x4c\xca\x83\xf9\x91\xee\xe0\x09\x68\x75\
\x8e\x33\x9d\x7e\xfb\x5c\xda\xb6\x2f\x1a\xe1\xce\x5a\x69\xc2\xee\
\xb7\x0e\x77\xbb\xab\xeb\x2f\x27\x18\xf6\xa0\xc8\xf8\x6a\xd2\xe8\
\x28\xd8\xb9\x18\x4c\x5f\x56\x58\x20\xcd\x97\x2a\x8d\xc7\x95\x87\
\x29\x93\xac\xde\x82\xe3\x3a\x58\xc3\x71\x90\x23\xc0\x46\xc5\xa1\
\x4c\x03\x1c\x43\xb5\xd9\xc2\xc7\xf8\x69\x0f\xfc\x93\xd7\xe1\xca\
\x51\x15\x5a\x13\x62\xec\x9f\x51\xed\xe2\x21\x93\x63\x17\x1e\x86\
\x46\x64\x30\x48\x8b\xd0\x96\x2a\xc2\x7b\x99\x22\x1e\x13\xe7\xbe\
\x45\x2e\x72\x8a\xe0\x8d\xde\x38\xa3\x0f\x2b\xf4\x22\xbc\xd1\xe7\
\xe8\xd3\x95\x3e\x3b\xb4\x22\x5e\x86\x3d\xc0\x70\x08\x8f\x70\x49\
\x9f\xee\x69\x3e\x87\x52\xc8\xf0\x08\x84\x19\x85\x59\xa6\x88\xa9\
\x95\x0d\xba\xe7\xbc\x45\x17\x38\x4c\x4e\x15\x31\x4d\x32\x5d\xba\
\xd2\x67\x81\xb3\x6e\x8e\x9f\x10\x60\x97\x3b\x5b\x37\x17\x1a\x40\
\xc0\x24\xd2\x09\xa0\x42\x35\x97\x99\x90\xdf\x10\xb7\x21\x3e\x84\
\xf7\x9d\xfc\x9c\xb2\xdd\xc2\xea\xac\x91\xbe\xcb\x6d\x5c\x7e\x83\
\xb4\x00\x39\x51\xda\xcb\x9d\x92\x88\x3d\x40\xc5\x92\xb0\xbd\x69\
\x98\xb0\x49\x1f\xb1\x09\xb7\xb3\x5e\x07\xe2\x08\x3a\xe4\xc7\x6d\
\x5c\x7e\xb3\xaf\x8b\xa8\x0a\xe3\x2b\x7d\x68\x88\xee\xc7\x6d\x3c\
\xdc\x89\x62\x86\xa2\x73\x6c\xe7\xde\xb1\x40\x6c\xce\x9d\x88\xd0\
\xa5\xeb\x09\xe2\x23\x1f\xf6\x1e\x91\x57\x46\xa2\xbe\x0a\xc4\x6f\
\xa0\xa9\x89\x98\x63\x02\x57\x3c\x92\xfc\x32\x13\x08\x48\xdc\x09\
\x99\xa6\x8a\x46\x25\x90\x63\x49\xaf\xbe\x95\x08\xc4\x26\x3e\x84\
\xfa\x64\xc0\xde\x5a\xb9\x90\x8e\x06\x31\x3a\xe1\x5b\xe9\xa6\x9c\
\x02\x94\x7f\xbe\x8b\x9d\xfd\x68\x47\x97\x7f\x4e\x9e\x71\xe8\x01\
\xe9\x43\x1b\xd7\xdf\xe8\xaf\x4f\x34\x82\x74\xcd\x59\x9d\xaa\xbc\
\x2a\x88\xb5\x70\x2a\x21\x2a\xf7\xe2\xaa\x8c\x67\x77\x89\xc6\xde\
\x15\xfc\x78\x10\x55\xdb\x39\x59\xa8\xb1\x63\x23\xe2\x59\xae\x93\
\x25\xc5\x10\xb1\xb1\xa0\xc9\xb5\x3a\x50\x51\x38\xc8\x10\x02\xf7\
\xa5\x13\x65\x2b\xce\x56\xf0\x4a\x75\x69\x4e\x68\x78\x29\x95\xa8\
\xd9\x01\xcb\xbe\x6e\xb1\x15\xa4\x41\x13\x04\x72\x95\x29\x13\x72\
\x67\x4d\x28\x3c\xab\xb3\xd5\x0e\x34\x1c\x79\x13\x05\xa8\x75\xd5\
\x86\x7c\x41\x66\x3b\x1a\x48\x3f\xa6\x2d\xa8\xe7\x85\x26\x8f\x81\
\x96\x34\x67\x34\xd9\x9a\x8d\x04\xaf\xba\xb9\x87\x28\x82\x3d\x69\
\x9d\x97\xce\x3c\x9d\xe2\x34\x38\x40\x7e\xda\x42\x4e\xdb\xe6\xb5\
\x05\xd2\x85\x66\x3d\x18\x9b\xaa\xf7\xef\xfb\x2d\x9e\xb5\x6a\x0a\
\x62\x60\x3f\x32\x3a\xd9\xea\xfa\x0a\x3d\x3b\xc9\xde\xd8\xda\x71\
\xa5\x0c\xa6\x1f\x24\xf4\x9c\xf4\x48\x9f\x7e\x17\x86\xed\xe0\xf9\
\x12\xb2\xb0\x34\x9a\x43\xde\xa2\xc2\xc5\xef\x34\x58\xa8\x38\xc1\
\xe4\x3d\x36\xe7\xf6\x39\x13\xcc\x16\x68\x96\x11\x1c\xc3\x38\x69\
\x0c\xa6\x4e\x29\xb0\x34\xca\x78\x7f\xc9\x31\x18\xed\x74\xb9\xcd\
\x06\x7e\x24\x2e\x23\x76\x9c\xf1\xa0\xc5\x91\x52\x3e\x34\x3e\xe8\
\xf0\xa6\x3b\xac\x90\xed\x79\x24\x54\x82\x71\x16\xe2\x34\x0e\xaf\
\x09\xa5\x12\xe8\xb0\xa3\xbb\x24\x9c\x4a\xb0\xe7\x2e\x76\x4b\xf5\
\x54\x62\x7e\x15\xc6\xd3\x84\x7d\x54\xa4\x34\x3d\x01\x6f\xbf\xff\
\x11\x44\x5e\xd1\xb0\xc8\x2c\x87\x9b\x93\x39\xf9\xde\xdd\x82\xc3\
\x5a\x1f\xb0\xc5\x18\x8e\x5d\x58\x3f\xa4\xe8\x3b\x9b\x61\xed\xfa\
\x04\x28\xcb\x15\xb8\x77\xf7\x36\xd1\x09\xca\xcb\xa7\xd8\xce\x3e\
\xc0\xfe\x14\xd3\xa9\xbe\x66\xc3\x4e\xd4\xdd\x0c\x6b\xd6\xc6\x40\
\x92\x16\xc0\x5f\xb7\xf2\x12\xc6\xa0\xc2\x76\x35\x80\x89\x36\x55\
\x61\x3c\x6b\x7d\x10\xb7\x29\xcf\xdb\xc4\x87\xa8\xd4\x63\x46\x31\
\x38\xbf\x61\xdc\x86\xa8\x04\x55\xdc\x8a\xf3\x98\x4d\x33\x2a\xe1\
\xe1\x4e\x19\xac\x07\x3a\x51\x15\x11\x36\x71\x21\x4f\xcb\xa6\xae\
\xf7\x81\xaf\xd1\x90\xd8\x07\x9f\x1c\x84\xe6\x16\xa9\x32\x07\xe3\
\x77\x44\x05\x2e\xfc\x76\x54\xc5\x15\x23\x0b\x76\x21\xf5\x90\x5a\
\x57\xc7\xc4\x0d\x89\x95\x11\x97\x1a\xb0\xb6\x0a\x1a\x9f\x10\x19\
\xef\x62\xd3\x22\xe4\x27\x4e\x29\xb8\xd3\xee\x4e\xd4\x94\x85\x64\
\xd6\xfa\xd8\x11\xdc\xfa\xa0\xc9\x79\x7b\x24\x85\x13\xca\x41\xd4\
\x83\x95\x77\x7c\x4e\x7a\xa4\x4f\xbf\x13\x19\xef\x15\x3e\x7f\xb2\
\x9e\x42\xa6\xd1\x64\x5e\x4a\x5c\x5e\x39\xa9\x60\x39\xad\x12\x9d\
\x75\x0e\xfc\xda\x2a\x8c\x07\x4d\xa2\xfe\x35\xde\x2a\x79\x6a\x1b\
\x1a\x6e\x80\x9f\xb3\x84\xeb\x62\xdb\x33\xaa\xdb\x56\xa1\x14\x68\
\xd4\xc4\x85\xaa\xe6\x2a\xbc\x61\x55\xee\xac\x2f\x9d\xc0\xf4\x2a\
\xa2\xd0\x64\xf4\xc8\x59\xdf\xaa\x5c\xb3\x03\x16\x05\x52\x55\x5c\
\x85\xe7\x79\x57\x3f\xb4\x0d\x73\xdb\x06\xa2\x29\xa1\xfd\xa3\xb2\
\xb5\xab\x99\x0b\x21\xc7\x87\x43\xbf\x38\x7c\x25\x90\xab\x50\xc5\
\x44\x07\xb2\x99\x61\xc8\xde\x8f\x87\x19\x82\xc8\xc3\xec\x0d\xed\
\xd0\x08\x72\xa2\xc7\xb4\x60\x6c\x2a\x98\x57\x0d\x5f\x2e\x34\x2f\
\xa4\xb1\x65\xe4\x7e\xe8\x0d\xef\xf3\xf0\xd6\x87\x75\x7c\x10\xac\
\x71\x33\x58\x97\x9c\xa5\x2a\x8b\xfa\xb9\xa1\xee\x70\xec\x93\xbd\
\xc0\x8f\x4e\x5d\xdd\xe9\x51\xb8\x91\x4d\x62\x80\x4a\xb0\x2c\xea\
\xcf\x6d\xac\xb4\xdb\xfa\xb8\x8d\xd4\x40\x87\xe7\x30\x8d\x12\xf5\
\x10\x19\x4f\x6d\x98\xd9\x02\x59\xd5\x8b\xbb\xe6\xb4\x6c\x54\x9f\
\x82\x7b\xba\x9f\x1c\xb0\x39\xf5\xb0\x1f\xea\x0f\x0e\xcc\x04\x2a\
\xbc\xb8\x8d\xf5\x40\xe7\xb8\x0d\xad\xce\x74\xe5\x1f\x1c\x38\x7a\
\xa0\x2d\x85\x69\xa0\xa3\xe4\xd8\x30\x83\xc8\xf0\xb2\x3f\x38\x10\
\x53\x83\xd5\x3d\xa5\xd8\xe3\x07\xe8\xe8\x3c\xf4\x1f\x1c\x25\x35\
\x01\x47\x07\x6f\xb2\x5a\xdc\xe0\x61\x5e\x51\x45\x8d\xe1\x2e\xee\
\xbc\xc6\x0d\x30\xf9\x7b\xad\xe5\xf3\x17\x53\x87\xa7\x81\xeb\xc5\
\xb6\xa1\x21\x0d\x69\x48\x43\xfe\xb3\xf2\xaf\x00\x03\x00\x53\x60\
\x89\xda\x1d\xfd\x02\x88\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x06\x5d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x05\xff\x49\x44\x41\x54\x78\xda\xec\
\x59\x5f\x4c\x1b\x75\x1c\xff\xba\xca\xc0\x9a\xc9\x6f\xeb\xc4\x6e\
\x49\xe3\x6d\x26\x88\x51\xc2\x4d\x7d\x18\x46\xe3\x35\x24\x0b\x0f\
\x23\xbb\xbd\x2c\x4c\x1f\x38\x5e\x66\x78\x6a\x0b\x3e\xa8\x31\x01\
\x1e\x4d\x84\x96\x27\x25\x31\x01\x1e\x8c\x93\xc4\xac\x84\x3d\x48\
\xe2\x2c\x18\x63\xd5\x48\x2c\x21\x73\xa4\x2f\x1c\x2b\x02\x5b\xa8\
\x1c\x74\xc2\x64\x12\xfc\x7e\xaf\xd7\xa6\xd0\xbb\xeb\x41\x0f\xc7\
\x43\xbf\xc9\x2f\x2d\xdc\xef\xbe\xff\x7e\xdf\x3f\x9f\xef\xaf\x00\
\x25\x2a\x51\x89\x4a\xf4\x38\xe9\x89\x03\xe2\xcb\xe1\x92\x70\x55\
\xe2\x62\xb8\xe6\x70\x0d\xe2\x92\x0f\xbb\x01\xa4\x6c\x27\x3b\xe6\
\xf4\x8b\x42\x1d\xd4\x55\x7b\x60\x35\xb5\x01\xf2\xe2\x32\x84\xc7\
\xa7\x40\x49\xad\x87\xf0\x79\x37\x2e\xe5\xb0\x1a\xf0\x3b\x5f\xed\
\xe1\x23\xfd\xed\x80\x46\xec\x78\x80\xca\x83\xf7\xbd\x5e\x88\xc5\
\x13\x31\xfc\xf3\x9c\x5d\x02\x1d\x36\x2a\xdf\x85\xca\x37\xeb\x29\
\x4f\x54\x51\x5e\x06\xcd\x17\x5e\x87\xb1\xe8\x1f\xee\xa5\xe4\x1a\
\x39\x6e\xdc\x0e\xa1\x47\x6c\x34\xc0\x17\xec\xb8\xa2\xab\x7c\x36\
\xbe\xf0\x19\xed\xa1\xbd\x76\x09\xdd\xaf\x01\x82\x96\xa8\xd9\xbf\
\xb9\xd3\x2e\x26\xbc\x56\xad\xbf\x3b\xb5\x88\xe9\x8b\x0e\xbf\xf3\
\x0d\xbe\x38\x0e\x9c\xeb\x28\xd3\x78\xe4\x26\xbd\x70\xd0\x06\x90\
\x80\x08\xae\x6d\x0c\x95\x08\x7a\x73\x16\xbf\xaf\xe0\x1a\x20\xe7\
\x72\xa7\x5c\xfa\x8a\xc7\x6f\xe2\x1a\x05\x48\xc6\x01\xd6\x93\x69\
\x6d\x5d\xe5\x99\x84\xa7\x77\x57\x88\x17\xf1\x24\xde\x9a\x0c\xcb\
\xc6\x3c\x69\x71\x5f\x90\x2a\x0b\x1d\xbf\xf8\x76\x5d\x36\x4c\xe4\
\xc5\x24\x0b\xf4\x0c\x4b\xe1\xf1\x98\x94\xa7\xf8\xe2\x24\x7e\x2e\
\xe8\x32\x53\x36\xb6\xe8\xe3\x86\x28\xf0\x6a\x48\x65\x8c\xa7\x44\
\x0f\x4f\x4c\x09\xc8\x53\xd0\x2a\x56\xc0\x8e\x2a\xe4\x47\x85\x83\
\x94\x9c\xe8\x25\xdd\x0d\x83\x37\xa3\xd0\xda\x35\x08\xb3\x5f\x5c\
\x05\xae\x3c\x99\xf5\xb4\x1e\xc9\xc9\x4d\x38\xf3\xd1\x34\x0c\x74\
\x49\x20\x5d\xac\xd7\xdd\x83\x95\x4a\xad\x58\x68\x04\x19\x10\x2a\
\xc6\x00\x3a\xe6\xd9\x48\x7f\x87\x71\x7c\x6b\x74\xf9\xfd\xcf\x80\
\x73\x2c\x40\xf0\x8a\xc7\x74\x5f\x60\x38\x01\xf2\xd6\x69\xb8\xf1\
\x69\x9b\xe9\xbe\xf1\xc9\x38\x1a\xd1\x43\xfd\xe2\x8c\x59\xdf\x28\
\x94\x03\x02\x7a\xdd\x5c\xf9\xcd\x14\xb2\x97\xa1\xf3\x12\x07\xa1\
\x5b\xf7\xd5\x65\x44\x99\xe7\x9d\xd7\x2e\x9a\x4b\xc5\x10\x14\x30\
\xad\x79\x8e\xb1\x42\xf9\x50\x28\x07\x78\x8a\x53\x5d\xa2\xa4\xbc\
\x3f\x9d\x0d\x17\xde\x99\xf5\xb0\x3c\x12\x53\x38\x5f\x43\x15\x08\
\xd5\xc7\xd2\xde\x8c\xa7\xa0\x0f\x15\xc7\x4f\x82\x12\x9c\x6e\x28\
\x92\x23\x16\x26\x55\x67\xc0\xd6\xa6\xfa\x2f\xb1\xd6\x09\x31\x59\
\x21\x05\xc2\xc5\x26\x71\x4e\x06\xa2\x80\x44\x34\x2d\x50\x9f\xce\
\xa1\xa2\x12\xae\x16\xb2\x2b\x13\x11\xb8\x46\x34\x3c\xb4\xb2\x63\
\x37\x29\x4b\x8a\x93\x33\xf6\x41\x85\x0c\x90\x29\xa1\x54\x21\xa4\
\x38\x79\xdd\xa0\xb2\x50\x72\x6a\xb1\xaa\x68\x89\x17\x32\x74\x01\
\x56\x2f\x8e\x60\xde\x2a\xf2\xbc\x37\x9d\xf5\x78\x5e\x32\x27\xd6\
\xa1\x10\x00\x2c\x04\x25\x94\x19\x79\xc9\x2f\x9d\x9d\x07\xb6\x99\
\x30\xf3\x3a\xf4\x7d\xaf\x86\xc8\x75\xcd\xd3\x66\x54\x73\xfc\xc1\
\x1d\x5e\x38\x91\x48\x97\xdb\xed\x2d\x43\x87\xb4\x7d\x79\x17\xb4\
\x52\xba\xef\x24\x26\xeb\x43\xad\x83\xe6\x28\x98\x3c\xd5\x35\xaa\
\x9e\x4c\xb7\x85\x53\xef\xa6\xbd\x9a\x77\x0d\x49\x93\x19\x2a\xf6\
\x04\x88\x7e\x41\x6f\x34\x8e\x4c\xad\xba\x1b\x5f\xae\x04\xe6\x74\
\xe4\x55\x96\xd6\x21\x19\x1e\x3e\xda\x6e\xb5\x08\xd0\xc8\x9b\x73\
\x5f\xff\xb6\x22\x56\x94\x1d\x81\xf3\x67\x9f\xce\xf3\xbc\xb7\x37\
\x0e\x3f\xcf\xfe\x4d\xa8\x95\x78\x3e\xb4\x0b\xe7\x07\xa9\xd5\x63\
\x65\xd9\xf6\x37\x54\x6d\x4b\xf5\xae\x6d\xc4\x34\xd4\xfa\x67\xf7\
\x89\x63\xe8\x9d\x59\xe2\x41\xbc\x88\x27\xf1\xd6\xe0\x44\x50\x93\
\x69\x1b\x94\x50\x2b\x25\x41\x08\xee\xc5\x57\x80\x9d\x3a\xa9\x72\
\x67\x49\x74\x52\x32\xc1\xb4\x6a\xb3\x57\x78\x4c\xef\x30\xe6\x7a\
\x0e\xb8\xda\x74\xb1\xe2\x9c\xcb\xc0\x16\xd5\xc1\x87\xb7\x73\xa0\
\x21\x05\x23\x52\x53\x3d\x1f\x6c\xcf\x87\xcb\xd4\x31\xa9\x0b\x5b\
\xc5\x2e\xb9\xd8\x8a\xba\xf1\xee\x26\x49\x78\x28\xd0\x3b\x0c\x83\
\xa3\x51\x0a\x21\x6f\xa1\xe9\xcd\x8a\x01\x04\xba\x44\xb3\xd6\x9f\
\x83\x5d\xbc\x16\x4e\x42\x40\xe5\x23\x66\xd8\x2a\x03\x4d\x10\x24\
\x52\x03\xbb\x5c\x0c\x9c\xe6\x50\x98\x38\xd0\xd9\x62\x1e\x0b\xa8\
\x88\xff\x6a\x83\xd5\x41\xc5\x47\x7b\xcd\x94\x27\x22\x99\x24\x7b\
\xd7\xdc\xb1\x67\x03\x44\x3a\x62\xc3\x29\x8b\x9a\x9b\x36\xa8\xb4\
\x70\x6a\xb5\x13\x2d\x18\x20\xb6\x34\xd5\x1b\xe3\x2a\xea\xc8\x38\
\x43\x30\x79\x04\x84\x17\x8e\x16\xe4\x59\x28\x89\x99\xae\xa7\xa8\
\x23\x53\xfb\xcf\x69\x6c\x58\x4d\x72\x73\x86\xe6\x83\x4b\x39\xd5\
\x89\xe2\x79\x48\x83\x12\x90\x37\xfc\x64\x70\x10\xf1\xcd\x3d\x59\
\x8f\x13\xc2\x31\x85\x15\x55\x85\x28\xa9\x76\x0c\x2a\xf3\x3f\x99\
\xe2\x7d\xba\x99\xc0\x53\xe3\x7c\x18\x26\x99\x04\xc5\x44\xe7\xfb\
\xbe\xba\xc5\xe3\xa7\x2f\x0f\x07\x91\xc7\x0d\xe0\x84\xb2\xfe\x6f\
\xd1\xb7\x12\x6c\xe9\xaf\xb5\x66\xff\x9b\x18\x42\x77\x7f\x4c\x0b\
\x7b\xb4\xa1\xbb\x11\x3d\x05\xd8\x9c\x68\xc2\x62\x9f\x7f\xf8\x2e\
\xd4\x70\x6e\xf5\x26\x82\x16\x7d\x97\x9a\xde\xa0\x50\x64\x63\xd1\
\xdb\xc0\x57\x6d\x41\x4d\x19\xc2\x84\x04\x3a\x63\x6d\xde\x10\x4e\
\x10\x94\xc0\xe9\xad\x0f\xbf\xce\xec\xd7\x80\x19\x25\xb5\x21\xf1\
\x27\x52\xac\xe6\x59\xf3\x74\x21\x61\xe2\x85\xb7\xa0\xeb\x5a\x93\
\xe1\x9e\xf3\xb5\x67\xd5\x8b\xae\x91\xef\xa2\x20\xbd\xea\x30\x54\
\x3c\xe3\x90\xfe\x1f\x96\x29\xb1\xda\x8a\x1d\xea\x03\x04\x15\xcc\
\xb0\x0b\xe1\x16\xc2\xfc\xbe\x77\x1a\xcc\x39\x61\x98\xf8\x9a\x5e\
\x52\xf7\x9a\xe1\x2b\x92\x45\x32\xad\xf4\x15\x2b\x58\x68\x06\x71\
\xce\x1c\x7a\x43\x5c\xc5\x61\xdc\xfd\x4c\x19\xb8\x2b\xcb\xb2\x5e\
\x22\xcf\x87\xa7\x14\x05\x93\xbd\xe2\x03\xa9\x51\x57\x69\xb8\x87\
\x39\x2c\x4f\x00\xfc\xf9\x2b\xa2\xda\x79\x40\x5c\x05\xdf\xde\x5e\
\x53\x26\xe2\x0f\x2a\x08\x5b\xd5\xb8\x2b\xb2\x8a\x7f\x32\xb6\x84\
\xca\xcf\x65\xb0\xd5\x75\x3b\xaf\x16\xa9\x1e\x77\x6a\x65\x8d\xed\
\xaa\x2e\x31\x4c\x58\x6c\x4e\x1d\x96\x12\xd4\xdb\x13\xa7\x53\xf0\
\x6a\x70\x22\x77\xf0\x51\xb4\xe9\xab\xdb\xea\x45\xf0\x5e\xb0\x90\
\xac\xa1\xc3\x56\xbd\xee\x8a\x43\x8a\xa5\xca\x92\x46\x9c\xff\x64\
\xc1\x6c\xa1\x5b\x87\xff\xf3\x72\x77\x25\xf2\x71\x3d\x13\x3c\x9b\
\xe6\xb7\x0d\x18\xff\x78\x02\xe4\xe9\xe3\x87\xed\x6e\xb4\x2f\x30\
\x84\x48\x72\xdd\xb8\xb2\xd0\xb3\xc0\xf0\xbc\xba\xd7\x2e\xa1\xf6\
\x5f\xaf\x7b\x9c\x7c\xa4\xbd\x3a\x6f\xf0\x21\xe5\x69\x50\xc1\x44\
\xb5\xf5\x7a\xfd\x60\x7e\xe0\x70\x3a\xfc\x62\x1d\x03\xee\x64\x39\
\x54\x3e\xe5\x80\x29\xac\x2e\x58\xa9\xc8\x88\x43\xff\x03\x47\x6e\
\xc5\x22\x3c\xf4\xbc\xa6\xec\x2a\x1c\xd0\x4f\x4c\x25\x2a\x51\x89\
\x4a\xf4\x78\xe9\x3f\x01\x06\x00\xc4\xe2\xad\xb3\xbb\x01\x63\x01\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x04\
\x00\x07\x68\x94\
\x00\x70\
\x00\x61\x00\x72\x00\x74\
\x00\x09\
\x0a\xfb\xcb\x33\
\x00\x70\
\x00\x61\x00\x74\x00\x68\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x73\
\x00\x0a\
\x05\x77\xdc\x61\
\x00\x64\
\x00\x6c\x00\x67\x00\x5f\x00\x61\x00\x64\x00\x64\x00\x73\x00\x65\x00\x71\
\x00\x0a\
\x06\x69\x2c\x5c\
\x00\x64\
\x00\x6c\x00\x67\x00\x5f\x00\x70\x00\x65\x00\x6e\x00\x63\x00\x69\x00\x6c\
\x00\x09\
\x0d\x66\xaa\x14\
\x00\x64\
\x00\x6c\x00\x67\x00\x5f\x00\x70\x00\x61\x00\x69\x00\x6e\x00\x74\
\x00\x0a\
\x06\x99\x4e\x04\
\x00\x64\
\x00\x6c\x00\x67\x00\x5f\x00\x73\x00\x65\x00\x6c\x00\x65\x00\x63\x00\x74\
\x00\x0a\
\x06\xa5\xdd\x25\
\x00\x64\
\x00\x6c\x00\x67\x00\x5f\x00\x73\x00\x71\x00\x75\x00\x61\x00\x72\x00\x65\
\x00\x0d\
\x02\x95\x5d\x52\
\x00\x64\
\x00\x6c\x00\x67\x00\x5f\x00\x68\x00\x6f\x00\x6e\x00\x65\x00\x79\x00\x63\x00\x6f\x00\x6d\x00\x62\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x07\
\x00\x00\x00\x0e\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x1b\
\x00\x00\x00\x72\x00\x00\x00\x00\x00\x01\x00\x00\x1c\xac\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x10\xf7\
\x00\x00\x00\xa6\x00\x00\x00\x00\x00\x01\x00\x00\x27\x44\
\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x9d\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 61.707257
| 97
| 0.726439
|
07e863b9065e0a2e23c6a7b3ef08a7441ba51432
| 3,265
|
py
|
Python
|
tsyplov_stats/sarima_model.py
|
tsyploff/tsyplov_stats
|
29126d494bd846a9d40357c59c5c1751d006bc65
|
[
"MIT"
] | null | null | null |
tsyplov_stats/sarima_model.py
|
tsyploff/tsyplov_stats
|
29126d494bd846a9d40357c59c5c1751d006bc65
|
[
"MIT"
] | null | null | null |
tsyplov_stats/sarima_model.py
|
tsyploff/tsyplov_stats
|
29126d494bd846a9d40357c59c5c1751d006bc65
|
[
"MIT"
] | null | null | null |
import numpy as np
from tsyplov_stats.wolfram_functions import *
from tsyplov_stats.arima_model import diff, accumulate
from tsyplov_stats.sarma_model import SARMA
from tsyplov_stats.autoregression_model import AutoRegression
def seasonal_diff(ts, D, s):
start, end, dts = list(), list(), ts.copy()
for _ in range(D):
start.append(dts[:s])
end.append(dts[-s:])
dts = dts[s:] - dts[:-s]
return dts, start[::-1], end[::-1]
def seasonal_cumsum(dts, ts0, s):
'''
Inverts one round of lag-s differencing: ts0 is the stored leading season (length s);
it is prepended to dts and the result is cumulatively summed season-by-season.
'''
k = s - len(dts) % s
ts = np.hstack((ts0, dts, np.zeros(k)))
ts = np.cumsum(partition(ts, s, s), axis=0).flatten()
return ts[:-k]
def seasonal_accumulate(dts, start, s):
integrate = lambda dy, y0: seasonal_cumsum(dy, y0, s)
return fold(integrate, dts, start)
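# Worked round-trip (added for illustration; assumes partition() and fold() from
# wolfram_functions mirror their Wolfram Language counterparts):
#   ts = np.array([1., 2., 3., 4., 5., 6., 7., 8.])
#   dts, start, end = seasonal_diff(ts, 1, 3)   # dts = [3. 3. 3. 3. 3.], start = [ts[:3]]
#   seasonal_accumulate(dts, start, 3)          # -> [1. 2. 3. 4. 5. 6. 7. 8.]
# i.e. seasonal_accumulate undoes seasonal_diff once the stored leading season is supplied.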
class SARIMA(AutoRegression):
def __init__(self, order=(1, 1, 1), seasonal_order=(1, 1, 1, 12)):
'''
order = (p, d, q)
seasonal_order = (P, D, Q, seasonal lag)
'''
self.p, self.d, self.q = order
self.P, self.D, self.Q, self.s = seasonal_order
self.true_values = np.zeros(2)
self.fitted_values = np.zeros(2)
self.residuals = np.zeros(2)
self.coef = np.zeros(self.p + self.q + self.P + self.Q + 1)
self.reg = SARMA(order=(self.p, self.q), seasonal_order=(self.P, self.Q, self.s))
self.series = np.zeros(2)
self.dts = np.zeros(2)
self.start = np.zeros(2)
self.end = np.zeros(2)
self.seasonal_dts = np.zeros(2)
self.seasonal_start = np.zeros(2)
self.seasonal_end = np.zeros(2)
def fit(self, ts):
self.reset_to_default() #model clearing
self.series = ts.copy()
self.dts, self.start, self.end = diff(ts, self.d)
self.seasonal_dts, self.seasonal_start, self.seasonal_end = seasonal_diff(self.dts, self.D, self.s)
self.reg.fit(self.seasonal_dts)
self.coef = self.reg.coef
self.fitted_values = accumulate(seasonal_accumulate(self.reg.fitted_values, self.seasonal_start, self.s), self.start)
self.true_values = ts[-len(self.fitted_values):]
self.residuals = self.true_values - self.fitted_values
return self
def predict(self, h=1):
return accumulate(seasonal_accumulate(self.reg.predict(h), self.seasonal_end, self.s), self.end)[-h:]
def reset_to_default(self):
self.true_values = np.zeros(2)
self.fitted_values = np.zeros(2)
self.residuals = np.zeros(2)
self.coef = np.zeros(self.p + self.q + self.P + self.Q + 1)
self.reg = SARMA(order=(self.p, self.q), seasonal_order=(self.P, self.Q, self.s))
self.series = np.zeros(2)
self.dts = np.zeros(2)
self.start = np.zeros(2)
self.end = np.zeros(2)
self.seasonal_dts = np.zeros(2)
self.seasonal_start = np.zeros(2)
self.seasonal_end = np.zeros(2)
return SARIMA(order=(self.p, self.d, self.q), seasonal_order=(self.P, self.D, self.Q, self.s))
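# Minimal usage sketch (illustrative monthly series; assumes the underlying
# SARMA fit converges on it):
#
#     ts = np.sin(np.arange(120) * 2 * np.pi / 12) + 0.05 * np.arange(120)
#     model = SARIMA(order=(1, 1, 1), seasonal_order=(1, 1, 1, 12)).fit(ts)
#     forecast = model.predict(h=12)  # next 12 values on the original scale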
| 33.316327
| 126
| 0.57366
|
9bcf2e128e35fbb57acf8e783f35818fe74e9623
| 617
|
py
|
Python
|
create_annotation.py
|
h1-the-swan/science_history_institute_chp_app
|
0e99dec17403dfcaa2e7fbcd0374c39a773445b1
|
[
"MIT"
] | null | null | null |
create_annotation.py
|
h1-the-swan/science_history_institute_chp_app
|
0e99dec17403dfcaa2e7fbcd0374c39a773445b1
|
[
"MIT"
] | 2
|
2021-03-31T19:15:56.000Z
|
2021-12-13T20:10:25.000Z
|
create_annotation.py
|
h1-the-swan/science_history_institute_chp_app
|
0e99dec17403dfcaa2e7fbcd0374c39a773445b1
|
[
"MIT"
] | null | null | null |
import os
def create_new_annotation(target, text, uri, group='Kzwy6GDV', tags=None):
    if tags is None:  # avoid sharing a mutable default list between calls
        tags = []
    target['type'] = 'TextQuoteSelector'
a = {
'group': group,
'permissions': {'read': ['group:{}'.format(group)]},
'tags': tags,
'target': [{'selector': [
target
]}],
'text': text,
'uri': uri,
}
return a
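# Example payload (illustrative values; the target dict is expected to carry
# the Hypothesis TextQuoteSelector fields, e.g. exact/prefix/suffix):
#
#     ann = create_new_annotation(
#         target={'exact': 'phlogiston', 'prefix': 'the ', 'suffix': ' theory'},
#         text='Early combustion theory.',
#         uri='https://example.org/page',
#         tags=['chemistry'],
#     )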
def get_token_params(app, username):
token = app.hypothesis_client.grant_token(username=username)
params = {
'assertion': token.decode(),
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
}
return params
| 25.708333
| 72
| 0.560778
|
0bdb6570c5c0e7d0fb6841ffff1350c5df95e99c
| 3,570
|
py
|
Python
|
main.py
|
pohanchi/RePred-YuHow
|
a97bf4f93801bfab41ba19699b2b7c4184a2f40b
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
pohanchi/RePred-YuHow
|
a97bf4f93801bfab41ba19699b2b7c4184a2f40b
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
pohanchi/RePred-YuHow
|
a97bf4f93801bfab41ba19699b2b7c4184a2f40b
|
[
"Apache-2.0"
] | null | null | null |
import torch
import numpy as np
from torch.utils.data import DataLoader, Dataset
import yaml
import IPython
import pdb
import tqdm
import argparse
from torch import nn
from tqdm import trange
from model import Model
from dataset import Resisitivity_Dataset
import wandb
def train(expert, optimizer, criterion, dataloader, eval_dataloader, config):
step = 0
# IPython.embed()
# pdb.set_trace()
for epoch in trange(config['train_args']['epoches']):
for num, (data, label) in enumerate(tqdm.tqdm(dataloader)):
# forward model to get prediction
output = expert(data).squeeze(-1)
# calculate loss
loss = criterion(output, label)
wandb.log({"loss":loss.item()},step=step)
            # compute gradients and accumulate them in each weight
loss.backward()
# update all weights in model
optimizer.step()
            # clear the gradients on each weight to prepare for the next update step
optimizer.zero_grad()
step +=1
if step % config['train_args']['eval_step'] == 0:
                # evaluate whether the prediction is close to the ground truth
eval_loss = evaluate(eval_dataloader, expert, config['eval_part'],step)
torch.save(expert.state_dict(), f"step-{step}_"+config['path']['model_save_path'])
return
def evaluate(eval_dataloader, model, config_dict,step):
criterion = nn.L1Loss()
prediction_list = []
label_list = []
eval_loss = 0
with torch.no_grad():
for num, (data, label) in enumerate(tqdm.tqdm(eval_dataloader)):
prediction = model(data).squeeze(-1)
loss=criterion(prediction, label)
eval_loss += loss
            # save all predictions to visualize the results
prediction = prediction.tolist()
prediction_list.extend(prediction)
label_list.extend(label.tolist())
eval_loss /= (num+1)
wandb.log({"eval_loss":eval_loss.item()},step=step)
# write the (prediction, label) to the output file
zip_object = zip(prediction_list, label_list)
zipped_list = list(zip_object)
f = open(f"step-{step}_" + config_dict['output_file_path'], "w")
for x in zipped_list:
f.write(str(x)+"\n")
f.close()
def main(config):
# wandb init
wandb.init(project=config['project_name'], config=config,name=config['exp_name'])
# Initialize Model
expert = Model(config['model'])
wandb.watch(expert)
# Define optimizer
optimizer = torch.optim.Adam(expert.parameters(),lr=config['train_args']['lr'])
# Initialize dataset
train_dataset = Resisitivity_Dataset(config['path']['data_path'], config, "train")
eval_dataset = Resisitivity_Dataset(config['path']['data_path'], config, "eval")
# Build dataloader
train_dataloader = DataLoader(train_dataset,batch_size=config['train_args']['batch_size'], shuffle=True)
eval_dataloader = DataLoader(eval_dataset,batch_size=config['train_args']['batch_size'])
# Define loss function
criterion = torch.nn.L1Loss()
# training
train(expert, optimizer, criterion, train_dataloader, eval_dataloader, config)
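# Sketch of the YAML layout this script expects (key names taken from the
# lookups above; values are placeholders):
#
#   project_name: resistivity-prediction
#   exp_name: baseline
#   path:
#     data_path: data/
#     model_save_path: model.pt
#   model: {...}  # passed straight to Model(...)
#   train_args: {lr: 1.0e-3, batch_size: 32, epoches: 10, eval_step: 100}
#   eval_part:
#     output_file_path: predictions.txt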
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--config",type=str, default="config_default.yaml")
args = parser.parse_args()
config = yaml.safe_load(open(args.config,"r"))
main(config)
| 33.055556
| 108
| 0.639496
|
10f7d460947ee8299f96f9acb727aaeae2c9c2ad
| 63
|
py
|
Python
|
tools/build.py
|
LucidSigma/stardust
|
7012b46b0e75fa272e0bd39f3a1cc483c29fc10c
|
[
"MIT"
] | 7
|
2020-10-08T11:40:53.000Z
|
2022-03-30T10:40:06.000Z
|
tools/build.py
|
LucidSigma/stardust
|
7012b46b0e75fa272e0bd39f3a1cc483c29fc10c
|
[
"MIT"
] | 1
|
2020-10-08T11:44:27.000Z
|
2020-12-01T08:43:19.000Z
|
tools/build.py
|
LucidSigma/stardust
|
7012b46b0e75fa272e0bd39f3a1cc483c29fc10c
|
[
"MIT"
] | null | null | null |
import subprocess
subprocess.run("cmake ..", cwd = "../build")
| 21
| 44
| 0.666667
|
6705222d3e04617b8cdf18de7d809cd6e1ca1405
| 2,525
|
py
|
Python
|
powerline/bindings/vim/__init__.py
|
zhaocai/powerline
|
8aae1145835b4b2a71f1ed71b81d490e2907bd39
|
[
"MIT"
] | 1
|
2015-03-21T21:59:15.000Z
|
2015-03-21T21:59:15.000Z
|
powerline/bindings/vim/__init__.py
|
zhaocai/powerline
|
8aae1145835b4b2a71f1ed71b81d490e2907bd39
|
[
"MIT"
] | null | null | null |
powerline/bindings/vim/__init__.py
|
zhaocai/powerline
|
8aae1145835b4b2a71f1ed71b81d490e2907bd39
|
[
"MIT"
] | null | null | null |
# vim:fileencoding=utf-8:noet
import sys
try:
import vim
except ImportError:
vim = {}
if hasattr(vim, 'bindeval'):
def vim_get_func(f, rettype=None):
'''Return a vim function binding.'''
try:
func = vim.bindeval('function("' + f + '")')
if sys.version_info >= (3,) and rettype is str:
return (lambda *args, **kwargs: func(*args, **kwargs).decode('utf-8', errors='replace'))
return func
except vim.error:
return None
else:
import json
class VimFunc(object):
'''Evaluate a vim function using vim.eval().
This is a fallback class for older vim versions.
'''
__slots__ = ('f', 'rettype')
def __init__(self, f, rettype=None):
self.f = f
self.rettype = rettype
def __call__(self, *args):
r = vim.eval(self.f + '(' + json.dumps(args)[1:-1] + ')')
if self.rettype:
return self.rettype(r)
return r
vim_get_func = VimFunc
# It may crash on some old vim versions and I do not remember in which patch
# I fixed this crash.
if hasattr(vim, 'vars') and vim.vvars['version'] > 703:
_vim_to_python_types = {
vim.Dictionary: lambda value: dict(((key, _vim_to_python(value[key])) for key in value.keys())),
vim.List: lambda value: [_vim_to_python(item) for item in value],
vim.Function: lambda _: None,
}
_id = lambda value: value
def _vim_to_python(value):
return _vim_to_python_types.get(type(value), _id)(value)
def vim_getvar(varname):
return _vim_to_python(vim.vars[str(varname)])
else:
_vim_exists = vim_get_func('exists', rettype=int)
def vim_getvar(varname): # NOQA
varname = 'g:' + varname
if _vim_exists(varname):
return vim.eval(varname)
else:
raise KeyError(varname)
if hasattr(vim, 'options'):
def vim_getbufoption(info, option):
return info['buffer'].options[option]
else:
def vim_getbufoption(info, option): # NOQA
return getbufvar(info['bufnr'], '&' + option)
if sys.version_info < (3,) or not hasattr(vim, 'bindeval'):
getbufvar = vim_get_func('getbufvar')
else:
_getbufvar = vim_get_func('getbufvar')
def getbufvar(*args):
r = _getbufvar(*args)
if type(r) is bytes:
return r.decode('utf-8')
return r
class VimEnviron(object):
@staticmethod
def __getitem__(key):
return vim.eval('$' + key)
@staticmethod
def get(key, default=None):
return vim.eval('$' + key) or default
@staticmethod
def __setitem__(key, value):
return vim.command('let $' + key + '="'
+ value.replace('"', '\\"').replace('\\', '\\\\').replace('\n', '\\n').replace('\0', '')
+ '"')
environ = VimEnviron()
| 24.047619
| 98
| 0.664554
|
e36b1058b0915e0edcc6705ca4ec8ea3fc70df89
| 875
|
py
|
Python
|
trove_tempest_plugin/services/database/json/limits_client.py
|
bzurkowski/trove-tempest-plugin
|
5a5f36e8845dbd74c36aacb45a9dc9426be464e7
|
[
"Apache-2.0"
] | 1
|
2018-08-08T11:49:35.000Z
|
2018-08-08T11:49:35.000Z
|
trove_tempest_plugin/services/database/json/limits_client.py
|
bzurkowski/trove-tempest-plugin
|
5a5f36e8845dbd74c36aacb45a9dc9426be464e7
|
[
"Apache-2.0"
] | null | null | null |
trove_tempest_plugin/services/database/json/limits_client.py
|
bzurkowski/trove-tempest-plugin
|
5a5f36e8845dbd74c36aacb45a9dc9426be464e7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove_tempest_plugin.services.database.json import base_client
class LimitsClient(base_client.BaseClient):
uri = '/limits'
def list_limits(self):
"""List all limits."""
return self.list_resources(self.uri)
| 33.653846
| 78
| 0.725714
|
0660d772028ec30143c2a4ad4f54b2f10032dd32
| 5,171
|
py
|
Python
|
Project_Health/src/mklaren/kernel/kernel.py
|
Anonymous633671/STABILIZER
|
5a1ab8099a2d75ace7e053afc78055f1f4d359c0
|
[
"MIT"
] | 9
|
2017-07-27T10:32:48.000Z
|
2021-07-01T11:51:51.000Z
|
Project_Health/src/mklaren/kernel/kernel.py
|
Anonymous633671/STABILIZER
|
5a1ab8099a2d75ace7e053afc78055f1f4d359c0
|
[
"MIT"
] | 11
|
2016-03-15T16:27:47.000Z
|
2019-09-05T02:25:08.000Z
|
src/mklaren/kernel/kernel.py
|
ai-se/GENERAL
|
5a4bef2a80526524e3e18139b561fc0e2bb8888d
|
[
"MIT"
] | 5
|
2017-01-28T22:45:34.000Z
|
2019-12-04T13:15:10.000Z
|
""" Methods related to calculation of kernel function values and kernel
matrices.
"""
import numpy as np
import numpy.ma as ma
from itertools import product
import scipy.sparse as sp
def linear_kernel(x, y):
"""
The linear kernel (the usual dot product in n-dimensional space).
.. math::
k(\mathbf{x}, \mathbf{y}) = \mathbf{x}^T \mathbf{y}
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if isinstance(x, int):
return x * y
if sp.isspmatrix(x):
return np.array(x.dot(y.T).todense())
else:
return x.dot(y.T)
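# Illustrative value for dense 1-D inputs (plain dot product):
#
#     >>> linear_kernel(np.array([1.0, 2.0]), np.array([3.0, 4.0]))
#     11.0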
def poly_kernel(x, y, p=2, b=0):
"""
The polynomial kernel.
.. math::
k(\mathbf{x}, \mathbf{y}) = (b + \mathbf{x}^T \mathbf{y})^p
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param p: (``float``) Polynomial degree.
:param b: (``float``) Bias term.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
    # Apply the documented bias term b (defaults to 0, matching the old behaviour).
    if sp.isspmatrix(x):
        return (np.array(x.dot(y.T).todense()) + b)**p
    if not hasattr(x, "shape"):
        return (x * y + b)**p
    else:
        return (x.dot(y.T) + b)**p
def sigmoid_kernel(x, y, b=1, c=0):
"""
The sigmoid kernel.
.. math::
        k(\mathbf{x}, \mathbf{y}) = tanh(b \mathbf{x}^T \mathbf{y} + c)
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
    :param b: (``float``) Scale (slope).
    :param c: (``float``) Bias term.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape"):
return np.tanh(b * x * y + c)
else:
return np.tanh(b * x.dot(y.T) + c)
def exponential_kernel(x, y, sigma=2.0, gamma=None):
"""
    The exponential quadratic / radial basis function (RBF) kernel.
    .. math::
        k(\mathbf{x}, \mathbf{y}) = exp\{-\dfrac{\|\mathbf{x} - \mathbf{y}\|^2}{2 \sigma^2} \}
    or
    .. math::
        k(\mathbf{x}, \mathbf{y}) = exp\{-\gamma \|\mathbf{x} - \mathbf{y}\|^2 \}
:param x: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param y: (``numpy.ndarray``) Data point(s) of shape ``(n_samples, n_features)`` or ``(n_features, )``.
:param sigma: (``float``) Length scale.
:param gamma: (``float``) Scale.
:return: (``numpy.ndarray``) Kernel value/matrix between data points.
"""
if gamma is None:
gamma = 1.0 / (2.0 * sigma ** 2)
if sp.isspmatrix(x) and sp.isspmatrix(y):
x = np.array(x.todense())
y = np.array(y.todense())
if not hasattr(x, "shape"):
return np.exp(-gamma * np.linalg.norm(x - y, ord=2)**2)
if np.asarray(x).ndim == 0:
return np.exp(-gamma * (x - y)**2)
if len(x.shape) >= 2 or len(y.shape) >= 2:
K = np.zeros((x.shape[0], y.shape[0]))
for i, xi in enumerate(x):
for j, yj in enumerate(y):
K[i, j] = np.exp(-gamma * np.linalg.norm(xi - yj, ord=2)**2)
return K
return np.exp(-gamma * np.linalg.norm(x - y, ord=2)**2)
rbf_kernel = exponential_kernel
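# Illustrative value: sigma=2.0 gives gamma = 1/8 and ||x - y||^2 = 4,
# so the kernel evaluates to exp(-0.5):
#
#     >>> rbf_kernel(np.array([0.0, 0.0]), np.array([0.0, 2.0]), sigma=2.0)  # ~0.6065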
def random_kernel(n):
"""
Generate a random kernel matrix of shape ``(n, n)``.
:param n: (``int``) Number of examples.
:return: (``numpy.ndarray``) Random positive semidefinite kernel matrix of shape ``(n, n)``.
"""
G = np.random.rand(n, n)
return G.T.dot(G)
def center_kernel(K):
"""
Center a kernel matrix.
.. math::
        \mathbf{K}_{c} = (\mathbf{I}-\dfrac{\mathbf{11}^T}{n})\mathbf{K}(\mathbf{I}-\dfrac{\mathbf{11}^T}{n})
:param K: (``numpy.ndarray``) Kernel matrix of shape ``(n, n)``.
:return: (``numpy.ndarray``) Centered kernel for a sample of points.
"""
    n = K.shape[0]
    o = np.ones((n, 1))
    I = np.eye(n, n)
    Ic = I - o.dot(o.T) / float(n)
return Ic.dot(K).dot(Ic)
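# Centering removes the feature-space mean, so every row and column of the
# centered matrix sums to (numerically) zero:
#
#     >>> Kc = center_kernel(random_kernel(5))
#     >>> np.allclose(Kc.sum(axis=0), 0)
#     True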
def center_kernel_low_rank(G):
"""
    Center the feature matrix such that :math:`\mathbf{G}_c \mathbf{G}_c^T` is centered.
.. math::
\mathbf{G}_c = (\mathbf{I} - \dfrac{\mathbf{11}^T}{n})\mathbf{G}
:param G: (``numpy.ndarray``) Low-rank approximation of the feature matrix of shape ``(n, k)``.
:return: (``numpy.ndarray``) Centered low-rank approximation of the feature space.
"""
return G - G.mean(axis=0)
| 30.063953
| 111
| 0.539934
|
1165c9d13fb337b6e70ed43dd6048d3adf282cda
| 2,501
|
py
|
Python
|
sahara/openstack/common/test.py
|
citrix-openstack-build/sahara
|
17e4f4dac5bb321ef4d5a55664cca0857127d7e6
|
[
"Apache-2.0"
] | 1
|
2015-02-26T03:23:23.000Z
|
2015-02-26T03:23:23.000Z
|
heat/openstack/common/test.py
|
NeCTAR-RC/heat
|
b152817f192a7b46514793633ddc968c1fe1ebf8
|
[
"Apache-2.0"
] | 1
|
2018-11-01T09:14:17.000Z
|
2018-11-01T09:14:17.000Z
|
heat/openstack/common/test.py
|
NeCTAR-RC/heat
|
b152817f192a7b46514793633ddc968c1fe1ebf8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common utilities used in testing"""
import logging
import os
import fixtures
import testtools
_TRUE_VALUES = ('True', 'true', '1', 'yes')
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self._set_timeout()
self._fake_output()
self._fake_logs()
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
def _set_timeout(self):
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
def _fake_output(self):
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
def _fake_logs(self):
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = logging.DEBUG
else:
level = logging.INFO
capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES
if capture_logs:
self.useFixture(
fixtures.FakeLogger(
format=_LOG_FORMAT,
level=level,
nuke_handlers=capture_logs,
)
)
else:
logging.basicConfig(format=_LOG_FORMAT, level=level)
| 34.736111
| 76
| 0.65014
|
fd3b345c9040562d05c23ed50bdacaeb47b33ec9
| 130
|
py
|
Python
|
python_socks/_resolver_async.py
|
mgorny/python-socks
|
b6bae4bb904e8c066ec3bb15b408182a57ea2fbf
|
[
"Apache-2.0"
] | 28
|
2020-09-21T08:34:00.000Z
|
2022-02-25T15:57:36.000Z
|
aiohttp_socks/core_socks/_resolver_async.py
|
maemo-leste-extras/aiohttp-socks
|
4771b8bc73bd85f41a158600171b931de8e0c414
|
[
"Apache-2.0"
] | 15
|
2020-09-21T15:39:49.000Z
|
2022-01-22T15:13:31.000Z
|
aiohttp_socks/core_socks/_resolver_async.py
|
maemo-leste-extras/aiohttp-socks
|
4771b8bc73bd85f41a158600171b931de8e0c414
|
[
"Apache-2.0"
] | 9
|
2020-11-09T10:53:32.000Z
|
2022-03-24T22:17:48.000Z
|
class AsyncResolver:
async def resolve(self, host, port=0, family=0):
raise NotImplementedError() # pragma: no cover
| 32.5
| 55
| 0.692308
|
bd93be532c61f3aeb68d4d94d846545e70039b7d
| 194
|
py
|
Python
|
html5/constant.py
|
valbendan/html5
|
6e4a6786005d9361f317c3ab3b37a139fd0cc977
|
[
"MIT"
] | 1
|
2015-09-25T08:48:27.000Z
|
2015-09-25T08:48:27.000Z
|
html5/constant.py
|
valbendan/html5
|
6e4a6786005d9361f317c3ab3b37a139fd0cc977
|
[
"MIT"
] | null | null | null |
html5/constant.py
|
valbendan/html5
|
6e4a6786005d9361f317c3ab3b37a139fd0cc977
|
[
"MIT"
] | null | null | null |
__all__ = ['void_tag']
void_tag = {'meta', 'base', 'br', 'hr', 'img', 'area', 'link', 'input', 'wbr',
'colgroup', 'track', 'command', 'param', 'col', 'source', 'keygen', '!DOCTYPE'}
| 48.5
| 91
| 0.520619
|
b5c77155e772e525fa10751da5ed617a638e4491
| 4,337
|
py
|
Python
|
aisp-core/aisp-core-main/src/main/python/tools/audacity2metadata.py
|
Enterprise-Neurosystem/ai-signal-processing
|
cce8e50c265498da494b679a870422365657274f
|
[
"Apache-2.0"
] | null | null | null |
aisp-core/aisp-core-main/src/main/python/tools/audacity2metadata.py
|
Enterprise-Neurosystem/ai-signal-processing
|
cce8e50c265498da494b679a870422365657274f
|
[
"Apache-2.0"
] | null | null | null |
aisp-core/aisp-core-main/src/main/python/tools/audacity2metadata.py
|
Enterprise-Neurosystem/ai-signal-processing
|
cce8e50c265498da494b679a870422365657274f
|
[
"Apache-2.0"
] | null | null | null |
#*******************************************************************************
# * Copyright [2022] [IBM]
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *******************************************************************************
import argparse
from argparse import RawTextHelpFormatter
from argparse import ArgumentDefaultsHelpFormatter
import sys
import wave
def as_metadata(wav_file, start_msec, end_msec, label_name, label_value):
if start_msec is None or end_msec is None:
print("{},{}={},".format(wav_file, label_name,label_value))
else:
print("{}[{}-{}],{}={},".format(wav_file, start_msec, end_msec, label_name,label_value))
if __name__ == '__main__':
argp = argparse.ArgumentParser(description=
#xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
'Converts a labels file produced by Audacity to metadata.csv format on the\n'
'standard output. The input Audacity label file contains 1 or more rows, each\n'
'with 3 white-space separated columns. Each row defines a labeled segment and\n'
'is converted to a single row in the metadata output. Columns are defined as \n'
'follows:\n'
' 1: the offset in seconds of the start of the segment\n'
' 2: the offset in seconds of the end of the segment\n'
' 3: the single label value to assign to the segment\n'
'For example, converting the following labels.txt:\n'
' 1.3 2.1 abnormal\n'
' 3.0 4.0 abnormal\n'
'using options --wav mysound.wav --gap-label-value normal --label state\n'
'produces:\n'
' mysound.wav[0-1300],state=normal,\n'
' mysound.wav[1300-2100],state=abnormal,\n'
' mysound.wav[2100-3000],state=normal,\n'
' mysound.wav[3000-4000],state=abnormal,\n'
'\nNote that a trailing gap segment is currently not emitted.\n'
,formatter_class=RawTextHelpFormatter)
argp.add_argument('-wav', help='Specifies name of the wav file to which the labels apply', default=None, required=True, type=str)
argp.add_argument('-audacity', help='Specifies the name of the Audacity labels file. Default is labels.txt.', default="labels.txt", type=str)
argp.add_argument('-label', help='The label name to use in the output. Default is state.', default='state', type=str)
argp.add_argument('-gap-label-value', help='The label value to apply to the gaps in the labels specified in the Audacity labels file. Default is None and will not be applied.', default=None, type=str)
has_freq = False
args = argp.parse_args()
labels_file=args.audacity
wav_file=args.wav
with wave.open(wav_file,'r') as wave:
clip_len_msec = int(1000 * wave.getnframes() / wave.getframerate())
label_name=args.label
gap_label=args.gap_label_value
last_end = 0
with open(labels_file) as in_file:
for line in in_file:
fields=line.split('\t')
if '\\' in fields[0]: # Skip frequency information, for now.
has_freq = True
continue
start_msec = int(float(fields[0]) * 1000)
end_msec = int(float(fields[1]) * 1000);
label = fields[2].rstrip('\n')
if gap_label is not None and start_msec > last_end:
# Echo gap label
as_metadata(wav_file, last_end, start_msec, label_name, gap_label)
as_metadata(wav_file, start_msec, end_msec, label_name, label)
last_end = end_msec
if gap_label is not None and last_end < clip_len_msec:
as_metadata(wav_file, last_end, clip_len_msec, label_name, gap_label)
if has_freq:
print("Labels file contains frequency ranges. These are not included in the output.", file=sys.stderr)
| 53.54321
| 204
| 0.649988
|
e7c1d94579ae39e106a8ccd7a6e00a1dd1301d08
| 849
|
py
|
Python
|
plato_pylib/utils/ase_conversions.py
|
RFogarty1/plato_pylib
|
b0ab65bfe489c4bb1fd321cc102580bef2b6ff68
|
[
"MIT"
] | null | null | null |
plato_pylib/utils/ase_conversions.py
|
RFogarty1/plato_pylib
|
b0ab65bfe489c4bb1fd321cc102580bef2b6ff68
|
[
"MIT"
] | null | null | null |
plato_pylib/utils/ase_conversions.py
|
RFogarty1/plato_pylib
|
b0ab65bfe489c4bb1fd321cc102580bef2b6ff68
|
[
"MIT"
] | null | null | null |
import itertools as it
from ..shared import ucell_class as uCell
def getUnitCellObjectFromASEAtomsObject(aseAtomsObj):
""" Get a plato_pylib UnitCell object from ASE Atoms object (essentially the equivalent ASE object). Note at time of writing im only planning to test thes on objects with pbcs, so be careful if using for anything else
Args:
aseAtomsObj: (Atoms object from ASE)
Returns
outCell: (plato_pylib UnitCell object)
"""
fractCoords = aseAtomsObj.get_scaled_positions()
symbols = aseAtomsObj.get_chemical_symbols()
lattLengths = aseAtomsObj.get_cell_lengths_and_angles()[:3]
lattAngles = aseAtomsObj.get_cell_lengths_and_angles()[3:]
outCell = uCell.UnitCell( lattParams=lattLengths, lattAngles=lattAngles )
outCell.fractCoords = [list(x)+[y] for x,y in it.zip_longest(fractCoords,symbols)]
return outCell
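# Minimal usage sketch (illustrative; assumes ASE is installed and the Atoms
# object is periodic, as cautioned in the docstring above):
#
#     from ase.build import bulk
#     mgCell = getUnitCellObjectFromASEAtomsObject(bulk("Mg", "hcp", a=3.21))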
| 31.444444
| 218
| 0.779741
|
dd1b44917600e3bb440d9f57cb1d8c57d589812e
| 700
|
py
|
Python
|
molecule_ec2/test/scenarios/driver/ec2/molecule/multi-node/tests/test_default.py
|
aruntomar/molecule-ec2
|
a875011fa8f76fc8c6eaf6666c87c0eb9545a341
|
[
"MIT"
] | 39
|
2020-01-19T01:58:41.000Z
|
2022-03-10T12:28:34.000Z
|
molecule_ec2/test/scenarios/driver/ec2/molecule/multi-node/tests/test_default.py
|
aruntomar/molecule-ec2
|
a875011fa8f76fc8c6eaf6666c87c0eb9545a341
|
[
"MIT"
] | 43
|
2020-01-22T19:58:33.000Z
|
2022-02-01T09:44:53.000Z
|
molecule_ec2/test/scenarios/driver/ec2/molecule/multi-node/tests/test_default.py
|
aruntomar/molecule-ec2
|
a875011fa8f76fc8c6eaf6666c87c0eb9545a341
|
[
"MIT"
] | 31
|
2020-01-22T19:57:17.000Z
|
2022-03-12T07:31:08.000Z
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
# EC2 provides unique random hostnames.
def test_hostname(host):
pass
def test_etc_molecule_directory(host):
f = host.file("/etc/molecule")
assert f.is_directory
assert f.user == "root"
assert f.group == "root"
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
filename = "/etc/molecule/{}".format(host.check_output("hostname -s"))
f = host.file(filename)
assert f.is_file
assert f.user == "root"
assert f.group == "root"
assert f.mode == 0o644
| 21.875
| 74
| 0.702857
|
136b4ff0c51351e1dbb431f03e0922e9e17d8ee9
| 66
|
py
|
Python
|
excel_import_export/wizard/__init__.py
|
Chief0-0/Localizacion_ERP_V12
|
f59e56564e29525f772b59db7fef7c7cde347336
|
[
"Apache-2.0"
] | null | null | null |
excel_import_export/wizard/__init__.py
|
Chief0-0/Localizacion_ERP_V12
|
f59e56564e29525f772b59db7fef7c7cde347336
|
[
"Apache-2.0"
] | null | null | null |
excel_import_export/wizard/__init__.py
|
Chief0-0/Localizacion_ERP_V12
|
f59e56564e29525f772b59db7fef7c7cde347336
|
[
"Apache-2.0"
] | null | null | null |
from . import export_xlsx_wizard
from . import import_xlsx_wizard
| 22
| 32
| 0.848485
|
3374e1c13f8004100c2f3c114edbfba2db26dec6
| 44,702
|
py
|
Python
|
sdk/cwl/tests/test_container.py
|
basharbme/arvados
|
1c3c8f7fd2e1268b139e046fbd6a7093dd82222f
|
[
"Apache-2.0"
] | 1
|
2019-09-08T01:49:09.000Z
|
2019-09-08T01:49:09.000Z
|
sdk/cwl/tests/test_container.py
|
basharbme/arvados
|
1c3c8f7fd2e1268b139e046fbd6a7093dd82222f
|
[
"Apache-2.0"
] | null | null | null |
sdk/cwl/tests/test_container.py
|
basharbme/arvados
|
1c3c8f7fd2e1268b139e046fbd6a7093dd82222f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from builtins import str
from builtins import object
import arvados_cwl
import arvados_cwl.context
import arvados_cwl.util
from arvados_cwl.arvdocker import arv_docker_clear_cache
import copy
import arvados.config
import logging
import mock
import unittest
import os
import functools
import cwltool.process
import cwltool.secrets
from schema_salad.ref_resolver import Loader
from schema_salad.sourceline import cmap
from .matcher import JsonDiffMatcher, StripYAMLComments
from .mock_discovery import get_rootDesc
if not os.getenv('ARVADOS_DEBUG'):
logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
class CollectionMock(object):
def __init__(self, vwdmock, *args, **kwargs):
self.vwdmock = vwdmock
self.count = 0
def open(self, *args, **kwargs):
self.count += 1
return self.vwdmock.open(*args, **kwargs)
def copy(self, *args, **kwargs):
self.count += 1
self.vwdmock.copy(*args, **kwargs)
def save_new(self, *args, **kwargs):
pass
def __len__(self):
return self.count
def portable_data_hash(self):
if self.count == 0:
return arvados.config.EMPTY_BLOCK_LOCATOR
else:
return "99999999999999999999999999999996+99"
class TestContainer(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
loadingContext = arvados_cwl.context.ArvLoadingContext(
{"avsc_names": avsc_names,
"basedir": "",
"make_fs_access": make_fs_access,
"loader": Loader({}),
"metadata": {"cwlVersion": "v1.1", "http://commonwl.org/cwltool#original_cwlVersion": "v1.0"}})
runtimeContext = arvados_cwl.context.ArvRuntimeContext(
{"work_api": "containers",
"basedir": "",
"name": "test_run_"+str(enable_reuse),
"make_fs_access": make_fs_access,
"tmpdir": "/tmp",
"enable_reuse": enable_reuse,
"priority": 500,
"project_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
})
return loadingContext, runtimeContext
# Helper function to set up the ArvCwlExecutor to use the containers api
# and test that the RuntimeStatusLoggingHandler is set up correctly
    def setup_and_test_container_executor_and_logging(self, gcc_mock):
api = mock.MagicMock()
api._rootDesc = copy.deepcopy(get_rootDesc())
# Make sure ArvCwlExecutor thinks it's running inside a container so it
# adds the logging handler that will call runtime_status_update() mock
self.assertFalse(gcc_mock.called)
runner = arvados_cwl.ArvCwlExecutor(api)
self.assertEqual(runner.work_api, 'containers')
root_logger = logging.getLogger('')
handlerClasses = [h.__class__ for h in root_logger.handlers]
self.assertTrue(arvados_cwl.RuntimeStatusLoggingHandler in handlerClasses)
return runner
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_run(self, keepdocker):
for enable_reuse in (True, False):
arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999993+99"}
tool = cmap({
"inputs": [],
"outputs": [],
"baseCommand": "ls",
"arguments": [{"valueFrom": "$(runtime.outdir)"}],
"id": "#",
"class": "CommandLineTool"
})
loadingContext, runtimeContext = self.helper(runner, enable_reuse)
arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
j.run(runtimeContext)
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'environment': {
'HOME': '/var/spool/cwl',
'TMPDIR': '/tmp'
},
'name': 'test_run_'+str(enable_reuse),
'runtime_constraints': {
'vcpus': 1,
'ram': 1073741824
},
'use_existing': enable_reuse,
'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 1073741824
},
'/var/spool/cwl': {'kind': 'tmp',
"capacity": 1073741824 }
},
'state': 'Committed',
'output_name': 'Output for step test_run_'+str(enable_reuse),
'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
'output_path': '/var/spool/cwl',
'output_ttl': 0,
'container_image': '99999999999999999999999999999993+99',
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
'secret_mounts': {}
}))
# The test passes some fields in builder.resources
# For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_resource_requirements(self, keepdocker):
arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 3600
runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999993+99"}
tool = cmap({
"inputs": [],
"outputs": [],
"hints": [{
"class": "ResourceRequirement",
"coresMin": 3,
"ramMin": 3000,
"tmpdirMin": 4000,
"outdirMin": 5000
}, {
"class": "http://arvados.org/cwl#RuntimeConstraints",
"keep_cache": 512
}, {
"class": "http://arvados.org/cwl#APIRequirement",
}, {
"class": "http://arvados.org/cwl#PartitionRequirement",
"partition": "blurb"
}, {
"class": "http://arvados.org/cwl#IntermediateOutput",
"outputTTL": 7200
}, {
"class": "http://arvados.org/cwl#ReuseRequirement",
"enableReuse": False
}],
"baseCommand": "ls",
"id": "#",
"class": "CommandLineTool"
})
loadingContext, runtimeContext = self.helper(runner)
runtimeContext.name = "test_resource_requirements"
arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
j.run(runtimeContext)
call_args, call_kwargs = runner.api.container_requests().create.call_args
call_body_expected = {
'environment': {
'HOME': '/var/spool/cwl',
'TMPDIR': '/tmp'
},
'name': 'test_resource_requirements',
'runtime_constraints': {
'vcpus': 3,
'ram': 3145728000,
'keep_cache_ram': 536870912,
'API': True
},
'use_existing': False,
'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 4194304000 },
'/var/spool/cwl': {'kind': 'tmp',
"capacity": 5242880000 }
},
'state': 'Committed',
'output_name': 'Output for step test_resource_requirements',
'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
'output_path': '/var/spool/cwl',
'output_ttl': 7200,
'container_image': '99999999999999999999999999999993+99',
'command': ['ls'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {
'partitions': ['blurb']
},
'properties': {},
'secret_mounts': {}
}
call_body = call_kwargs.get('body', None)
self.assertNotEqual(None, call_body)
for key in call_body:
self.assertEqual(call_body_expected.get(key), call_body.get(key))
# The test passes some fields in builder.resources
# For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
@mock.patch("arvados.collection.Collection")
def test_initial_work_dir(self, collection_mock, keepdocker):
arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999993+99"}
sourcemock = mock.MagicMock()
def get_collection_mock(p):
if "/" in p:
return (sourcemock, p.split("/", 1)[1])
else:
return (sourcemock, "")
runner.fs_access.get_collection.side_effect = get_collection_mock
vwdmock = mock.MagicMock()
collection_mock.side_effect = lambda *args, **kwargs: CollectionMock(vwdmock, *args, **kwargs)
tool = cmap({
"inputs": [],
"outputs": [],
"hints": [{
"class": "InitialWorkDirRequirement",
"listing": [{
"class": "File",
"basename": "foo",
"location": "keep:99999999999999999999999999999995+99/bar"
},
{
"class": "Directory",
"basename": "foo2",
"location": "keep:99999999999999999999999999999995+99"
},
{
"class": "File",
"basename": "filename",
"location": "keep:99999999999999999999999999999995+99/baz/filename"
},
{
"class": "Directory",
"basename": "subdir",
"location": "keep:99999999999999999999999999999995+99/subdir"
} ]
}],
"baseCommand": "ls",
"id": "#",
"class": "CommandLineTool"
})
loadingContext, runtimeContext = self.helper(runner)
runtimeContext.name = "test_initial_work_dir"
arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
j.run(runtimeContext)
call_args, call_kwargs = runner.api.container_requests().create.call_args
vwdmock.copy.assert_has_calls([mock.call('bar', 'foo', source_collection=sourcemock)])
vwdmock.copy.assert_has_calls([mock.call('.', 'foo2', source_collection=sourcemock)])
vwdmock.copy.assert_has_calls([mock.call('baz/filename', 'filename', source_collection=sourcemock)])
vwdmock.copy.assert_has_calls([mock.call('subdir', 'subdir', source_collection=sourcemock)])
call_body_expected = {
'environment': {
'HOME': '/var/spool/cwl',
'TMPDIR': '/tmp'
},
'name': 'test_initial_work_dir',
'runtime_constraints': {
'vcpus': 1,
'ram': 1073741824
},
'use_existing': True,
'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 1073741824 },
'/var/spool/cwl': {'kind': 'tmp',
"capacity": 1073741824 },
'/var/spool/cwl/foo': {
'kind': 'collection',
'path': 'foo',
'portable_data_hash': '99999999999999999999999999999996+99'
},
'/var/spool/cwl/foo2': {
'kind': 'collection',
'path': 'foo2',
'portable_data_hash': '99999999999999999999999999999996+99'
},
'/var/spool/cwl/filename': {
'kind': 'collection',
'path': 'filename',
'portable_data_hash': '99999999999999999999999999999996+99'
},
'/var/spool/cwl/subdir': {
'kind': 'collection',
'path': 'subdir',
'portable_data_hash': '99999999999999999999999999999996+99'
}
},
'state': 'Committed',
'output_name': 'Output for step test_initial_work_dir',
'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
'output_path': '/var/spool/cwl',
'output_ttl': 0,
'container_image': '99999999999999999999999999999993+99',
'command': ['ls'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {
},
'properties': {},
'secret_mounts': {}
}
call_body = call_kwargs.get('body', None)
self.assertNotEqual(None, call_body)
for key in call_body:
self.assertEqual(call_body_expected.get(key), call_body.get(key))
# Test redirecting stdin/stdout/stderr
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_redirects(self, keepdocker):
arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999993+99"}
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
tool = cmap({
"inputs": [],
"outputs": [],
"baseCommand": "ls",
"stdout": "stdout.txt",
"stderr": "stderr.txt",
"stdin": "/keep/99999999999999999999999999999996+99/file.txt",
"arguments": [{"valueFrom": "$(runtime.outdir)"}],
"id": "#",
"class": "CommandLineTool"
})
loadingContext, runtimeContext = self.helper(runner)
runtimeContext.name = "test_run_redirect"
arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
j.run(runtimeContext)
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'environment': {
'HOME': '/var/spool/cwl',
'TMPDIR': '/tmp'
},
'name': 'test_run_redirect',
'runtime_constraints': {
'vcpus': 1,
'ram': 1073741824
},
'use_existing': True,
'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 1073741824 },
'/var/spool/cwl': {'kind': 'tmp',
"capacity": 1073741824 },
"stderr": {
"kind": "file",
"path": "/var/spool/cwl/stderr.txt"
},
"stdin": {
"kind": "collection",
"path": "file.txt",
"portable_data_hash": "99999999999999999999999999999996+99"
},
"stdout": {
"kind": "file",
"path": "/var/spool/cwl/stdout.txt"
},
},
'state': 'Committed',
"output_name": "Output for step test_run_redirect",
'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
'output_path': '/var/spool/cwl',
'output_ttl': 0,
'container_image': '99999999999999999999999999999993+99',
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
'secret_mounts': {}
}))
@mock.patch("arvados.collection.Collection")
def test_done(self, col):
api = mock.MagicMock()
runner = mock.MagicMock()
runner.api = api
runner.num_retries = 0
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.api.containers().get().execute.return_value = {"state":"Complete",
"output": "abc+123",
"exit_code": 0}
col().open.return_value = []
loadingContext, runtimeContext = self.helper(runner)
arvjob = arvados_cwl.ArvadosContainer(runner,
runtimeContext,
mock.MagicMock(),
{},
None,
[],
[],
"testjob")
arvjob.output_callback = mock.MagicMock()
arvjob.collect_outputs = mock.MagicMock()
arvjob.successCodes = [0]
arvjob.outdir = "/var/spool/cwl"
arvjob.output_ttl = 3600
arvjob.collect_outputs.return_value = {"out": "stuff"}
arvjob.done({
"state": "Final",
"log_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz1",
"output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
"uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
"container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
"modified_at": "2017-05-26T12:01:22Z"
})
self.assertFalse(api.collections().create.called)
self.assertFalse(runner.runtime_status_error.called)
arvjob.collect_outputs.assert_called_with("keep:abc+123", 0)
arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
runner.add_intermediate_output.assert_called_with("zzzzz-4zz18-zzzzzzzzzzzzzz2")
    # Test to make sure we don't call runtime_status_update if we already did
    # somewhere higher up in the call stack
@mock.patch("arvados_cwl.util.get_current_container")
def test_recursive_runtime_status_update(self, gcc_mock):
self.setup_and_test_container_executor_and_logging(gcc_mock)
root_logger = logging.getLogger('')
# get_current_container is invoked when we call runtime_status_update
# so try and log again!
gcc_mock.side_effect = lambda *args: root_logger.error("Second Error")
try:
root_logger.error("First Error")
except RuntimeError:
self.fail("RuntimeStatusLoggingHandler should not be called recursively")
@mock.patch("arvados_cwl.ArvCwlExecutor.runtime_status_update")
@mock.patch("arvados_cwl.util.get_current_container")
@mock.patch("arvados.collection.CollectionReader")
@mock.patch("arvados.collection.Collection")
def test_child_failure(self, col, reader, gcc_mock, rts_mock):
runner = self.setup_and_test_container_executor_and_logging(gcc_mock)
gcc_mock.return_value = {"uuid" : "zzzzz-dz642-zzzzzzzzzzzzzzz"}
self.assertTrue(gcc_mock.called)
runner.num_retries = 0
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
runner.label = mock.MagicMock()
runner.label.return_value = '[container testjob]'
runner.api.containers().get().execute.return_value = {
"state":"Complete",
"output": "abc+123",
"exit_code": 1,
"log": "def+234"
}
col().open.return_value = []
loadingContext, runtimeContext = self.helper(runner)
arvjob = arvados_cwl.ArvadosContainer(runner,
runtimeContext,
mock.MagicMock(),
{},
None,
[],
[],
"testjob")
arvjob.output_callback = mock.MagicMock()
arvjob.collect_outputs = mock.MagicMock()
arvjob.successCodes = [0]
arvjob.outdir = "/var/spool/cwl"
arvjob.output_ttl = 3600
arvjob.collect_outputs.return_value = {"out": "stuff"}
arvjob.done({
"state": "Final",
"log_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz1",
"output_uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz2",
"uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
"container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
"modified_at": "2017-05-26T12:01:22Z"
})
rts_mock.assert_called_with(
'error',
'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',
' ** log is empty **'
)
arvjob.output_callback.assert_called_with({"out": "stuff"}, "permanentFail")
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_mounts(self, keepdocker):
arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999994+99",
"manifest_text": ". 99999999999999999999999999999994+99 0:0:file1 0:0:file2"}
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
tool = cmap({
"inputs": [
{"id": "p1",
"type": "Directory"}
],
"outputs": [],
"baseCommand": "ls",
"arguments": [{"valueFrom": "$(runtime.outdir)"}],
"id": "#",
"class": "CommandLineTool"
})
loadingContext, runtimeContext = self.helper(runner)
runtimeContext.name = "test_run_mounts"
arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
arvtool.formatgraph = None
job_order = {
"p1": {
"class": "Directory",
"location": "keep:99999999999999999999999999999994+44",
"http://arvados.org/cwl#collectionUUID": "zzzzz-4zz18-zzzzzzzzzzzzzzz",
"listing": [
{
"class": "File",
"location": "keep:99999999999999999999999999999994+44/file1",
},
{
"class": "File",
"location": "keep:99999999999999999999999999999994+44/file2",
}
]
}
}
for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):
j.run(runtimeContext)
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'environment': {
'HOME': '/var/spool/cwl',
'TMPDIR': '/tmp'
},
'name': 'test_run_mounts',
'runtime_constraints': {
'vcpus': 1,
'ram': 1073741824
},
'use_existing': True,
'priority': 500,
'mounts': {
"/keep/99999999999999999999999999999994+44": {
"kind": "collection",
"portable_data_hash": "99999999999999999999999999999994+44",
"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzz"
},
'/tmp': {'kind': 'tmp',
"capacity": 1073741824 },
'/var/spool/cwl': {'kind': 'tmp',
"capacity": 1073741824 }
},
'state': 'Committed',
'output_name': 'Output for step test_run_mounts',
'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
'output_path': '/var/spool/cwl',
'output_ttl': 0,
'container_image': '99999999999999999999999999999994+99',
'command': ['ls', '/var/spool/cwl'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
'secret_mounts': {}
}))
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_secrets(self, keepdocker):
arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999993+99"}
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
tool = cmap({"arguments": ["md5sum", "example.conf"],
"class": "CommandLineTool",
"hints": [
{
"class": "http://commonwl.org/cwltool#Secrets",
"secrets": [
"#secret_job.cwl/pw"
]
}
],
"id": "#secret_job.cwl",
"inputs": [
{
"id": "#secret_job.cwl/pw",
"type": "string"
}
],
"outputs": [
],
"requirements": [
{
"class": "InitialWorkDirRequirement",
"listing": [
{
"entry": "username: user\npassword: $(inputs.pw)\n",
"entryname": "example.conf"
}
]
}
]})
loadingContext, runtimeContext = self.helper(runner)
runtimeContext.name = "test_secrets"
arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
arvtool.formatgraph = None
job_order = {"pw": "blorp"}
runner.secret_store.store(["pw"], job_order)
for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):
j.run(runtimeContext)
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'environment': {
'HOME': '/var/spool/cwl',
'TMPDIR': '/tmp'
},
'name': 'test_secrets',
'runtime_constraints': {
'vcpus': 1,
'ram': 1073741824
},
'use_existing': True,
'priority': 500,
'mounts': {
'/tmp': {'kind': 'tmp',
"capacity": 1073741824
},
'/var/spool/cwl': {'kind': 'tmp',
"capacity": 1073741824 }
},
'state': 'Committed',
'output_name': 'Output for step test_secrets',
'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
'output_path': '/var/spool/cwl',
'output_ttl': 0,
'container_image': '99999999999999999999999999999993+99',
'command': ['md5sum', 'example.conf'],
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
"secret_mounts": {
"/var/spool/cwl/example.conf": {
"content": "username: user\npassword: blorp\n",
"kind": "text"
}
}
}))
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
def test_timelimit(self, keepdocker):
arv_docker_clear_cache()
runner = mock.MagicMock()
runner.ignore_docker_for_reuse = False
runner.intermediate_output_ttl = 0
runner.secret_store = cwltool.secrets.SecretStore()
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
"portable_data_hash": "99999999999999999999999999999993+99"}
tool = cmap({
"inputs": [],
"outputs": [],
"baseCommand": "ls",
"arguments": [{"valueFrom": "$(runtime.outdir)"}],
"id": "#",
"class": "CommandLineTool",
"hints": [
{
"class": "ToolTimeLimit",
"timelimit": 42
}
]
})
loadingContext, runtimeContext = self.helper(runner)
runtimeContext.name = "test_timelimit"
arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
arvtool.formatgraph = None
for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
j.run(runtimeContext)
_, kwargs = runner.api.container_requests().create.call_args
self.assertEqual(42, kwargs['body']['scheduling_parameters'].get('max_run_time'))
class TestWorkflow(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,
collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))
document_loader.fetcher_constructor = functools.partial(arvados_cwl.CollectionFetcher, api_client=runner.api, fs_access=make_fs_access(""))
document_loader.fetcher = document_loader.fetcher_constructor(document_loader.cache, document_loader.session)
document_loader.fetch_text = document_loader.fetcher.fetch_text
document_loader.check_exists = document_loader.fetcher.check_exists
loadingContext = arvados_cwl.context.ArvLoadingContext(
{"avsc_names": avsc_names,
"basedir": "",
"make_fs_access": make_fs_access,
"loader": document_loader,
"metadata": {"cwlVersion": "v1.1", "http://commonwl.org/cwltool#original_cwlVersion": "v1.0"},
"construct_tool_object": runner.arv_make_tool})
runtimeContext = arvados_cwl.context.ArvRuntimeContext(
{"work_api": "containers",
"basedir": "",
"name": "test_run_wf_"+str(enable_reuse),
"make_fs_access": make_fs_access,
"tmpdir": "/tmp",
"enable_reuse": enable_reuse,
"priority": 500})
return loadingContext, runtimeContext
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.collection.CollectionReader")
@mock.patch("arvados.collection.Collection")
@mock.patch('arvados.commands.keepdocker.list_images_in_arv')
def test_run(self, list_images_in_arv, mockcollection, mockcollectionreader):
arv_docker_clear_cache()
arvados_cwl.add_arv_hints()
api = mock.MagicMock()
api._rootDesc = get_rootDesc()
runner = arvados_cwl.executor.ArvCwlExecutor(api)
self.assertEqual(runner.work_api, 'containers')
list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
runner.api.collections().get().execute.return_value = {"portable_data_hash": "99999999999999999999999999999993+99"}
runner.api.collections().list().execute.return_value = {"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzz",
"portable_data_hash": "99999999999999999999999999999993+99"}]}
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.num_retries = 0
runner.secret_store = cwltool.secrets.SecretStore()
loadingContext, runtimeContext = self.helper(runner)
runner.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
tool, metadata = loadingContext.loader.resolve_ref("tests/wf/scatter2.cwl")
metadata["cwlVersion"] = tool["cwlVersion"]
mockc = mock.MagicMock()
mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mockc, *args, **kwargs)
mockcollectionreader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "token.txt")
arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
arvtool.formatgraph = None
it = arvtool.job({}, mock.MagicMock(), runtimeContext)
next(it).run(runtimeContext)
next(it).run(runtimeContext)
with open("tests/wf/scatter2_subwf.cwl") as f:
subwf = StripYAMLComments(f.read()).rstrip()
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
"command": [
"cwltool",
"--no-container",
"--move-outputs",
"--preserve-entire-environment",
"workflow.cwl#main",
"cwl.input.yml"
],
"container_image": "99999999999999999999999999999993+99",
"cwd": "/var/spool/cwl",
"environment": {
"HOME": "/var/spool/cwl",
"TMPDIR": "/tmp"
},
"mounts": {
"/keep/99999999999999999999999999999999+118": {
"kind": "collection",
"portable_data_hash": "99999999999999999999999999999999+118"
},
"/tmp": {
"capacity": 1073741824,
"kind": "tmp"
},
"/var/spool/cwl": {
"capacity": 1073741824,
"kind": "tmp"
},
"/var/spool/cwl/cwl.input.yml": {
"kind": "collection",
"path": "cwl.input.yml",
"portable_data_hash": "99999999999999999999999999999996+99"
},
"/var/spool/cwl/workflow.cwl": {
"kind": "collection",
"path": "workflow.cwl",
"portable_data_hash": "99999999999999999999999999999996+99"
},
"stdout": {
"kind": "file",
"path": "/var/spool/cwl/cwl.output.json"
}
},
"name": "scatterstep",
"output_name": "Output for step scatterstep",
"output_path": "/var/spool/cwl",
"output_ttl": 0,
"priority": 500,
"properties": {},
"runtime_constraints": {
"ram": 1073741824,
"vcpus": 1
},
"scheduling_parameters": {},
"secret_mounts": {},
"state": "Committed",
"use_existing": True
}))
mockc.open().__enter__().write.assert_has_calls([mock.call(subwf)])
mockc.open().__enter__().write.assert_has_calls([mock.call(
'''{
"fileblub": {
"basename": "token.txt",
"class": "File",
"location": "/keep/99999999999999999999999999999999+118/token.txt",
"size": 0
},
"sleeptime": 5
}''')])
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.collection.CollectionReader")
@mock.patch("arvados.collection.Collection")
@mock.patch('arvados.commands.keepdocker.list_images_in_arv')
def test_overall_resource_singlecontainer(self, list_images_in_arv, mockcollection, mockcollectionreader):
arv_docker_clear_cache()
arvados_cwl.add_arv_hints()
api = mock.MagicMock()
api._rootDesc = get_rootDesc()
runner = arvados_cwl.executor.ArvCwlExecutor(api)
self.assertEqual(runner.work_api, 'containers')
list_images_in_arv.return_value = [["zzzzz-4zz18-zzzzzzzzzzzzzzz"]]
runner.api.collections().get().execute.return_value = {"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzz",
"portable_data_hash": "99999999999999999999999999999993+99"}
runner.api.collections().list().execute.return_value = {"items": [{"uuid": "zzzzz-4zz18-zzzzzzzzzzzzzzz",
"portable_data_hash": "99999999999999999999999999999993+99"}]}
runner.project_uuid = "zzzzz-8i9sb-zzzzzzzzzzzzzzz"
runner.ignore_docker_for_reuse = False
runner.num_retries = 0
runner.secret_store = cwltool.secrets.SecretStore()
loadingContext, runtimeContext = self.helper(runner)
runner.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
loadingContext.do_update = True
tool, metadata = loadingContext.loader.resolve_ref("tests/wf/echo-wf.cwl")
mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mock.MagicMock(), *args, **kwargs)
arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
arvtool.formatgraph = None
it = arvtool.job({}, mock.MagicMock(), runtimeContext)
next(it).run(runtimeContext)
next(it).run(runtimeContext)
with open("tests/wf/echo-subwf.cwl") as f:
subwf = StripYAMLComments(f.read())
runner.api.container_requests().create.assert_called_with(
body=JsonDiffMatcher({
'output_ttl': 0,
'environment': {'HOME': '/var/spool/cwl', 'TMPDIR': '/tmp'},
'scheduling_parameters': {},
'name': u'echo-subwf',
'secret_mounts': {},
'runtime_constraints': {'API': True, 'vcpus': 3, 'ram': 1073741824},
'properties': {},
'priority': 500,
'mounts': {
'/var/spool/cwl/cwl.input.yml': {
'portable_data_hash': '99999999999999999999999999999996+99',
'kind': 'collection',
'path': 'cwl.input.yml'
},
'/var/spool/cwl/workflow.cwl': {
'portable_data_hash': '99999999999999999999999999999996+99',
'kind': 'collection',
'path': 'workflow.cwl'
},
'stdout': {
'path': '/var/spool/cwl/cwl.output.json',
'kind': 'file'
},
'/tmp': {
'kind': 'tmp',
'capacity': 1073741824
}, '/var/spool/cwl': {
'kind': 'tmp',
'capacity': 3221225472
}
},
'state': 'Committed',
'output_path': '/var/spool/cwl',
'container_image': '99999999999999999999999999999993+99',
'command': [
u'cwltool',
u'--no-container',
u'--move-outputs',
u'--preserve-entire-environment',
u'workflow.cwl#main',
u'cwl.input.yml'
],
'use_existing': True,
'output_name': u'Output for step echo-subwf',
'cwd': '/var/spool/cwl'
}))
def test_default_work_api(self):
arvados_cwl.add_arv_hints()
api = mock.MagicMock()
api._rootDesc = copy.deepcopy(get_rootDesc())
runner = arvados_cwl.executor.ArvCwlExecutor(api)
self.assertEqual(runner.work_api, 'containers')
| 41.583256
| 147
| 0.518567
|
a901d0c61b616245177af2738749a27058f1a8ad
| 513
|
py
|
Python
|
code/scoring/compare_test_preds.py
|
s231644/rucompoundsplitter
|
606d4ac085ea0b3f8c764ebf413d66d2ec45cab4
|
[
"Apache-2.0"
] | null | null | null |
code/scoring/compare_test_preds.py
|
s231644/rucompoundsplitter
|
606d4ac085ea0b3f8c764ebf413d66d2ec45cab4
|
[
"Apache-2.0"
] | null | null | null |
code/scoring/compare_test_preds.py
|
s231644/rucompoundsplitter
|
606d4ac085ea0b3f8c764ebf413d66d2ec45cab4
|
[
"Apache-2.0"
] | null | null | null |
import torch
from code.ml.vocab import vocab
from evaluate import compare
from train_lstm_scorer import LSTMScorer
best_path = "scorer_best.bin"
scorer = LSTMScorer(
len(vocab.i2w), 250, 128, 256, emb_dropout=0.1, rnn_dropout=0.25,
pretrained_path="scorer_best.bin"
)
scorer.model.load_state_dict(torch.load(best_path, map_location=torch.device('cpu')))
print("Computing test accuracy")
acc = compare(
scorer,
"../../data/hypotheses/generated.txt",
"../../data/gold_analyses/test.csv",
)
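# Report the result; `acc` is assumed here to be a printable score returned by
# compare() (the script announces the computation above but never surfaces the value).
print(f"Test accuracy: {acc}")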
| 23.318182
| 85
| 0.732943
|
ef4cbc68a1fa37fb0eb7bee3d960c620770eaf1d
| 7,072
|
py
|
Python
|
tests/test_model.py
|
rameshKrSah/mle-logging
|
ab4b7b8f8ea46cdac170349f60137a4aae4666d3
|
[
"MIT"
] | 47
|
2021-08-23T14:30:59.000Z
|
2021-10-31T15:08:49.000Z
|
tests/test_model.py
|
rameshKrSah/mle-logging
|
ab4b7b8f8ea46cdac170349f60137a4aae4666d3
|
[
"MIT"
] | 2
|
2021-08-24T11:26:26.000Z
|
2021-10-01T08:17:53.000Z
|
tests/test_model.py
|
rameshKrSah/mle-logging
|
ab4b7b8f8ea46cdac170349f60137a4aae4666d3
|
[
"MIT"
] | 3
|
2021-08-24T16:53:31.000Z
|
2021-08-28T21:06:49.000Z
|
import os
import shutil
import numpy as np
import torch.nn as nn
from sklearn.svm import SVC
from mle_logging import MLELogger, load_model, load_log
log_config = {
"time_to_track": ["num_updates", "num_epochs"],
"what_to_track": ["train_loss", "test_loss"],
"experiment_dir": "experiment_dir/",
"config_fname": None,
"use_tboard": True,
"model_type": "torch",
}
time_tic = {"num_updates": 10, "num_epochs": 1}
stats_tic = {"train_loss": 0.1234, "test_loss": 0.1235}
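# Typical flow sketched by the tests below: an MLELogger built from log_config is
# updated once per checkpoint via log.update(time_tic, stats_tic, model, save=True);
# the "model_type" entry selects the serializer (torch/tensorflow/jax/sklearn/numpy).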
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def create_tensorflow_model():
import tensorflow as tf
from tensorflow import keras
model = tf.keras.models.Sequential(
[
keras.layers.Dense(512, activation="relu", input_shape=(784,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(10),
]
)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
return model
def test_save_load_torch():
"""Test saving and loading of torch model."""
# Remove experiment dir at start of test
if os.path.exists(log_config["experiment_dir"]) and os.path.isdir(
log_config["experiment_dir"]
):
shutil.rmtree(log_config["experiment_dir"])
# Instantiate logging to experiment_dir
log_config["model_type"] = "torch"
log = MLELogger(**log_config)
# Save a torch model
model = DummyModel()
log.update(time_tic, stats_tic, model, save=True)
# Assert the existence of the files
file_to_check = os.path.join(
log_config["experiment_dir"], "models/final", "final_no_seed_provided.pt"
)
assert os.path.exists(file_to_check)
# Load log and afterwards the model
relog = load_log(log_config["experiment_dir"])
remodel = load_model(relog.meta.model_ckpt, log_config["model_type"], model)
assert type(remodel) == DummyModel
# Finally -- clean up
shutil.rmtree(log_config["experiment_dir"])
def test_save_load_tf():
"""Test saving and loading of tensorflow model."""
# Remove experiment dir at start of test
if os.path.exists(log_config["experiment_dir"]) and os.path.isdir(
log_config["experiment_dir"]
):
shutil.rmtree(log_config["experiment_dir"])
# Instantiate logging to experiment_dir
log_config["model_type"] = "tensorflow"
log = MLELogger(**log_config)
# Save a tensorflow model
model = create_tensorflow_model()
log.update(time_tic, stats_tic, model, save=True)
# Assert the existence of the files
file_to_check = os.path.join(
log_config["experiment_dir"],
"models/final",
"final_no_seed_provided.pt" + ".data-00000-of-00001",
)
assert os.path.exists(file_to_check)
file_to_check = os.path.join(
log_config["experiment_dir"],
"models/final",
"final_no_seed_provided.pt" + ".index",
)
assert os.path.exists(file_to_check)
file_to_check = os.path.join(
log_config["experiment_dir"], "models/final", "checkpoint"
)
assert os.path.exists(file_to_check)
# Load log and afterwards the model
relog = load_log(log_config["experiment_dir"])
_ = load_model(relog.meta.model_ckpt, log_config["model_type"], model)
# Finally -- clean up
shutil.rmtree(log_config["experiment_dir"])
def test_save_load_jax():
"""Test saving and loading of jax model."""
# Remove experiment dir at start of test
if os.path.exists(log_config["experiment_dir"]) and os.path.isdir(
log_config["experiment_dir"]
):
shutil.rmtree(log_config["experiment_dir"])
# Instantiate logging to experiment_dir
log_config["model_type"] = "jax"
log = MLELogger(**log_config)
# Save a jax model (haiku parameters)
import jax
import haiku as hk
def lenet_fn(x):
"""Standard LeNet-300-100 MLP network."""
mlp = hk.Sequential(
[
hk.Flatten(),
hk.Linear(300),
jax.nn.relu,
hk.Linear(100),
jax.nn.relu,
hk.Linear(10),
]
)
return mlp(x)
lenet = hk.without_apply_rng(hk.transform(lenet_fn))
params = lenet.init(jax.random.PRNGKey(42), np.zeros((32, 784)))
log.update(time_tic, stats_tic, params, save=True)
# Assert the existence of the files
file_to_check = os.path.join(
log_config["experiment_dir"], "models/final", "final_no_seed_provided.pkl"
)
assert os.path.exists(file_to_check)
# Load log and afterwards the model
relog = load_log(log_config["experiment_dir"])
_ = load_model(relog.meta.model_ckpt, log_config["model_type"])
# Finally -- clean up
shutil.rmtree(log_config["experiment_dir"])
def test_save_load_sklearn():
"""Test saving and loading of sklearn model."""
# Remove experiment dir at start of test
if os.path.exists(log_config["experiment_dir"]) and os.path.isdir(
log_config["experiment_dir"]
):
shutil.rmtree(log_config["experiment_dir"])
# Instantiate logging to experiment_dir
log_config["model_type"] = "sklearn"
log = MLELogger(**log_config)
# Save a sklearn model
model = SVC(gamma="auto")
log.update(time_tic, stats_tic, model, save=True)
# Assert the existence of the files
file_to_check = os.path.join(
log_config["experiment_dir"], "models/final", "final_no_seed_provided.pkl"
)
assert os.path.exists(file_to_check)
# Load log and afterwards the model
relog = load_log(log_config["experiment_dir"])
remodel = load_model(relog.meta.model_ckpt, log_config["model_type"], model)
assert type(remodel) == SVC
# Finally -- clean up
shutil.rmtree(log_config["experiment_dir"])
def test_save_load_numpy():
"""Test saving and loading of numpy model/array."""
# Remove experiment dir at start of test
if os.path.exists(log_config["experiment_dir"]) and os.path.isdir(
log_config["experiment_dir"]
):
shutil.rmtree(log_config["experiment_dir"])
# Instantiate logging to experiment_dir
log_config["model_type"] = "numpy"
log = MLELogger(**log_config)
# Save a numpy array as the model
model = np.array([1, 2, 3, 4])
log.update(time_tic, stats_tic, model, save=True)
# Assert the existence of the files
file_to_check = os.path.join(
log_config["experiment_dir"], "models/final", "final_no_seed_provided.pkl"
)
assert os.path.exists(file_to_check)
# Load log and afterwards the model
relog = load_log(log_config["experiment_dir"])
remodel = load_model(relog.meta.model_ckpt, log_config["model_type"], model)
assert (remodel == model).all()
# Finally -- clean up
shutil.rmtree(log_config["experiment_dir"])
| 31.017544
| 85
| 0.656533
|
e43183e60013956a529daeebefc2fcafdda45f94
| 5,072
|
py
|
Python
|
src/piptool.py
|
seanxwzhang/rules_pip
|
fede654462980b2dd9b60c041e043e8e628f05bd
|
[
"MIT"
] | null | null | null |
src/piptool.py
|
seanxwzhang/rules_pip
|
fede654462980b2dd9b60c041e043e8e628f05bd
|
[
"MIT"
] | null | null | null |
src/piptool.py
|
seanxwzhang/rules_pip
|
fede654462980b2dd9b60c041e043e8e628f05bd
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import re
import sys
from pip._internal.req.req_file import parse_requirements
from pip._internal.download import PipSession
def clean_name(name):
# Escape any illegal characters with underscore.
return re.sub("[-.+]", "_", name)
def is_pinned_requirement(ireq):
"""
Returns whether an InstallRequirement is a "pinned" requirement.
An InstallRequirement is considered pinned if:
- Is not editable
- It has exactly one specifier
- That specifier is "=="
- The version does not contain a wildcard
Examples:
django==1.8 # pinned
django>1.8 # NOT pinned
django~=1.8 # NOT pinned
django==1.* # NOT pinned
"""
if ireq.editable:
return False
if ireq.req is None or len(ireq.specifier._specs) != 1:
return False
op, version = next(iter(ireq.specifier._specs))._spec
return (op == "==" or op == "===") and not version.endswith(".*")
def as_tuple(ireq):
"""
Pulls out the (name: str, version:str, extras:(str)) tuple from
the pinned InstallRequirement.
"""
if not is_pinned_requirement(ireq):
raise TypeError("Expected a pinned InstallRequirement, got {}".format(ireq))
name = ireq.name
version = next(iter(ireq.specifier._specs))._spec[1]
extras = tuple(sorted(ireq.extras))
return name, version, extras
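# Example (hypothetical requirement): for a line such as "requests[security]==2.25.1",
# as_tuple() would return ("requests", "2.25.1", ("security",)) -- the extras come back
# sorted, as a tuple.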
def repository_name(name, version, python_version):
"""Returns the canonical name of the Bazel repository for a package.
:param name: package name
:param version: package version
:param python_version: python version tag (major and minor, e.g. "38")
:returns: repo name
:rtype: str
"""
canonical = "pypi__{}__{}_{}".format(python_version, name, version)
return clean_name(canonical)
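# Example (hypothetical values): repository_name("PyYAML", "5.4.1", "39") yields
# "pypi__39__PyYAML_5_4_1", since clean_name() replaces "-", "." and "+" with "_"
# to form a valid Bazel repository name.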
def whl_library(name, extras, repo_name, pip_repo_name, python_interpreter):
"""FIXME! briefly describe function
:param name: package nane
:param extras: extras for this lib
:param repo_name: repo name used for this lib
:param pip_repo_name: pip_import repo
:returns: whl_library rule definition
:rtype: str
"""
# Indentation here matters
return """
if "{repo_name}" not in native.existing_rules():
whl_library(
name = "{repo_name}",
pkg = "{name}",
requirements_repo = "@{pip_repo_name}",
python_interpreter = "{python_interpreter}",
extras = [{extras}],
pip_args = pip_args,
)""".format(
name=name,
repo_name=repo_name,
pip_repo_name=pip_repo_name,
python_interpreter=python_interpreter,
extras=",".join(['"%s"' % extra for extra in extras]),
)
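# Example (hypothetical arguments): whl_library("PyYAML", [], "pypi__39__PyYAML_5_4_1",
# "piptool_deps", "/usr/bin/python3") renders a Starlark snippet along the lines of:
#
#   if "pypi__39__PyYAML_5_4_1" not in native.existing_rules():
#       whl_library(
#           name = "pypi__39__PyYAML_5_4_1",
#           pkg = "PyYAML",
#           requirements_repo = "@piptool_deps",
#           python_interpreter = "/usr/bin/python3",
#           extras = [],
#           pip_args = pip_args,
#       )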
def get_requirements(requirement):
"""Parse a requirement file
:param requirement: path to requirement file
:returns: list of InstallRequirement
:rtype: list[InstallRequirements]
"""
session = PipSession()
return parse_requirements(requirement, session=session)
def main():
logging.basicConfig()
parser = argparse.ArgumentParser(
description="Import Python dependencies into Bazel."
)
parser.add_argument("--name", action="store", help=("The namespace of the import."))
parser.add_argument(
"--input",
action="store",
help=("The requirements.txt file to import."),
required=True,
)
parser.add_argument(
"--output",
action="store",
help=("The requirements.bzl file to export."),
required=True,
)
args = parser.parse_args()
reqs = get_requirements(args.input)
python_version = "%d%d" % (sys.version_info[0], sys.version_info[1])
whl_targets = []
whl_libraries = []
for req in reqs:
name, version, extras = as_tuple(req)
repo_name = repository_name(name, version, python_version)
whl_targets.append(
",".join(
['"%s": "@%s//:pkg"' % (name.lower(), repo_name)]
+ [
# For every extra that is possible from this requirements.txt
'"%s[%s]": "@%s//:%s"'
% (name.lower(), extra.lower(), repo_name, extra)
for extra in extras
]
)
)
whl_libraries.append(
whl_library(name, extras, repo_name, args.name, sys.executable)
)
with open(args.output, "w") as _f:
_f.write(
"""\
# Install pip requirements.
load("@com_github_ali5h_rules_pip//:defs.bzl", "whl_library")
def pip_install(pip_args=[]):
{whl_libraries}
_requirements = {{
{mappings}
}}
def requirement(name, target=None):
name_key = name.lower()
if name_key not in _requirements:
fail("Could not find pip-provided dependency: '%s'" % name)
req = _requirements[name_key]
if target != None:
pkg, _, _ = req.partition("//")
req = pkg + target
return req
""".format(
whl_libraries="\n".join(whl_libraries), mappings=",".join(whl_targets)
)
)
if __name__ == "__main__":
main()
| 27.868132
| 88
| 0.619085
|
da2c213a4631fc7e52e3379e84b2d265e459f982
| 11,702
|
py
|
Python
|
micromamba/tests/helpers.py
|
wulmer/mamba
|
5961d76afdd8b0f070bf0f2da396ef25289c965c
|
[
"BSD-3-Clause"
] | null | null | null |
micromamba/tests/helpers.py
|
wulmer/mamba
|
5961d76afdd8b0f070bf0f2da396ef25289c965c
|
[
"BSD-3-Clause"
] | null | null | null |
micromamba/tests/helpers.py
|
wulmer/mamba
|
5961d76afdd8b0f070bf0f2da396ef25289c965c
|
[
"BSD-3-Clause"
] | null | null | null |
import errno
import json
import os
import platform
import random
import shutil
import string
import subprocess
import sys
from enum import Enum
from pathlib import Path
import pytest
import yaml
class DryRun(Enum):
OFF = "OFF"
DRY = "DRY"
ULTRA_DRY = "ULTRA_DRY"
use_offline = False
channel = ["-c", "conda-forge"]
dry_run_tests = DryRun(
os.environ["MAMBA_DRY_RUN_TESTS"]
if ("MAMBA_DRY_RUN_TESTS" in os.environ)
else "OFF"
)
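# The dry-run level is read from the environment: running the suite with, e.g.,
# MAMBA_DRY_RUN_TESTS=DRY makes the install/create/remove/update helpers below append
# --dry-run to every micromamba invocation.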
MAMBA_NO_PREFIX_CHECK = 1 << 0
MAMBA_ALLOW_EXISTING_PREFIX = 1 << 1
MAMBA_ALLOW_MISSING_PREFIX = 1 << 2
MAMBA_ALLOW_NOT_ENV_PREFIX = 1 << 3
MAMBA_EXPECT_EXISTING_PREFIX = 1 << 4
MAMBA_NOT_ALLOW_EXISTING_PREFIX = 0
MAMBA_NOT_ALLOW_MISSING_PREFIX = 0
MAMBA_NOT_ALLOW_NOT_ENV_PREFIX = 0
MAMBA_NOT_EXPECT_EXISTING_PREFIX = 0
if platform.system() == "Windows":
xtensor_hpp = "Library/include/xtensor/xtensor.hpp"
xsimd_hpp = "Library/include/xsimd/xsimd.hpp"
else:
xtensor_hpp = "include/xtensor/xtensor.hpp"
xsimd_hpp = "include/xsimd/xsimd.hpp"
def get_umamba(cwd=os.getcwd()):
if os.getenv("TEST_MAMBA_EXE"):
umamba = os.getenv("TEST_MAMBA_EXE")
else:
if platform.system() == "Windows":
umamba_bin = "micromamba.exe"
else:
umamba_bin = "micromamba"
umamba = os.path.join(cwd, "build", "micromamba", umamba_bin)
if not Path(umamba).exists():
print("MICROMAMBA NOT FOUND!")
return umamba
def random_string(N=10):
return "".join(random.choices(string.ascii_uppercase + string.digits, k=N))
def shell(*args, cwd=os.getcwd()):
umamba = get_umamba(cwd=cwd)
cmd = [umamba, "shell"] + [arg for arg in args if arg]
if "--print-config-only" in args:
cmd += ["--debug"]
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
if "--print-config-only" in args:
return yaml.load(res, Loader=yaml.FullLoader)
return res.decode()
def info(*args):
umamba = get_umamba()
cmd = [umamba, "info"] + [arg for arg in args if arg]
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
return res.decode()
def install(*args, default_channel=True, no_rc=True, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "install", "-y"] + [arg for arg in args if arg]
if "--print-config-only" in args:
cmd += ["--debug"]
if default_channel:
cmd += channel
if no_rc:
cmd += ["--no-rc"]
if use_offline:
cmd += ["--offline"]
if (dry_run_tests == DryRun.DRY) and "--dry-run" not in args and not no_dry_run:
cmd += ["--dry-run"]
cmd += ["--log-level=info"]
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except:
print(res.decode())
return
if "--print-config-only" in args:
return yaml.load(res, Loader=yaml.FullLoader)
return res.decode()
def create(*args, default_channel=True, no_rc=True, no_dry_run=False, always_yes=True):
umamba = get_umamba()
cmd = [umamba, "create"] + [arg for arg in args if arg]
if "--print-config-only" in args:
cmd += ["--debug"]
if always_yes:
cmd += ["-y"]
if default_channel:
cmd += channel
if no_rc:
cmd += ["--no-rc"]
if use_offline:
cmd += ["--offline"]
if (dry_run_tests == DryRun.DRY) and "--dry-run" not in args and not no_dry_run:
cmd += ["--dry-run"]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
j = json.loads(res)
return j
if "--print-config-only" in args:
return yaml.load(res, Loader=yaml.FullLoader)
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{' '.join(cmd)}'")
raise (e)
def remove(*args, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "remove", "-y"] + [arg for arg in args if arg]
if "--print-config-only" in args:
cmd += ["--debug"]
if (dry_run_tests == DryRun.DRY) and "--dry-run" not in args and not no_dry_run:
cmd += ["--dry-run"]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
j = json.loads(res)
return j
if "--print-config-only" in args:
return yaml.load(res, Loader=yaml.FullLoader)
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{' '.join(cmd)}'")
raise (e)
def update(*args, default_channel=True, no_rc=True, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "update", "-y"] + [arg for arg in args if arg]
if use_offline:
cmd += ["--offline"]
if no_rc:
cmd += ["--no-rc"]
if default_channel:
cmd += channel
if (dry_run_tests == DryRun.DRY) and "--dry-run" not in args and not no_dry_run:
cmd += ["--dry-run"]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
print(f"Error when executing '{' '.join(cmd)}'")
raise
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{' '.join(cmd)}'")
raise (e)
def run_env(*args, f=None):
umamba = get_umamba()
cmd = [umamba, "env"] + [arg for arg in args if arg]
res = subprocess.check_output(cmd)
if "--json" in args:
j = json.loads(res)
return j
return res.decode()
def umamba_list(*args):
umamba = get_umamba()
cmd = [umamba, "list"] + [arg for arg in args if arg]
res = subprocess.check_output(cmd)
if "--json" in args:
j = json.loads(res)
return j
return res.decode()
def get_concrete_pkg(t, needle):
pkgs = t["actions"]["LINK"]
for p in pkgs:
if p["name"] == needle:
return f"{p['name']}-{p['version']}-{p['build_string']}"
raise RuntimeError("Package not found in transaction")
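# Example: for a --json transaction whose LINK actions contain
#   {"name": "xtensor", "version": "0.21.5", "build_string": "hc9558a2_0"}
# get_concrete_pkg(t, "xtensor") returns "xtensor-0.21.5-hc9558a2_0" (the build string
# here is illustrative; the real value depends on the channel).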
def get_env(n, f=None):
root_prefix = os.getenv("MAMBA_ROOT_PREFIX")
if f:
return Path(os.path.join(root_prefix, "envs", n, f))
else:
return Path(os.path.join(root_prefix, "envs", n))
def get_pkg(n, f=None, root_prefix=None):
if not root_prefix:
root_prefix = os.getenv("MAMBA_ROOT_PREFIX")
if f:
return Path(os.path.join(root_prefix, "pkgs", n, f))
else:
return Path(os.path.join(root_prefix, "pkgs", n))
def get_tarball(n):
root_prefix = os.getenv("MAMBA_ROOT_PREFIX")
return Path(os.path.join(root_prefix, "pkgs", n + ".tar.bz2"))
def get_concrete_pkg_info(env, pkg_name):
with open(os.path.join(env, "conda-meta", pkg_name + ".json")) as fi:
return json.load(fi)
def read_windows_registry(target_path): # pragma: no cover
import winreg
# HKEY_LOCAL_MACHINE\Software\Microsoft\Command Processor\AutoRun
# HKEY_CURRENT_USER\Software\Microsoft\Command Processor\AutoRun
# returns value_value, value_type -or- None, None if target does not exist
main_key, the_rest = target_path.split("\\", 1)
subkey_str, value_name = the_rest.rsplit("\\", 1)
main_key = getattr(winreg, main_key)
try:
key = winreg.OpenKey(main_key, subkey_str, 0, winreg.KEY_READ)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
return None, None
try:
value_tuple = winreg.QueryValueEx(key, value_name)
value_value = value_tuple[0]
if isinstance(value_value, str):
value_value = value_value.strip()
value_type = value_tuple[1]
return value_value, value_type
except Exception:
# [WinError 2] The system cannot find the file specified
winreg.CloseKey(key)
return None, None
finally:
winreg.CloseKey(key)
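# Hypothetical usage (Windows only): reading the cmd.exe AutoRun value mentioned above,
#   value, vtype = read_windows_registry(
#       "HKEY_CURRENT_USER\\Software\\Microsoft\\Command Processor\\AutoRun")
# returns (None, None) when the key or value does not exist.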
def write_windows_registry(target_path, value_value, value_type): # pragma: no cover
import winreg
main_key, the_rest = target_path.split("\\", 1)
subkey_str, value_name = the_rest.rsplit("\\", 1)
main_key = getattr(winreg, main_key)
try:
key = winreg.OpenKey(main_key, subkey_str, 0, winreg.KEY_WRITE)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
key = winreg.CreateKey(main_key, subkey_str)
try:
winreg.SetValueEx(key, value_name, 0, value_type, value_value)
finally:
winreg.CloseKey(key)
@pytest.fixture(scope="session")
def cache_warming():
cache = Path(os.path.expanduser(os.path.join("~", "cache" + random_string())))
os.makedirs(cache)
os.environ["CONDA_PKGS_DIRS"] = str(cache)
tmp_prefix = os.path.expanduser(os.path.join("~", "tmpprefix" + random_string()))
res = create("-p", tmp_prefix, "xtensor", "--json", no_dry_run=True)
pkg_name = get_concrete_pkg(res, "xtensor")
yield cache, pkg_name
if "CONDA_PKGS_DIRS" in os.environ:
os.environ.pop("CONDA_PKGS_DIRS")
rmtree(cache)
rmtree(tmp_prefix)
@pytest.fixture(scope="session")
def existing_cache(cache_warming):
yield cache_warming[0]
@pytest.fixture(scope="session")
def repodata_files(existing_cache):
yield [f for f in existing_cache.iterdir() if f.is_file() and f.suffix == ".json"]
@pytest.fixture(scope="session")
def test_pkg(cache_warming):
yield cache_warming[1]
@pytest.fixture
def first_cache_is_writable():
return True
def link_dir(new_dir, existing_dir, prefixes=None):
for i in existing_dir.iterdir():
if i.is_dir():
subdir = new_dir / i.name
os.makedirs(subdir, exist_ok=True)
link_dir(subdir, i)
elif i.is_symlink():
linkto = os.readlink(i)
os.symlink(linkto, new_dir / i.name)
elif i.is_file():
os.makedirs(new_dir, exist_ok=True)
name = i.name
os.link(i, new_dir / name)
def recursive_chmod(path: Path, permission, is_root=True):
p = Path(path)
if not p.is_symlink():
os.chmod(p, permission)
if p.is_dir():
for i in p.iterdir():
recursive_chmod(i, permission, is_root=False)
def rmtree(path: Path):
p = Path(path)
recursive_chmod(p, 0o700)
def handleError(func, path, exc_info):
recursive_chmod(path, 0o700)
func(path)
if p.is_dir():
shutil.rmtree(p, onerror=handleError)
else:
os.remove(p)
def get_fake_activate(prefix):
prefix = Path(prefix)
env = os.environ.copy()
curpath = env["PATH"]
curpath = curpath.split(os.pathsep)
if platform.system() == "Windows":
addpath = [
prefix,
prefix / "Library" / "mingw-w64" / "bin",
prefix / "Library" / "usr" / "bin",
prefix / "Library" / "bin",
prefix / "Scripts",
prefix / "bin",
]
else:
addpath = [prefix / "bin"]
env["PATH"] = os.pathsep.join([str(x) for x in addpath + curpath])
env["CONDA_PREFIX"] = str(prefix)
return env
| 27.599057
| 87
| 0.607418
|
fe89e329de68f460717efdc26f00a88a7bed34ec
| 13,451
|
py
|
Python
|
art/classifiers/ensemble.py
|
ebubae/adversarial-robustness-toolbox
|
55efab2c1a60ae14c37b72fe84778355314396ea
|
[
"MIT"
] | 17
|
2019-09-15T04:00:31.000Z
|
2021-03-18T08:12:21.000Z
|
art/classifiers/ensemble.py
|
ebubae/adversarial-robustness-toolbox
|
55efab2c1a60ae14c37b72fe84778355314396ea
|
[
"MIT"
] | null | null | null |
art/classifiers/ensemble.py
|
ebubae/adversarial-robustness-toolbox
|
55efab2c1a60ae14c37b72fe84778355314396ea
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the classifier `EnsembleClassifier` for ensembles of multiple classifiers.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art.classifiers.classifier import Classifier
logger = logging.getLogger(__name__)
class EnsembleClassifier(Classifier):
"""
Class for aggregating multiple classifiers into an ensemble. The individual classifiers are expected to be
trained when the ensemble is created and no training procedures are provided through this class.
"""
def __init__(self, classifiers, classifier_weights=None, channel_index=3, clip_values=None, defences=None,
preprocessing=(0, 1)):
"""
Initialize a :class:`.EnsembleClassifier` object. The data range values and colour channel index have to
be consistent for all the classifiers in the ensemble.
:param classifiers: List of :class:`.Classifier` instances to be ensembled together.
:type classifiers: `list`
:param classifier_weights: List of weights, one scalar per classifier, to assign to their prediction when
aggregating results. If `None`, all classifiers are assigned the same weight.
:type classifier_weights: `list` or `np.ndarray` or `None`
:param channel_index: Index of the axis in data containing the color channels or features.
:type channel_index: `int`
:param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
maximum values allowed for features. If floats are provided, these will be used as the range of all
features. If arrays are provided, each value will be considered the bound for a feature, thus
the shape of clip values needs to match the total number of features.
:type clip_values: `tuple`
:param defences: Defences to be activated with the classifier.
:type defences: `str` or `list(str)`
:param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
used for data preprocessing. The first value will be subtracted from the input. The input will then
be divided by the second one.
:type preprocessing: `tuple`
"""
super(EnsembleClassifier, self).__init__(clip_values=clip_values, channel_index=channel_index,
defences=defences, preprocessing=preprocessing)
if classifiers is None or not classifiers:
raise ValueError('No classifiers provided for the ensemble.')
self._nb_classifiers = len(classifiers)
# Assert all classifiers are the right shape(s)
for classifier in classifiers:
if not isinstance(classifier, Classifier):
raise TypeError('Expected type `Classifier`, found %s instead.' % type(classifier))
if clip_values != classifier.clip_values:
raise ValueError('Incompatible `clip_values` between classifiers in the ensemble. Found %s and %s.'
% (str(clip_values), str(classifier.clip_values)))
if classifier.nb_classes != classifiers[0].nb_classes:
raise ValueError('Incompatible output shapes between classifiers in the ensemble. Found %s and %s.'
% (str(classifier.nb_classes), str(classifiers[0].nb_classes)))
if classifier.input_shape != classifiers[0].input_shape:
raise ValueError('Incompatible input shapes between classifiers in the ensemble. Found %s and %s.'
% (str(classifier.input_shape), str(classifiers[0].input_shape)))
self._input_shape = classifiers[0].input_shape
self._nb_classes = classifiers[0].nb_classes
# Set weights for classifiers
if classifier_weights is None:
classifier_weights = np.ones(self._nb_classifiers) / self._nb_classifiers
self._classifier_weights = classifier_weights
self._classifiers = classifiers
def predict(self, x, logits=False, batch_size=128, **kwargs):
"""
Perform prediction for a batch of inputs. Predictions from the classifiers are aggregated at the probability level,
as logits are not comparable between models. If a logits prediction was requested, probabilities are converted
back to logits after aggregation.
:param x: Test set.
:type x: `np.ndarray`
:param logits: `True` if the prediction should be done at the logits layer.
:type logits: `bool`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of predictions of shape `(nb_inputs, self.nb_classes)`, or of shape
`(nb_classifiers, nb_inputs, self.nb_classes)` if `raw=True`.
:rtype: `np.ndarray`
"""
if 'raw' in kwargs:
raw = kwargs['raw']
else:
raise ValueError('Missing argument `raw`.')
preds = np.array([self._classifier_weights[i] * self._classifiers[i].predict(x, raw and logits)
for i in range(self._nb_classifiers)])
if raw:
return preds
# Aggregate predictions only at the probability level, as logits are not comparable between models
var_z = np.sum(preds, axis=0)
# Convert back to logits if needed
if logits:
eps = 10e-8
var_z = np.log(np.clip(var_z, eps, 1. - eps))
return var_z
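# In other words, with the default weights of 1/N the aggregation is a plain mean of
# the member probabilities: two classifiers predicting [0.2, 0.8] and [0.4, 0.6]
# aggregate to [0.3, 0.7]; only afterwards (if logits=True) is
# np.log(np.clip(p, eps, 1 - eps)) applied.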
def fit(self, x, y, batch_size=128, nb_epochs=20, **kwargs):
"""
Fit the classifier on the training set `(x, y)`. This function is not supported for ensembles.
:param x: Training data.
:type x: `np.ndarray`
:param y: Labels, one-vs-rest encoding.
:type y: `np.ndarray`
:param batch_size: Size of batches.
:type batch_size: `int`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param kwargs: Dictionary of framework-specific arguments.
:type kwargs: `dict`
:return: `None`
"""
raise NotImplementedError
def fit_generator(self, generator, nb_epochs=20, **kwargs):
"""
Fit the classifier using the generator that yields batches as specified. This function is not supported for
ensembles.
:param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
training in Keras, it will be.
:type generator: :class:`.DataGenerator`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param kwargs: Dictionary of framework-specific argument.
:type kwargs: `dict`
:return: `None`
"""
raise NotImplementedError
@property
def layer_names(self):
"""
Return the hidden layers in the model, if applicable. This function is not supported for ensembles.
:return: The hidden layers in the model, input and output layers excluded.
:rtype: `list`
.. warning:: `layer_names` tries to infer the internal structure of the model.
This feature comes with no guarantees on the correctness of the result.
The intended order of the layers tries to match their order in the model, but this is not
guaranteed either.
"""
raise NotImplementedError
def get_activations(self, x, layer, batch_size=128):
"""
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
`nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
calling `layer_names`. This function is not supported for ensembles.
:param x: Input for computing the activations.
:type x: `np.ndarray`
:param layer: Layer for computing the activations
:type layer: `int` or `str`
:param batch_size: Size of batches.
:type batch_size: `int`
:return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
:rtype: `np.ndarray`
"""
raise NotImplementedError
def class_gradient(self, x, label=None, logits=False, **kwargs):
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param label: Index of a specific per-class derivative. If `None`, then gradients for all
classes will be computed.
:type label: `int`
:param logits: `True` if the prediction should be done at the logits layer.
:type logits: `bool`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified. If `raw=True`, an additional
dimension is added at the beginning of the array, indexing the different classifiers.
:rtype: `np.ndarray`
"""
if 'raw' in kwargs:
raw = kwargs['raw']
else:
raise ValueError('Missing argument `raw`.')
grads = np.array([self._classifier_weights[i] * self._classifiers[i].class_gradient(x, label, logits)
for i in range(self._nb_classifiers)])
if raw:
return grads
return np.sum(grads, axis=0)
def loss_gradient(self, x, y, **kwargs):
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param y: Correct labels, one-vs-rest encoding.
:type y: `np.ndarray`
:param raw: Return the individual classifier raw outputs (not aggregated).
:type raw: `bool`
:return: Array of gradients of the same shape as `x`. If `raw=True`, shape becomes `[nb_classifiers, x.shape]`.
:rtype: `np.ndarray`
"""
if 'raw' in kwargs:
raw = kwargs['raw']
else:
raise ValueError('Missing argument `raw`.')
grads = np.array([self._classifier_weights[i] * self._classifiers[i].loss_gradient(x, y)
for i in range(self._nb_classifiers)])
if raw:
return grads
return np.sum(grads, axis=0)
def set_learning_phase(self, train):
"""
Set the learning phase for the backend framework.
:param train: True to set the learning phase to training, False to set it to prediction.
:type train: `bool`
"""
if self._learning_phase is not None and isinstance(train, bool):
for classifier in self._classifiers:
classifier.set_learning_phase(train)
self._learning_phase = train
def __repr__(self):
repr_ = "%s(classifiers=%r, classifier_weights=%r, channel_index=%r, clip_values=%r, defences=%r, " \
"preprocessing=%r)" \
% (self.__module__ + '.' + self.__class__.__name__,
self._classifiers, self._classifier_weights, self.channel_index, self.clip_values, self.defences,
self.preprocessing)
return repr_
def save(self, filename, path=None):
"""
Save a model to file in the format specific to the backend framework. This function is not supported for
ensembles.
:param filename: Name of the file where to store the model.
:type filename: `str`
:param path: Path of the folder where to store the model. If no path is specified, the model will be stored in
the default data location of the library `DATA_PATH`.
:type path: `str`
:return: None
"""
raise NotImplementedError
| 46.867596
| 120
| 0.644785
|
2a5d1eae480cc7de69c8fccc849d80082bb230f6
| 22,849
|
py
|
Python
|
python/lwt_interface/dict_encoding_testlib.py
|
minesrebollo/tskit
|
c381917280fc7dccaa615cb9c51e172ae90cb3ed
|
[
"MIT"
] | null | null | null |
python/lwt_interface/dict_encoding_testlib.py
|
minesrebollo/tskit
|
c381917280fc7dccaa615cb9c51e172ae90cb3ed
|
[
"MIT"
] | 96
|
2021-06-22T16:05:43.000Z
|
2022-03-28T19:06:31.000Z
|
python/lwt_interface/dict_encoding_testlib.py
|
daniel-goldstein/tskit
|
0972c6168b7c4c2b50878e677199a7f4b030cb7a
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2018-2020 Tskit Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test definitions for the low-level LightweightTableCollection class
defined here. These tests are not intended to be executed directly,
but should be imported into another test module that imports a
compiled module exporting the LightweightTableCollection class.
See the test_example_c_module file for an example.
"""
import json
import kastore
import msprime
import numpy as np
import pytest
import tskit
import tskit.util as util
lwt_module = None
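# The consuming test module is expected to bind the compiled extension here before the
# tests run, e.g. (module names are illustrative):
#   import dict_encoding_testlib, example_c_module
#   dict_encoding_testlib.lwt_module = example_c_module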
@pytest.fixture(scope="session")
def full_ts():
"""
Return a tree sequence that has data in all fields.
"""
"""
A tree sequence with data in all fields - duplcated from tskit's conftest.py
as other test suites using this file will not have that fixture defined.
"""
n = 10
t = 1
population_configurations = [
msprime.PopulationConfiguration(n // 2),
msprime.PopulationConfiguration(n // 2),
msprime.PopulationConfiguration(0),
]
demographic_events = [
msprime.MassMigration(time=t, source=0, destination=2),
msprime.MassMigration(time=t, source=1, destination=2),
]
ts = msprime.simulate(
population_configurations=population_configurations,
demographic_events=demographic_events,
random_seed=1,
mutation_rate=1,
record_migrations=True,
)
tables = ts.dump_tables()
# TODO replace this with properly linked up individuals using sim_ancestry
# once 1.0 is released.
for j in range(n):
tables.individuals.add_row(flags=j, location=(j, j), parents=(j - 1, j - 1))
for name, table in tables.name_map.items():
if name != "provenances":
table.metadata_schema = tskit.MetadataSchema({"codec": "json"})
metadatas = [f"n_{name}_{u}" for u in range(len(table))]
metadata, metadata_offset = tskit.pack_strings(metadatas)
table.set_columns(
**{
**table.asdict(),
"metadata": metadata,
"metadata_offset": metadata_offset,
}
)
tables.metadata_schema = tskit.MetadataSchema({"codec": "json"})
tables.metadata = "Test metadata"
# Add some more provenance so we have enough rows for the offset deletion test.
for j in range(10):
tables.provenances.add_row(timestamp="x" * j, record="y" * j)
return tables.tree_sequence()
# The ts above is used for the whole test session, but our tests need fresh tables to
# modify
@pytest.fixture
def tables(full_ts):
return full_ts.dump_tables()
def test_check_ts_full(tmp_path, full_ts):
"""
Check that the example ts has data in all fields
"""
full_ts.dump(tmp_path / "tables")
store = kastore.load(tmp_path / "tables")
for v in store.values():
# Check we really have data in every field
assert v.nbytes > 0
class TestEncodingVersion:
def test_version(self):
lwt = lwt_module.LightweightTableCollection()
assert lwt.asdict()["encoding_version"] == (1, 3)
class TestRoundTrip:
"""
Tests if we can do a simple round trip on simulated data.
"""
def verify(self, tables):
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(tables.asdict())
other_tables = tskit.TableCollection.fromdict(lwt.asdict())
assert tables == other_tables
def test_simple(self):
ts = msprime.simulate(10, mutation_rate=1, random_seed=2)
self.verify(ts.tables)
def test_empty(self):
tables = tskit.TableCollection(sequence_length=1)
self.verify(tables)
def test_individuals(self):
n = 10
ts = msprime.simulate(n, mutation_rate=1, random_seed=2)
tables = ts.dump_tables()
for j in range(n):
tables.individuals.add_row(
flags=j, location=(j, j), parents=(j, j), metadata=b"x" * j
)
self.verify(tables)
def test_sequence_length(self):
ts = msprime.simulate(
10, recombination_rate=0.1, mutation_rate=1, length=0.99, random_seed=2
)
self.verify(ts.tables)
def test_migration(self):
pop_configs = [msprime.PopulationConfiguration(5) for _ in range(2)]
migration_matrix = [[0, 1], [1, 0]]
ts = msprime.simulate(
population_configurations=pop_configs,
migration_matrix=migration_matrix,
mutation_rate=1,
record_migrations=True,
random_seed=1,
)
self.verify(ts.tables)
def test_example(self, tables):
tables.metadata_schema = tskit.MetadataSchema(
{
"codec": "struct",
"type": "object",
"properties": {"top-level": {"type": "string", "binaryFormat": "50p"}},
}
)
tables.metadata = {"top-level": "top-level-metadata"}
for table in tskit.TABLE_NAMES:
t = getattr(tables, table)
if hasattr(t, "metadata_schema"):
t.packset_metadata([f"{table}-{i}".encode() for i in range(t.num_rows)])
t.metadata_schema = tskit.MetadataSchema(
{
"codec": "struct",
"type": "object",
"properties": {
table: {"type": "string", "binaryFormat": "50p"}
},
}
)
self.verify(tables)
class TestMissingData:
"""
Tests what happens when we have missing data in the encoded dict.
"""
def test_missing_sequence_length(self, tables):
d = tables.asdict()
del d["sequence_length"]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
def test_missing_metadata(self, tables):
assert tables.metadata != b""
d = tables.asdict()
del d["metadata"]
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
tables = tskit.TableCollection.fromdict(lwt.asdict())
# Empty byte field still gets interpreted by schema
with pytest.raises(json.decoder.JSONDecodeError):
tables.metadata
def test_missing_metadata_schema(self, tables):
assert repr(tables.metadata_schema) != ""
d = tables.asdict()
del d["metadata_schema"]
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
tables = tskit.TableCollection.fromdict(lwt.asdict())
assert repr(tables.metadata_schema) == ""
def test_missing_tables(self, tables):
d = tables.asdict()
table_names = d.keys() - {
"sequence_length",
"metadata",
"metadata_schema",
"encoding_version",
"indexes",
}
for table_name in table_names:
d = tables.asdict()
del d[table_name]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
class TestBadTypes:
"""
Tests for setting each column to a type that can't be converted to 1D numpy array.
"""
def verify_columns(self, value, tables):
d = tables.asdict()
table_names = set(d.keys()) - {
"sequence_length",
"metadata",
"metadata_schema",
"encoding_version",
"indexes",
}
for table_name in table_names:
table_dict = d[table_name]
for colname in set(table_dict.keys()) - {"metadata_schema"}:
copy = dict(table_dict)
copy[colname] = value
lwt = lwt_module.LightweightTableCollection()
d = tables.asdict()
d[table_name] = copy
with pytest.raises(ValueError):
lwt.fromdict(d)
def test_2d_array(self, tables):
self.verify_columns([[1, 2], [3, 4]], tables)
def test_str(self, tables):
self.verify_columns("aserg", tables)
def test_bad_top_level_types(self, tables):
d = tables.asdict()
for key in set(d.keys()) - {"encoding_version", "indexes"}:
bad_type_dict = tables.asdict()
# A list raises a TypeError for both the tables and sequence_length
bad_type_dict[key] = ["12345"]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(bad_type_dict)
class TestBadLengths:
"""
Tests for setting each column to a length incompatible with the table.
"""
def verify(self, num_rows, tables):
d = tables.asdict()
table_names = set(d.keys()) - {
"sequence_length",
"metadata",
"metadata_schema",
"encoding_version",
"indexes",
}
for table_name in sorted(table_names):
table_dict = d[table_name]
for colname in set(table_dict.keys()) - {"metadata_schema"}:
copy = dict(table_dict)
copy[colname] = table_dict[colname][:num_rows].copy()
lwt = lwt_module.LightweightTableCollection()
d = tables.asdict()
d[table_name] = copy
with pytest.raises(ValueError):
lwt.fromdict(d)
def test_two_rows(self, tables):
self.verify(2, tables)
def test_zero_rows(self, tables):
self.verify(0, tables)
def test_bad_index_length(self, tables):
for col in ("insertion", "removal"):
d = tables.asdict()
d["indexes"][f"edge_{col}_order"] = d["indexes"][f"edge_{col}_order"][:-1]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(
ValueError,
match="^edge_insertion_order and"
" edge_removal_order must be the same"
" length$",
):
lwt.fromdict(d)
d = tables.asdict()
for col in ("insertion", "removal"):
d["indexes"][f"edge_{col}_order"] = d["indexes"][f"edge_{col}_order"][:-1]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(
ValueError,
match="^edge_insertion_order and edge_removal_order must be"
" the same length as the number of edges$",
):
lwt.fromdict(d)
class TestRequiredAndOptionalColumns:
"""
Tests that specifying None for some columns will give the intended
outcome.
"""
def verify_required_columns(self, tables, table_name, required_cols):
d = tables.asdict()
table_dict = {col: None for col in d[table_name].keys()}
for col in required_cols:
table_dict[col] = d[table_name][col]
lwt = lwt_module.LightweightTableCollection()
d[table_name] = table_dict
lwt.fromdict(d)
other = lwt.asdict()
for col in required_cols:
assert np.array_equal(other[table_name][col], table_dict[col])
# Any one of these required columns as None gives an error.
for col in required_cols:
d = tables.asdict()
copy = dict(table_dict)
copy[col] = None
d[table_name] = copy
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
# Removing any one of these required columns gives an error.
for col in required_cols:
d = tables.asdict()
copy = dict(table_dict)
del copy[col]
d[table_name] = copy
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
def verify_optional_column(self, tables, table_len, table_name, col_name):
d = tables.asdict()
table_dict = d[table_name]
table_dict[col_name] = None
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert np.array_equal(
out[table_name][col_name], np.zeros(table_len, dtype=np.int32) - 1
)
def verify_offset_pair(
self, tables, table_len, table_name, col_name, required=False
):
offset_col = col_name + "_offset"
if not required:
d = tables.asdict()
table_dict = d[table_name]
table_dict[col_name] = None
table_dict[offset_col] = None
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert out[table_name][col_name].shape == (0,)
assert np.array_equal(
out[table_name][offset_col],
np.zeros(table_len + 1, dtype=np.uint32),
)
d = tables.asdict()
table_dict = d[table_name]
del table_dict[col_name]
del table_dict[offset_col]
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert out[table_name][col_name].shape == (0,)
assert np.array_equal(
out[table_name][offset_col],
np.zeros(table_len + 1, dtype=np.uint32),
)
# Setting one or the other raises a TypeError
d = tables.asdict()
table_dict = d[table_name]
table_dict[col_name] = None
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
d = tables.asdict()
table_dict = d[table_name]
del table_dict[col_name]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
d = tables.asdict()
table_dict = d[table_name]
table_dict[offset_col] = None
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
d = tables.asdict()
table_dict = d[table_name]
del table_dict[offset_col]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(TypeError):
lwt.fromdict(d)
d = tables.asdict()
table_dict = d[table_name]
bad_offset = np.zeros_like(table_dict[offset_col])
bad_offset[:-1] = table_dict[offset_col][:-1][::-1]
bad_offset[-1] = table_dict[offset_col][-1]
table_dict[offset_col] = bad_offset
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(ValueError):
lwt.fromdict(d)
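# For reference, a ragged column pair stores variable-length per-row data: e.g. the
# metadata values [b"ab", b"", b"c"] are encoded as metadata=b"abc" together with
# metadata_offset=[0, 2, 2, 3] (length num_rows + 1, non-decreasing), which is why the
# reversed-offset array built above must be rejected with a ValueError.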
def verify_metadata_schema(self, tables, table_name):
d = tables.asdict()
d[table_name]["metadata_schema"] = None
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert "metadata_schema" not in out[table_name]
tables = tskit.TableCollection.fromdict(out)
assert repr(getattr(tables, table_name).metadata_schema) == ""
def test_individuals(self, tables):
self.verify_required_columns(tables, "individuals", ["flags"])
self.verify_offset_pair(
tables, len(tables.individuals), "individuals", "location"
)
self.verify_offset_pair(
tables, len(tables.individuals), "individuals", "parents"
)
self.verify_offset_pair(
tables, len(tables.individuals), "individuals", "metadata"
)
self.verify_metadata_schema(tables, "individuals")
# Verify optional parents column
d = tables.asdict()
d["individuals"]["parents"] = None
d["individuals"]["parents_offset"] = None
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert all(val == [] for val in out["individuals"]["parents"])
def test_nodes(self, tables):
self.verify_offset_pair(tables, len(tables.nodes), "nodes", "metadata")
self.verify_optional_column(tables, len(tables.nodes), "nodes", "population")
self.verify_optional_column(tables, len(tables.nodes), "nodes", "individual")
self.verify_required_columns(tables, "nodes", ["flags", "time"])
self.verify_metadata_schema(tables, "nodes")
def test_edges(self, tables):
self.verify_required_columns(
tables, "edges", ["left", "right", "parent", "child"]
)
self.verify_offset_pair(tables, len(tables.edges), "edges", "metadata")
self.verify_metadata_schema(tables, "edges")
def test_migrations(self, tables):
self.verify_required_columns(
tables, "migrations", ["left", "right", "node", "source", "dest", "time"]
)
self.verify_offset_pair(
tables, len(tables.migrations), "migrations", "metadata"
)
self.verify_optional_column(tables, len(tables.nodes), "nodes", "individual")
self.verify_metadata_schema(tables, "migrations")
def test_sites(self, tables):
self.verify_required_columns(
tables, "sites", ["position", "ancestral_state", "ancestral_state_offset"]
)
self.verify_offset_pair(tables, len(tables.sites), "sites", "metadata")
self.verify_metadata_schema(tables, "sites")
def test_mutations(self, tables):
self.verify_required_columns(
tables,
"mutations",
["site", "node", "derived_state", "derived_state_offset"],
)
self.verify_offset_pair(tables, len(tables.mutations), "mutations", "metadata")
self.verify_metadata_schema(tables, "mutations")
# Verify optional time column
d = tables.asdict()
d["mutations"]["time"] = None
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert all(util.is_unknown_time(val) for val in out["mutations"]["time"])
def test_populations(self, tables):
self.verify_required_columns(
tables, "populations", ["metadata", "metadata_offset"]
)
self.verify_metadata_schema(tables, "populations")
self.verify_offset_pair(tables, len(tables.nodes), "nodes", "metadata", True)
def test_provenances(self, tables):
self.verify_required_columns(
tables,
"provenances",
["record", "record_offset", "timestamp", "timestamp_offset"],
)
def test_index(self, tables):
d = tables.asdict()
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
other = lwt.asdict()
assert np.array_equal(
d["indexes"]["edge_insertion_order"],
other["indexes"]["edge_insertion_order"],
)
assert np.array_equal(
d["indexes"]["edge_removal_order"], other["indexes"]["edge_removal_order"]
)
# index is optional
d = tables.asdict()
del d["indexes"]
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
# and a tc without indexes has empty dict
assert lwt.asdict()["indexes"] == {}
# Both columns must be provided, if one is
for col in ("insertion", "removal"):
d = tables.asdict()
del d["indexes"][f"edge_{col}_order"]
lwt = lwt_module.LightweightTableCollection()
with pytest.raises(
TypeError,
match="^edge_insertion_order and "
"edge_removal_order must be specified "
"together$",
):
lwt.fromdict(d)
def test_top_level_metadata(self, tables):
d = tables.asdict()
# None should give default value
d["metadata"] = None
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert "metadata" not in out
tables = tskit.TableCollection.fromdict(out)
with pytest.raises(json.decoder.JSONDecodeError):
tables.metadata
# Missing is tested in TestMissingData above
def test_top_level_metadata_schema(self, tables):
d = tables.asdict()
# None should give default value
d["metadata_schema"] = None
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(d)
out = lwt.asdict()
assert "metadata_schema" not in out
tables = tskit.TableCollection.fromdict(out)
assert repr(tables.metadata_schema) == ""
# Missing is tested in TestMissingData above
class TestLifecycle:
def test_unassigned_empty(self):
lwt_dict = lwt_module.LightweightTableCollection().asdict()
assert tskit.TableCollection.fromdict(lwt_dict) == tskit.TableCollection(-1)
def test_del_empty(self):
lwt = lwt_module.LightweightTableCollection()
lwt_dict = lwt.asdict()
del lwt
assert tskit.TableCollection.fromdict(lwt_dict) == tskit.TableCollection(-1)
def test_del_full(self, tables):
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(tables.asdict())
lwt_dict = lwt.asdict()
del lwt
assert tskit.TableCollection.fromdict(lwt_dict) == tables
def test_del_lwt_and_tables(self, tables):
lwt = lwt_module.LightweightTableCollection()
lwt.fromdict(tables.asdict())
lwt_dict = lwt.asdict()
del lwt
tables2 = tables.copy()
del tables
assert tskit.TableCollection.fromdict(lwt_dict) == tables2
| 35.926101
| 88
| 0.606153
|
7c34568b9cd77af451940da55488080f01f24771
| 700
|
py
|
Python
|
versions.py
|
shamanenas/learning-cnn
|
7ddbe770c8243eb8bc4cfc7740ea16624ff6968b
|
[
"CC0-1.0"
] | null | null | null |
versions.py
|
shamanenas/learning-cnn
|
7ddbe770c8243eb8bc4cfc7740ea16624ff6968b
|
[
"CC0-1.0"
] | null | null | null |
versions.py
|
shamanenas/learning-cnn
|
7ddbe770c8243eb8bc4cfc7740ea16624ff6968b
|
[
"CC0-1.0"
] | null | null | null |
# check library version numbers
# scipy
import scipy
print('scipy: %s' % scipy.__version__)
# numpy
import numpy
print('numpy: %s' % numpy.__version__)
# matplotlib
import matplotlib
print('matplotlib: %s' % matplotlib.__version__)
# pandas
import pandas
print('pandas: %s' % pandas.__version__)
# statsmodels
import statsmodels
print('statsmodels: %s' % statsmodels.__version__)
# scikit-learn
import sklearn
print('sklearn: %s' % sklearn.__version__)
import theano
print('theano: %s' % theano.__version__)
import tensorflow
print('tensorflow: %s' % tensorflow.__version__)
import keras
print('keras: %s' % keras.__version__)
# pillow
import PIL
print('Pillow: %s' % PIL.__version__)
| 18.918919
| 51
| 0.728571
|
1fbf1473cb57b06fdfd6c8b345352b4ddd79d761
| 11,951
|
py
|
Python
|
pyglet/libs/win32/__init__.py
|
swipswaps/pyglet
|
2bfd7ee52482b805ae076cf5036c5628e8a72224
|
[
"BSD-3-Clause"
] | 1
|
2020-04-12T15:20:34.000Z
|
2020-04-12T15:20:34.000Z
|
pyglet/libs/win32/__init__.py
|
swipswaps/pyglet
|
2bfd7ee52482b805ae076cf5036c5628e8a72224
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/libs/win32/__init__.py
|
swipswaps/pyglet
|
2bfd7ee52482b805ae076cf5036c5628e8a72224
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2019 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import struct
import pyglet
from . import constants
from .types import *
IS64 = struct.calcsize("P") == 8
_debug_win32 = pyglet.options['debug_win32']
if _debug_win32:
import traceback
_GetLastError = windll.kernel32.GetLastError
_SetLastError = windll.kernel32.SetLastError
_FormatMessageA = windll.kernel32.FormatMessageA
_log_win32 = open('debug_win32.log', 'w')
def format_error(err):
msg = create_string_buffer(256)
_FormatMessageA(constants.FORMAT_MESSAGE_FROM_SYSTEM,
c_void_p(),
err,
0,
msg,
len(msg),
c_void_p())
return msg.value
class DebugLibrary:
def __init__(self, lib):
self.lib = lib
def __getattr__(self, name):
fn = getattr(self.lib, name)
def f(*args):
_SetLastError(0)
result = fn(*args)
err = _GetLastError()
if err != 0:
for entry in traceback.format_list(traceback.extract_stack()[:-1]):
_log_win32.write(entry)
print(format_error(err), file=_log_win32)
return result
return f
else:
DebugLibrary = lambda lib: lib
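# When pyglet.options['debug_win32'] is enabled, DebugLibrary above wraps each
# ctypes DLL handle below: every call first clears the thread's last-error
# value, invokes the real function, and, if GetLastError() then reports a
# non-zero code, appends a traceback plus the formatted Win32 error message to
# debug_win32.log. With the option disabled, DebugLibrary is the identity
# function and the raw windll objects are used directly.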
_gdi32 = DebugLibrary(windll.gdi32)
_kernel32 = DebugLibrary(windll.kernel32)
_user32 = DebugLibrary(windll.user32)
_dwmapi = DebugLibrary(windll.dwmapi)
# _gdi32
_gdi32.AddFontMemResourceEx.restype = HANDLE
_gdi32.AddFontMemResourceEx.argtypes = [PVOID, DWORD, PVOID, POINTER(DWORD)]
_gdi32.ChoosePixelFormat.restype = c_int
_gdi32.ChoosePixelFormat.argtypes = [HDC, POINTER(PIXELFORMATDESCRIPTOR)]
_gdi32.CreateBitmap.restype = HBITMAP
_gdi32.CreateBitmap.argtypes = [c_int, c_int, UINT, UINT, c_void_p]
_gdi32.CreateCompatibleDC.restype = HDC
_gdi32.CreateCompatibleDC.argtypes = [HDC]
_gdi32.CreateDIBitmap.restype = HBITMAP
_gdi32.CreateDIBitmap.argtypes = [HDC, POINTER(BITMAPINFOHEADER), DWORD, c_void_p, POINTER(BITMAPINFO), UINT]
_gdi32.CreateDIBSection.restype = HBITMAP
_gdi32.CreateDIBSection.argtypes = [HDC, c_void_p, UINT, c_void_p, HANDLE, DWORD] # POINTER(BITMAPINFO)
_gdi32.CreateFontIndirectA.restype = HFONT
_gdi32.CreateFontIndirectA.argtypes = [POINTER(LOGFONT)]
_gdi32.DeleteDC.restype = BOOL
_gdi32.DeleteDC.argtypes = [HDC]
_gdi32.DeleteObject.restype = BOOL
_gdi32.DeleteObject.argtypes = [HGDIOBJ]
_gdi32.DescribePixelFormat.restype = c_int
_gdi32.DescribePixelFormat.argtypes = [HDC, c_int, UINT, POINTER(PIXELFORMATDESCRIPTOR)]
_gdi32.ExtTextOutA.restype = BOOL
_gdi32.ExtTextOutA.argtypes = [HDC, c_int, c_int, UINT, LPRECT, c_char_p, UINT, POINTER(INT)]
_gdi32.GdiFlush.restype = BOOL
_gdi32.GdiFlush.argtypes = []
_gdi32.GetCharABCWidthsW.restype = BOOL
_gdi32.GetCharABCWidthsW.argtypes = [HDC, UINT, UINT, POINTER(ABC)]
_gdi32.GetCharWidth32W.restype = BOOL
_gdi32.GetCharWidth32W.argtypes = [HDC, UINT, UINT, POINTER(INT)]
_gdi32.GetStockObject.restype = HGDIOBJ
_gdi32.GetStockObject.argtypes = [c_int]
_gdi32.GetTextMetricsA.restype = BOOL
_gdi32.GetTextMetricsA.argtypes = [HDC, POINTER(TEXTMETRIC)]
_gdi32.SelectObject.restype = HGDIOBJ
_gdi32.SelectObject.argtypes = [HDC, HGDIOBJ]
_gdi32.SetBkColor.restype = COLORREF
_gdi32.SetBkColor.argtypes = [HDC, COLORREF]
_gdi32.SetBkMode.restype = c_int
_gdi32.SetBkMode.argtypes = [HDC, c_int]
_gdi32.SetPixelFormat.restype = BOOL
_gdi32.SetPixelFormat.argtypes = [HDC, c_int, POINTER(PIXELFORMATDESCRIPTOR)]
_gdi32.SetTextColor.restype = COLORREF
_gdi32.SetTextColor.argtypes = [HDC, COLORREF]
_gdi32.SwapBuffers.restype = BOOL
_gdi32.SwapBuffers.argtypes = [HDC]
_kernel32.CloseHandle.restype = BOOL
_kernel32.CloseHandle.argtypes = [HANDLE]
_kernel32.CreateEventW.restype = HANDLE
_kernel32.CreateEventW.argtypes = [POINTER(SECURITY_ATTRIBUTES), BOOL, BOOL, c_wchar_p]
_kernel32.CreateWaitableTimerA.restype = HANDLE
_kernel32.CreateWaitableTimerA.argtypes = [POINTER(SECURITY_ATTRIBUTES), BOOL, c_char_p]
_kernel32.GetCurrentThreadId.restype = DWORD
_kernel32.GetCurrentThreadId.argtypes = []
_kernel32.GetModuleHandleW.restype = HMODULE
_kernel32.GetModuleHandleW.argtypes = [c_wchar_p]
_kernel32.GlobalAlloc.restype = HGLOBAL
_kernel32.GlobalAlloc.argtypes = [UINT, c_size_t]
_kernel32.GlobalLock.restype = LPVOID
_kernel32.GlobalLock.argtypes = [HGLOBAL]
_kernel32.GlobalUnlock.restype = BOOL
_kernel32.GlobalUnlock.argtypes = [HGLOBAL]
_kernel32.SetLastError.restype = DWORD
_kernel32.SetLastError.argtypes = []
_kernel32.SetWaitableTimer.restype = BOOL
_kernel32.SetWaitableTimer.argtypes = [HANDLE, POINTER(LARGE_INTEGER), LONG, LPVOID, LPVOID, BOOL] # TIMERAPCPROC
_kernel32.WaitForSingleObject.restype = DWORD
_kernel32.WaitForSingleObject.argtypes = [HANDLE, DWORD]
_user32.AdjustWindowRectEx.restype = BOOL
_user32.AdjustWindowRectEx.argtypes = [LPRECT, DWORD, BOOL, DWORD]
_user32.ChangeDisplaySettingsExW.restype = LONG
_user32.ChangeDisplaySettingsExW.argtypes = [c_wchar_p, POINTER(DEVMODE), HWND, DWORD, LPVOID]
_user32.ClientToScreen.restype = BOOL
_user32.ClientToScreen.argtypes = [HWND, LPPOINT]
_user32.ClipCursor.restype = BOOL
_user32.ClipCursor.argtypes = [LPRECT]
_user32.CreateIconIndirect.restype = HICON
_user32.CreateIconIndirect.argtypes = [POINTER(ICONINFO)]
_user32.CreateWindowExW.restype = HWND
_user32.CreateWindowExW.argtypes = [DWORD, c_wchar_p, c_wchar_p, DWORD, c_int, c_int, c_int, c_int, HWND, HMENU, HINSTANCE, LPVOID]
_user32.DefWindowProcW.restype = LRESULT
_user32.DefWindowProcW.argtypes = [HWND, UINT, WPARAM, LPARAM]
_user32.DestroyWindow.restype = BOOL
_user32.DestroyWindow.argtypes = [HWND]
_user32.DispatchMessageW.restype = LRESULT
_user32.DispatchMessageW.argtypes = [LPMSG]
_user32.EnumDisplayMonitors.restype = BOOL
_user32.EnumDisplayMonitors.argtypes = [HDC, LPRECT, MONITORENUMPROC, LPARAM]
_user32.EnumDisplaySettingsW.restype = BOOL
_user32.EnumDisplaySettingsW.argtypes = [c_wchar_p, DWORD, POINTER(DEVMODE)]
_user32.FillRect.restype = c_int
_user32.FillRect.argtypes = [HDC, LPRECT, HBRUSH]
_user32.GetClientRect.restype = BOOL
_user32.GetClientRect.argtypes = [HWND, LPRECT]
_user32.GetCursorPos.restype = BOOL
_user32.GetCursorPos.argtypes = [LPPOINT]
# workaround for win 64-bit, see issue #664
_user32.GetDC.restype = c_void_p # HDC
_user32.GetDC.argtypes = [c_void_p] # [HWND]
_user32.GetDesktopWindow.restype = HWND
_user32.GetDesktopWindow.argtypes = []
_user32.GetKeyState.restype = c_short
_user32.GetKeyState.argtypes = [c_int]
_user32.GetMessageW.restype = BOOL
_user32.GetMessageW.argtypes = [LPMSG, HWND, UINT, UINT]
_user32.GetMonitorInfoW.restype = BOOL
_user32.GetMonitorInfoW.argtypes = [HMONITOR, POINTER(MONITORINFOEX)]
_user32.GetQueueStatus.restype = DWORD
_user32.GetQueueStatus.argtypes = [UINT]
_user32.GetSystemMetrics.restype = c_int
_user32.GetSystemMetrics.argtypes = [c_int]
_user32.LoadCursorW.restype = HCURSOR
_user32.LoadCursorW.argtypes = [HINSTANCE, c_wchar_p]
_user32.LoadIconW.restype = HICON
_user32.LoadIconW.argtypes = [HINSTANCE, c_wchar_p]
_user32.MapVirtualKeyW.restype = UINT
_user32.MapVirtualKeyW.argtypes = [UINT, UINT]
_user32.MapWindowPoints.restype = c_int
_user32.MapWindowPoints.argtypes = [HWND, HWND, c_void_p, UINT] # HWND, HWND, LPPOINT, UINT
_user32.MsgWaitForMultipleObjects.restype = DWORD
_user32.MsgWaitForMultipleObjects.argtypes = [DWORD, POINTER(HANDLE), BOOL, DWORD, DWORD]
_user32.PeekMessageW.restype = BOOL
_user32.PeekMessageW.argtypes = [LPMSG, HWND, UINT, UINT, UINT]
_user32.PostThreadMessageW.restype = BOOL
_user32.PostThreadMessageW.argtypes = [DWORD, UINT, WPARAM, LPARAM]
_user32.RegisterClassW.restype = ATOM
_user32.RegisterClassW.argtypes = [POINTER(WNDCLASS)]
_user32.RegisterHotKey.restype = BOOL
_user32.RegisterHotKey.argtypes = [HWND, c_int, UINT, UINT]
_user32.ReleaseCapture.restype = BOOL
_user32.ReleaseCapture.argtypes = []
# workaround for win 64-bit, see issue #664
_user32.ReleaseDC.restype = c_int32 # c_int
_user32.ReleaseDC.argtypes = [c_void_p, c_void_p] # [HWND, HDC]
_user32.ScreenToClient.restype = BOOL
_user32.ScreenToClient.argtypes = [HWND, LPPOINT]
_user32.SetCapture.restype = HWND
_user32.SetCapture.argtypes = [HWND]
_user32.SetClassLongW.restype = DWORD
_user32.SetClassLongW.argtypes = [HWND, c_int, LONG]
if IS64:
_user32.SetClassLongPtrW.restype = ULONG
_user32.SetClassLongPtrW.argtypes = [HWND, c_int, LONG_PTR]
else:
_user32.SetClassLongPtrW = _user32.SetClassLongW
_user32.SetCursor.restype = HCURSOR
_user32.SetCursor.argtypes = [HCURSOR]
_user32.SetCursorPos.restype = BOOL
_user32.SetCursorPos.argtypes = [c_int, c_int]
_user32.SetFocus.restype = HWND
_user32.SetFocus.argtypes = [HWND]
_user32.SetForegroundWindow.restype = BOOL
_user32.SetForegroundWindow.argtypes = [HWND]
_user32.SetTimer.restype = UINT_PTR
_user32.SetTimer.argtypes = [HWND, UINT_PTR, UINT, TIMERPROC]
_user32.SetWindowLongW.restype = LONG
_user32.SetWindowLongW.argtypes = [HWND, c_int, LONG]
_user32.SetWindowPos.restype = BOOL
_user32.SetWindowPos.argtypes = [HWND, HWND, c_int, c_int, c_int, c_int, UINT]
_user32.SetWindowTextW.restype = BOOL
_user32.SetWindowTextW.argtypes = [HWND, c_wchar_p]
_user32.ShowCursor.restype = c_int
_user32.ShowCursor.argtypes = [BOOL]
_user32.ShowWindow.restype = BOOL
_user32.ShowWindow.argtypes = [HWND, c_int]
_user32.TrackMouseEvent.restype = BOOL
_user32.TrackMouseEvent.argtypes = [POINTER(TRACKMOUSEEVENT)]
_user32.TranslateMessage.restype = BOOL
_user32.TranslateMessage.argtypes = [LPMSG]
_user32.UnregisterClassW.restype = BOOL
_user32.UnregisterClassW.argtypes = [c_wchar_p, HINSTANCE]
_user32.UnregisterHotKey.restype = BOOL
_user32.UnregisterHotKey.argtypes = [HWND, c_int]
# Raw inputs
_user32.RegisterRawInputDevices.restype = BOOL
_user32.RegisterRawInputDevices.argtypes = [PCRAWINPUTDEVICE, UINT, UINT]
_user32.GetRawInputData.restype = UINT
_user32.GetRawInputData.argtypes = [HRAWINPUT, UINT, LPVOID, PUINT, UINT]
# _dwmapi
_dwmapi.DwmIsCompositionEnabled.restype = c_int
_dwmapi.DwmIsCompositionEnabled.argtypes = [POINTER(INT)]
_dwmapi.DwmFlush.restype = c_int
_dwmapi.DwmFlush.argtypes = []
| 43.300725
| 131
| 0.764036
|
0fa1c30b900432ed241a8d73daff39f31764f17d
| 1,220
|
py
|
Python
|
dedupe/tfidf.py
|
dwillis/dedupe
|
d85833a5f5c2de5671df51e828ba1609de7b8f05
|
[
"MIT"
] | 1
|
2021-01-13T12:10:49.000Z
|
2021-01-13T12:10:49.000Z
|
dedupe/tfidf.py
|
dwillis/dedupe
|
d85833a5f5c2de5671df51e828ba1609de7b8f05
|
[
"MIT"
] | null | null | null |
dedupe/tfidf.py
|
dwillis/dedupe
|
d85833a5f5c2de5671df51e828ba1609de7b8f05
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import math
import re
import core
class TfidfPredicate(float):
def __new__(self, threshold):
return float.__new__(self, threshold)
def __init__(self, threshold):
self.__name__ = 'TF-IDF:' + str(threshold)
def documentFrequency(corpus):
num_docs = 0
term_num_docs = collections.defaultdict(int)
num_docs = len(corpus)
stop_word_threshold = num_docs * 1
stop_words = []
for (doc_id, doc) in corpus.iteritems():
tokens = getTokens(doc)
for token in set(tokens):
term_num_docs[token] += 1
for (term, count) in term_num_docs.iteritems():
if count < stop_word_threshold:
term_num_docs[term] = math.log((num_docs + 0.5) / (float(count) + 0.5))
else:
term_num_docs[term] = 0
stop_words.append(term)
if stop_words:
print 'stop words:', stop_words
# term : num_docs_containing_term
term_num_docs_default = collections.defaultdict(lambda : math.log((num_docs + 0.5) / 0.5))
term_num_docs_default.update(term_num_docs)
return term_num_docs_default
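# A small worked example of the smoothed IDF computed above, assuming a
# 4-document corpus:
#   a term found in 1 document gets      log((4 + 0.5) / (1 + 0.5)) = log(3) ~= 1.099
#   a term found in every document hits the stop-word threshold and gets idf 0
#   a term never seen falls back to      log((4 + 0.5) / 0.5)       = log(9) ~= 2.197
# (the 0.5 terms smooth the ratio so rare or unseen terms neither blow up nor
# divide by zero).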
def getTokens(str):
return str.lower().split()
| 25.416667
| 94
| 0.65082
|
bc063e66b880cc772a374f21694e2efda372c8eb
| 5,641
|
py
|
Python
|
sources_non_forked/ultisnips/pythonx/UltiSnips/text_objects/transformation.py
|
khatchad/vimrc
|
e4fb69d3b7a8635f0881461853c9144763fae4c7
|
[
"MIT"
] | 1
|
2017-04-24T04:07:48.000Z
|
2017-04-24T04:07:48.000Z
|
sources_non_forked/ultisnips/pythonx/UltiSnips/text_objects/transformation.py
|
RobotMa/vimrc
|
5beda397d3c6f88b8542d843107a64c42bf13c93
|
[
"MIT"
] | null | null | null |
sources_non_forked/ultisnips/pythonx/UltiSnips/text_objects/transformation.py
|
RobotMa/vimrc
|
5beda397d3c6f88b8542d843107a64c42bf13c93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
"""Implements TabStop transformations."""
import re
import sys
from UltiSnips.text import unescape, fill_in_whitespace
from UltiSnips.text_objects.mirror import Mirror
def _find_closing_brace(string, start_pos):
"""Finds the corresponding closing brace after start_pos."""
bracks_open = 1
escaped = False
for idx, char in enumerate(string[start_pos:]):
if char == "(":
if not escaped:
bracks_open += 1
elif char == ")":
if not escaped:
bracks_open -= 1
if not bracks_open:
return start_pos + idx + 1
if char == "\\":
escaped = not escaped
else:
escaped = False
def _split_conditional(string):
"""Split the given conditional 'string' into its arguments."""
bracks_open = 0
args = []
carg = ""
escaped = False
for idx, char in enumerate(string):
if char == "(":
if not escaped:
bracks_open += 1
elif char == ")":
if not escaped:
bracks_open -= 1
elif char == ":" and not bracks_open and not escaped:
args.append(carg)
carg = ""
escaped = False
continue
carg += char
if char == "\\":
escaped = not escaped
else:
escaped = False
args.append(carg)
return args
def _replace_conditional(match, string):
"""Replaces a conditional match in a transformation."""
conditional_match = _CONDITIONAL.search(string)
while conditional_match:
start = conditional_match.start()
end = _find_closing_brace(string, start + 4)
args = _split_conditional(string[start + 4 : end - 1])
rv = ""
if match.group(int(conditional_match.group(1))):
rv = unescape(_replace_conditional(match, args[0]))
elif len(args) > 1:
rv = unescape(_replace_conditional(match, args[1]))
string = string[:start] + rv + string[end:]
conditional_match = _CONDITIONAL.search(string)
return string
_ONE_CHAR_CASE_SWITCH = re.compile(r"\\([ul].)", re.DOTALL)
_LONG_CASEFOLDINGS = re.compile(r"\\([UL].*?)\\E", re.DOTALL)
_DOLLAR = re.compile(r"\$(\d+)", re.DOTALL)
_CONDITIONAL = re.compile(r"\(\?(\d+):", re.DOTALL)
class _CleverReplace:
"""Mimics TextMates replace syntax."""
def __init__(self, expression):
self._expression = expression
def replace(self, match):
"""Replaces 'match' through the correct replacement string."""
transformed = self._expression
# Replace all $? with capture groups
transformed = _DOLLAR.subn(lambda m: match.group(int(m.group(1))), transformed)[
0
]
# Replace Case switches
def _one_char_case_change(match):
"""Replaces one character case changes."""
if match.group(1)[0] == "u":
return match.group(1)[-1].upper()
else:
return match.group(1)[-1].lower()
transformed = _ONE_CHAR_CASE_SWITCH.subn(_one_char_case_change, transformed)[0]
def _multi_char_case_change(match):
"""Replaces multi character case changes."""
if match.group(1)[0] == "U":
return match.group(1)[1:].upper()
else:
return match.group(1)[1:].lower()
transformed = _LONG_CASEFOLDINGS.subn(_multi_char_case_change, transformed)[0]
transformed = _replace_conditional(match, transformed)
return unescape(fill_in_whitespace(transformed))
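# Illustrative behaviour of the handlers above, assuming capture group 1
# matched the text "hello" (examples only, not part of the original source):
#   "$1"          -> "hello"   (_DOLLAR substitution)
#   "\u$1"        -> "Hello"   (one-character case switch)
#   "\U$1\E"      -> "HELLO"   (long case folding, terminated by \E)
#   "(?1:yes:no)" -> "yes"     (conditional: group 1 is non-empty)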
# flag used so the missing-unidecode warning is only displayed once
UNIDECODE_ALERT_RAISED = False
class TextObjectTransformation:
"""Base class for Transformations and ${VISUAL}."""
def __init__(self, token):
self._convert_to_ascii = False
self._find = None
if token.search is None:
return
flags = 0
self._match_this_many = 1
if token.options:
if "g" in token.options:
self._match_this_many = 0
if "i" in token.options:
flags |= re.IGNORECASE
if "m" in token.options:
flags |= re.MULTILINE
if "a" in token.options:
self._convert_to_ascii = True
self._find = re.compile(token.search, flags | re.DOTALL)
self._replace = _CleverReplace(token.replace)
def _transform(self, text):
"""Do the actual transform on the given text."""
global UNIDECODE_ALERT_RAISED # pylint:disable=global-statement
if self._convert_to_ascii:
try:
import unidecode
text = unidecode.unidecode(text)
except Exception: # pylint:disable=broad-except
if UNIDECODE_ALERT_RAISED == False:
UNIDECODE_ALERT_RAISED = True
sys.stderr.write(
"Please install unidecode python package in order to "
"be able to make ascii conversions.\n"
)
if self._find is None:
return text
return self._find.subn(self._replace.replace, text, self._match_this_many)[0]
class Transformation(Mirror, TextObjectTransformation):
"""See module docstring."""
def __init__(self, parent, ts, token):
Mirror.__init__(self, parent, ts, token)
TextObjectTransformation.__init__(self, token)
def _get_text(self):
return self._transform(self._ts.current_text)
| 31.513966
| 88
| 0.587839
|
6d215eed1e981386d76062fcdf77f1f55f00234b
| 15,764
|
py
|
Python
|
certbot/tests/account_test.py
|
silverbacknet/certbot
|
270b5535e24fd3dab4c05fa8929adca8117942f1
|
[
"Apache-2.0"
] | null | null | null |
certbot/tests/account_test.py
|
silverbacknet/certbot
|
270b5535e24fd3dab4c05fa8929adca8117942f1
|
[
"Apache-2.0"
] | 2
|
2019-11-20T07:08:26.000Z
|
2020-11-05T23:31:48.000Z
|
certbot/tests/account_test.py
|
silverbacknet/certbot
|
270b5535e24fd3dab4c05fa8929adca8117942f1
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for certbot._internal.account."""
import datetime
import json
import unittest
import josepy as jose
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
import pytz
from acme import messages
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import misc
from certbot.compat import os
import certbot.tests.util as test_util
KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
class AccountTest(unittest.TestCase):
"""Tests for certbot._internal.account.Account."""
def setUp(self):
from certbot._internal.account import Account
self.regr = mock.MagicMock()
self.meta = Account.Meta(
creation_host="test.certbot.org",
creation_dt=datetime.datetime(
2015, 7, 4, 14, 4, 10, tzinfo=pytz.UTC))
self.acc = Account(self.regr, KEY, self.meta)
self.regr.__repr__ = mock.MagicMock(return_value="i_am_a_regr")
with mock.patch("certbot._internal.account.socket") as mock_socket:
mock_socket.getfqdn.return_value = "test.certbot.org"
with mock.patch("certbot._internal.account.datetime") as mock_dt:
mock_dt.datetime.now.return_value = self.meta.creation_dt
self.acc_no_meta = Account(self.regr, KEY)
def test_init(self):
self.assertEqual(self.regr, self.acc.regr)
self.assertEqual(KEY, self.acc.key)
self.assertEqual(self.meta, self.acc_no_meta.meta)
def test_id(self):
self.assertEqual(
self.acc.id, "7adac10320f585ddf118429c0c4af2cd")
def test_slug(self):
self.assertEqual(
self.acc.slug, "test.certbot.org@2015-07-04T14:04:10Z (7ada)")
def test_repr(self):
self.assertTrue(repr(self.acc).startswith(
"<Account(i_am_a_regr, 7adac10320f585ddf118429c0c4af2cd, Meta("))
class MetaTest(unittest.TestCase):
"""Tests for certbot._internal.account.Meta."""
def test_deserialize_partial(self):
from certbot._internal.account import Account
meta = Account.Meta.json_loads(
'{'
' "creation_dt": "2020-06-13T07:46:45Z",'
' "creation_host": "hyperion.localdomain"'
'}')
self.assertIsNotNone(meta.creation_dt)
self.assertIsNotNone(meta.creation_host)
self.assertIsNone(meta.register_to_eff)
def test_deserialize_full(self):
from certbot._internal.account import Account
meta = Account.Meta.json_loads(
'{'
' "creation_dt": "2020-06-13T07:46:45Z",'
' "creation_host": "hyperion.localdomain",'
' "register_to_eff": "bar"'
'}')
self.assertIsNotNone(meta.creation_dt)
self.assertIsNotNone(meta.creation_host)
self.assertIsNotNone(meta.register_to_eff)
class ReportNewAccountTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.account.report_new_account."""
def _call(self):
from certbot._internal.account import report_new_account
report_new_account(self.config)
@mock.patch("certbot._internal.account.zope.component.queryUtility")
def test_no_reporter(self, mock_zope):
mock_zope.return_value = None
self._call()
@mock.patch("certbot._internal.account.zope.component.queryUtility")
def test_it(self, mock_zope):
self._call()
call_list = mock_zope().add_message.call_args_list
self.assertTrue(self.config.config_dir in call_list[0][0][0])
class AccountMemoryStorageTest(unittest.TestCase):
"""Tests for certbot._internal.account.AccountMemoryStorage."""
def setUp(self):
from certbot._internal.account import AccountMemoryStorage
self.storage = AccountMemoryStorage()
def test_it(self):
account = mock.Mock(id="x")
self.assertEqual([], self.storage.find_all())
self.assertRaises(errors.AccountNotFound, self.storage.load, "x")
self.storage.save(account, None)
self.assertEqual([account], self.storage.find_all())
self.assertEqual(account, self.storage.load("x"))
self.storage.save(account, None)
self.assertEqual([account], self.storage.find_all())
class AccountFileStorageTest(test_util.ConfigTestCase):
"""Tests for certbot._internal.account.AccountFileStorage."""
def setUp(self):
super(AccountFileStorageTest, self).setUp()
from certbot._internal.account import AccountFileStorage
self.storage = AccountFileStorage(self.config)
from certbot._internal.account import Account
new_authzr_uri = "hi"
self.acc = Account(
regr=messages.RegistrationResource(
uri=None, body=messages.Registration(),
new_authzr_uri=new_authzr_uri),
key=KEY)
self.mock_client = mock.MagicMock()
self.mock_client.directory.new_authz = new_authzr_uri
def test_init_creates_dir(self):
self.assertTrue(os.path.isdir(
misc.underscores_for_unsupported_characters_in_path(self.config.accounts_dir)))
def test_save_and_restore(self):
self.storage.save(self.acc, self.mock_client)
account_path = os.path.join(self.config.accounts_dir, self.acc.id)
self.assertTrue(os.path.exists(account_path))
for file_name in "regr.json", "meta.json", "private_key.json":
self.assertTrue(os.path.exists(
os.path.join(account_path, file_name)))
self.assertTrue(
filesystem.check_mode(os.path.join(account_path, "private_key.json"), 0o400))
# restore
loaded = self.storage.load(self.acc.id)
self.assertEqual(self.acc, loaded)
def test_save_and_restore_old_version(self):
"""Saved regr should include a new_authzr_uri for older Certbots"""
self.storage.save(self.acc, self.mock_client)
path = os.path.join(self.config.accounts_dir, self.acc.id, "regr.json")
with open(path, "r") as f:
regr = json.load(f)
self.assertTrue("new_authzr_uri" in regr)
def test_update_regr(self):
self.storage.update_regr(self.acc, self.mock_client)
account_path = os.path.join(self.config.accounts_dir, self.acc.id)
self.assertTrue(os.path.exists(account_path))
self.assertTrue(os.path.exists(os.path.join(account_path, "regr.json")))
self.assertFalse(os.path.exists(os.path.join(account_path, "meta.json")))
self.assertFalse(os.path.exists(os.path.join(account_path, "private_key.json")))
def test_update_meta(self):
self.storage.update_meta(self.acc)
account_path = os.path.join(self.config.accounts_dir, self.acc.id)
self.assertTrue(os.path.exists(account_path))
self.assertTrue(os.path.exists(os.path.join(account_path, "meta.json")))
self.assertFalse(os.path.exists(os.path.join(account_path, "regr.json")))
self.assertFalse(os.path.exists(os.path.join(account_path, "private_key.json")))
def test_find_all(self):
self.storage.save(self.acc, self.mock_client)
self.assertEqual([self.acc], self.storage.find_all())
def test_find_all_none_empty_list(self):
self.assertEqual([], self.storage.find_all())
def test_find_all_accounts_dir_absent(self):
os.rmdir(self.config.accounts_dir)
self.assertEqual([], self.storage.find_all())
def test_find_all_load_skips(self):
# pylint: disable=protected-access
self.storage._load_for_server_path = mock.MagicMock(
side_effect=["x", errors.AccountStorageError, "z"])
with mock.patch("certbot._internal.account.os.listdir") as mock_listdir:
mock_listdir.return_value = ["x", "y", "z"]
self.assertEqual(["x", "z"], self.storage.find_all())
def test_load_non_existent_raises_error(self):
self.assertRaises(errors.AccountNotFound, self.storage.load, "missing")
def _set_server(self, server):
self.config.server = server
from certbot._internal.account import AccountFileStorage
self.storage = AccountFileStorage(self.config)
def test_find_all_neither_exists(self):
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertEqual([], self.storage.find_all())
self.assertEqual([], self.storage.find_all())
self.assertFalse(os.path.islink(self.config.accounts_dir))
def test_find_all_find_before_save(self):
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertEqual([], self.storage.find_all())
self.storage.save(self.acc, self.mock_client)
self.assertEqual([self.acc], self.storage.find_all())
self.assertEqual([self.acc], self.storage.find_all())
self.assertFalse(os.path.islink(self.config.accounts_dir))
# we shouldn't have created a v1 account
prev_server_path = 'https://acme-staging.api.letsencrypt.org/directory'
self.assertFalse(os.path.isdir(self.config.accounts_dir_for_server_path(prev_server_path)))
def test_find_all_save_before_find(self):
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
self.assertEqual([self.acc], self.storage.find_all())
self.assertEqual([self.acc], self.storage.find_all())
self.assertFalse(os.path.islink(self.config.accounts_dir))
self.assertTrue(os.path.isdir(self.config.accounts_dir))
prev_server_path = 'https://acme-staging.api.letsencrypt.org/directory'
self.assertFalse(os.path.isdir(self.config.accounts_dir_for_server_path(prev_server_path)))
def test_find_all_server_downgrade(self):
# don't use v2 accounts with a v1 url
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertEqual([], self.storage.find_all())
self.storage.save(self.acc, self.mock_client)
self.assertEqual([self.acc], self.storage.find_all())
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.assertEqual([], self.storage.find_all())
def test_upgrade_version_staging(self):
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertEqual([self.acc], self.storage.find_all())
def test_upgrade_version_production(self):
self._set_server('https://acme-v01.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
self._set_server('https://acme-v02.api.letsencrypt.org/directory')
self.assertEqual([self.acc], self.storage.find_all())
@mock.patch('certbot.compat.os.rmdir')
def test_corrupted_account(self, mock_rmdir):
# pylint: disable=protected-access
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
mock_rmdir.side_effect = OSError
self.storage._load_for_server_path = mock.MagicMock(
side_effect=errors.AccountStorageError)
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertEqual([], self.storage.find_all())
def test_upgrade_load(self):
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
prev_account = self.storage.load(self.acc.id)
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
account = self.storage.load(self.acc.id)
self.assertEqual(prev_account, account)
def test_upgrade_load_single_account(self):
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
prev_account = self.storage.load(self.acc.id)
self._set_server_and_stop_symlink('https://acme-staging-v02.api.letsencrypt.org/directory')
account = self.storage.load(self.acc.id)
self.assertEqual(prev_account, account)
def test_load_ioerror(self):
self.storage.save(self.acc, self.mock_client)
mock_open = mock.mock_open()
mock_open.side_effect = IOError
with mock.patch("six.moves.builtins.open", mock_open):
self.assertRaises(
errors.AccountStorageError, self.storage.load, self.acc.id)
def test_save_ioerrors(self):
mock_open = mock.mock_open()
mock_open.side_effect = IOError # TODO: [None, None, IOError]
with mock.patch("six.moves.builtins.open", mock_open):
self.assertRaises(
errors.AccountStorageError, self.storage.save,
self.acc, self.mock_client)
def test_delete(self):
self.storage.save(self.acc, self.mock_client)
self.storage.delete(self.acc.id)
self.assertRaises(errors.AccountNotFound, self.storage.load, self.acc.id)
def test_delete_no_account(self):
self.assertRaises(errors.AccountNotFound, self.storage.delete, self.acc.id)
def _assert_symlinked_account_removed(self):
# create v1 account
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
# ensure v2 isn't already linked to it
with mock.patch('certbot._internal.constants.LE_REUSE_SERVERS', {}):
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertRaises(errors.AccountNotFound, self.storage.load, self.acc.id)
def _test_delete_folders(self, server_url):
# create symlinked servers
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.storage.save(self.acc, self.mock_client)
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.storage.load(self.acc.id)
# delete starting at given server_url
self._set_server(server_url)
self.storage.delete(self.acc.id)
# make sure we're gone from both urls
self._set_server('https://acme-staging.api.letsencrypt.org/directory')
self.assertRaises(errors.AccountNotFound, self.storage.load, self.acc.id)
self._set_server('https://acme-staging-v02.api.letsencrypt.org/directory')
self.assertRaises(errors.AccountNotFound, self.storage.load, self.acc.id)
def test_delete_folders_up(self):
self._test_delete_folders('https://acme-staging.api.letsencrypt.org/directory')
self._assert_symlinked_account_removed()
def test_delete_folders_down(self):
self._test_delete_folders('https://acme-staging-v02.api.letsencrypt.org/directory')
self._assert_symlinked_account_removed()
def _set_server_and_stop_symlink(self, server_path):
self._set_server(server_path)
with open(os.path.join(self.config.accounts_dir, 'foo'), 'w') as f:
f.write('bar')
def test_delete_shared_account_up(self):
self._set_server_and_stop_symlink('https://acme-staging-v02.api.letsencrypt.org/directory')
self._test_delete_folders('https://acme-staging.api.letsencrypt.org/directory')
def test_delete_shared_account_down(self):
self._set_server_and_stop_symlink('https://acme-staging-v02.api.letsencrypt.org/directory')
self._test_delete_folders('https://acme-staging-v02.api.letsencrypt.org/directory')
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 43.546961
| 99
| 0.688594
|
807d657fc565e899b959bfaa2484b8d73f93e971
| 321
|
py
|
Python
|
gym/envs/toy_text/__init__.py
|
richardmeng/gym
|
a6c06fc1e3e8842c3e3fffd9e701dd1beeba5541
|
[
"MIT"
] | 49
|
2017-12-11T11:00:02.000Z
|
2022-03-30T05:19:31.000Z
|
gym_customize/python2/gym/envs/toy_text/__init__.py
|
KuangenZhang/hybrid-zero-RL-rabbit
|
1b67d4082e279205e676972932345c3e83227538
|
[
"MIT"
] | 2
|
2018-01-01T17:39:56.000Z
|
2019-07-24T04:49:08.000Z
|
gym_customize/python2/gym/envs/toy_text/__init__.py
|
KuangenZhang/hybrid-zero-RL-rabbit
|
1b67d4082e279205e676972932345c3e83227538
|
[
"MIT"
] | 12
|
2017-12-13T11:52:17.000Z
|
2020-12-03T00:53:29.000Z
|
from gym.envs.toy_text.blackjack import BlackjackEnv
from gym.envs.toy_text.roulette import RouletteEnv
from gym.envs.toy_text.frozen_lake import FrozenLakeEnv
from gym.envs.toy_text.nchain import NChainEnv
from gym.envs.toy_text.hotter_colder import HotterColder
from gym.envs.toy_text.guessing_game import GuessingGame
| 45.857143
| 56
| 0.869159
|
3a3c0740f9006944e85dc707bb6f4aff63f4b0ab
| 1,873
|
py
|
Python
|
bedevere/__main__.py
|
sabderemane/bedevere
|
1f7ab8324e3f75071af10fd086bccfe44067d869
|
[
"Apache-2.0"
] | null | null | null |
bedevere/__main__.py
|
sabderemane/bedevere
|
1f7ab8324e3f75071af10fd086bccfe44067d869
|
[
"Apache-2.0"
] | null | null | null |
bedevere/__main__.py
|
sabderemane/bedevere
|
1f7ab8324e3f75071af10fd086bccfe44067d869
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import importlib
import os
import sys
import traceback
import aiohttp
from aiohttp import web
import cachetools
from gidgethub import aiohttp as gh_aiohttp
from gidgethub import routing
from gidgethub import sansio
from . import backport, gh_issue, close_pr, filepaths, news, stage
import sentry_sdk
router = routing.Router(backport.router, gh_issue.router, close_pr.router,
filepaths.router, news.router,
stage.router)
cache = cachetools.LRUCache(maxsize=500)
sentry_sdk.init(os.environ.get("SENTRY_DSN"))
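# main() below is the single webhook endpoint: it parses the incoming GitHub
# event with sansio.Event.from_http (which also checks the payload signature
# when GH_SECRET is set), answers "ping" deliveries immediately, and otherwise
# dispatches the event to the combined router with an authenticated gidgethub
# aiohttp client; any exception is logged and returned as HTTP 500.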
async def main(request):
try:
body = await request.read()
secret = os.environ.get("GH_SECRET")
event = sansio.Event.from_http(request.headers, body, secret=secret)
print('GH delivery ID', event.delivery_id, file=sys.stderr)
if event.event == "ping":
return web.Response(status=200)
oauth_token = os.environ.get("GH_AUTH")
async with aiohttp.ClientSession() as session:
gh = gh_aiohttp.GitHubAPI(session, "python/bedevere",
oauth_token=oauth_token,
cache=cache)
# Give GitHub some time to reach internal consistency.
await asyncio.sleep(1)
await router.dispatch(event, gh, session=session)
try:
print('GH requests remaining:', gh.rate_limit.remaining)
except AttributeError:
pass
return web.Response(status=200)
except Exception as exc:
traceback.print_exc(file=sys.stderr)
return web.Response(status=500)
if __name__ == "__main__": # pragma: no cover
app = web.Application()
app.router.add_post("/", main)
port = os.environ.get("PORT")
if port is not None:
port = int(port)
web.run_app(app, port=port)
| 32.293103
| 76
| 0.640149
|
1c9c0f212c16b26823b1296c2ef213a7d4f4abb4
| 3,207
|
py
|
Python
|
Iris_recog/locate.py
|
Theocrat/Iris
|
5aaba5dc915f53d148106c0c6bca57e09c548d9c
|
[
"MIT"
] | null | null | null |
Iris_recog/locate.py
|
Theocrat/Iris
|
5aaba5dc915f53d148106c0c6bca57e09c548d9c
|
[
"MIT"
] | null | null | null |
Iris_recog/locate.py
|
Theocrat/Iris
|
5aaba5dc915f53d148106c0c6bca57e09c548d9c
|
[
"MIT"
] | null | null | null |
from pupil import *
from iris import *
from numpy import zeros
from skimage import draw
from imworks import *
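# Summary of the routine below: locate() scans the binary pupil image from
# pupil_detect() for its east/west/north/south extents to get a bounding box,
# takes the box centre as the pupil centre, then walks outward along the
# horizontal line through that centre on the iris image from iris_detect() to
# find the iris extent. It returns the pupil radius, the iris radius and the
# centre as (row, col).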
def locate(fname):
pupil_img = pupil_detect(fname)
rows = pupil_img.shape[0]
cols = pupil_img.shape[1]
for col in range(cols):
col = cols - 1 - col
if sum(pupil_img[:,col]) > 0:
east_mark = col
break
for col in range(east_mark):
col = east_mark - 1 - col
if sum(pupil_img[:,col]) == 0:
west_mark = col
break
for row in range(rows):
row = rows - 1 - row
if sum(pupil_img[row,:]) > 0:
south_mark = row
break
for row in range(south_mark):
row = south_mark - 1 - row
if sum(pupil_img[row,:]) == 0:
north_mark = row
break
    center_x = (west_mark + east_mark) // 2  # integer division: the centre is used as an array index below
    center_y = (north_mark + south_mark) // 2
lines = zeros([rows,cols])
rr, cc = draw.line(south_mark,east_mark,north_mark,east_mark)
lines[rr,cc] = 1
rr, cc = draw.line(south_mark,west_mark,north_mark,west_mark)
lines[rr,cc] = 1
rr, cc = draw.line(south_mark,west_mark,south_mark,east_mark)
lines[rr,cc] = 1
rr, cc = draw.line(north_mark,west_mark,north_mark,east_mark)
lines[rr,cc] = 1
rr, cc = draw.circle(center_y,center_x,3)
lines[rr,cc] = 1
#Locating Iris bounding box
iris_img = iris_detect(fname)
x = east_mark
while(iris_img[center_y,x]) == 1: x += 1
iris_east = x
x = west_mark
while(iris_img[center_y,x]) == 1: x -= 1
iris_west = x
rr, cc = draw.line(0,iris_east,rows-1,iris_east)
lines[rr,cc] = 1
rr, cc = draw.line(0,iris_west,rows-1,iris_west)
lines[rr,cc] = 1
# Displaying bounding boxes with lines
full_color = zeros([rows,cols,3])
for i in range(rows):
for j in range(cols):
full_color[i,j,0] = pupil_img[i,j]
full_color[i,j,1] = lines[i,j]
for i in range(rows):
for j in range(cols):
full_color[i,j,2] = iris_img[i,j]
#print('Eastern distance: ' + str(iris_east - center_x))
#print('Western distance: ' + str(center_x - iris_west))
#disp(full_color)
# Generating mask:
radius = max([(iris_east - center_x),(center_x - iris_west)])
mask = zeros([rows,cols])
rr, cc = draw.circle(center_y, center_x,radius)
for i in range(len(rr)):
if rr[i] < 0: rr[i] = 0
if rr[i] >= rows: rr[i] = rows - 1
for i in range(len(cc)):
if cc[i] < 0: cc[i] = 0
if cc[i] >= cols: cc[i] = cols - 1
mask[rr,cc] = 1
rr, cc = draw.circle(center_y, center_x,(0.5*(east_mark-west_mark)))
for i in range(len(rr)):
if rr[i] < 0: rr[i] = 0
if rr[i] >= rows: rr[i] = rows - 1
for i in range(len(cc)):
if cc[i] < 0: cc[i] = 0
if cc[i] >= cols: cc[i] = cols - 1
mask[rr,cc] = 0
img = bnw(fname)
pad = 6
masked_eye = zeros([img.shape[0]-2*pad,img.shape[1]-2*pad])
for i in range(rows):
for j in range(cols):
masked_eye[i,j] = min([mask[i,j],img[pad+i,pad+j]])
check_mask = zeros([rows,cols,3])
for i in range(rows):
for j in range(cols):
check_mask[i,j,0] = img[i,j] * 0.8
check_mask[i,j,1] = img[i,j] * (0.8 + 0.2*mask[i-2*pad,j-2*pad])
check_mask[i,j,2] = img[i,j] * (0.8 + 0.2*mask[i-2*pad,j-2*pad])
inner_radius = 0.5 * (east_mark - west_mark)
outer_radius = 0.5 * (iris_east - iris_west)
center_r, center_c = center_y,center_x
return [inner_radius, outer_radius, (center_r, center_c)]
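# Minimal usage sketch (not part of the original module; the image path is a
# hypothetical placeholder):
if __name__ == '__main__':
    inner_radius, outer_radius, (center_row, center_col) = locate('sample_eye.jpg')
    print('pupil radius:', inner_radius)
    print('iris radius:', outer_radius)
    print('centre (row, col):', (center_row, center_col))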
| 25.862903
| 69
| 0.641721
|
cea47fb5b07dbc601f680ea22f3791d575d54ca8
| 873
|
py
|
Python
|
gen_captcha.py
|
thbspan/tensorflow_captcha
|
d7ef2b64df95b040fb6e7479e5ec137c89083030
|
[
"Apache-2.0"
] | null | null | null |
gen_captcha.py
|
thbspan/tensorflow_captcha
|
d7ef2b64df95b040fb6e7479e5ec137c89083030
|
[
"Apache-2.0"
] | 7
|
2020-09-26T02:29:17.000Z
|
2022-03-12T00:40:35.000Z
|
gen_captcha.py
|
thbspan/tensorflow_captcha
|
d7ef2b64df95b040fb6e7479e5ec137c89083030
|
[
"Apache-2.0"
] | null | null | null |
import random
from captcha.image import ImageCaptcha
import config
def random_captcha_text(char_set=None, captcha_size=4):
if char_set is None:
char_set = config.VALIDATE_CHAR
captcha_text = []
for i in range(captcha_size):
c = random.choice(char_set)
captcha_text.append(c)
return captcha_text
# generate a captcha image for a randomly chosen text
def gen_captcha_text_and_image():
image = ImageCaptcha(width=config.IMAGE_WIDTH, height=config.IMAGE_HEIGHT, font_sizes=[config.FONT_SIZE])
    # get a randomly generated captcha text (list of characters)
captcha_text = random_captcha_text()
    # join the list of characters into a single string
captcha_text = ''.join(captcha_text)
    # render the captcha image
image.generate(captcha_text)
    # write the image to a file
image.write(captcha_text, config.IMAGE_DIR + captcha_text + '.jpg')
if __name__ == '__main__':
num = 10000
for i in range(num):
gen_captcha_text_and_image()
print("gen done!")
| 24.25
| 109
| 0.701031
|
c7d723ccf13e4ca0045d70dd69540d20bb1c9c49
| 572
|
py
|
Python
|
blender/arm/logicnode/material/LN_set_object_material.py
|
Lykdraft/armory
|
da1cf33930ce9a8b1865d35c128fe4842bef2933
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/material/LN_set_object_material.py
|
Lykdraft/armory
|
da1cf33930ce9a8b1865d35c128fe4842bef2933
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/material/LN_set_object_material.py
|
Lykdraft/armory
|
da1cf33930ce9a8b1865d35c128fe4842bef2933
|
[
"Zlib"
] | null | null | null |
from arm.logicnode.arm_nodes import *
class SetMaterialNode(ArmLogicTreeNode):
"""Use to set the material of an object."""
bl_idname = 'LNSetMaterialNode'
bl_label = 'Set Object Material'
arm_version = 1
def init(self, context):
super(SetMaterialNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmNodeSocketObject', 'Object')
self.add_input('NodeSocketShader', 'Material')
self.add_output('ArmNodeSocketAction', 'Out')
add_node(SetMaterialNode, category=PKG_AS_CATEGORY)
| 33.647059
| 55
| 0.704545
|
f6d10861138cbaa1e6428a14c38a1ba78d8350be
| 1,813
|
py
|
Python
|
alexa-aws-whatsnew/code/awsfeedparserlambda.py
|
karthiksambandam/my-sam-apps
|
ad22c423231e2099d5c38793827f6173bb9f0acb
|
[
"Apache-2.0"
] | null | null | null |
alexa-aws-whatsnew/code/awsfeedparserlambda.py
|
karthiksambandam/my-sam-apps
|
ad22c423231e2099d5c38793827f6173bb9f0acb
|
[
"Apache-2.0"
] | null | null | null |
alexa-aws-whatsnew/code/awsfeedparserlambda.py
|
karthiksambandam/my-sam-apps
|
ad22c423231e2099d5c38793827f6173bb9f0acb
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import json
from botocore.vendored import requests
import feedparser
import boto3
from datetime import datetime
from datetime import timedelta
import time
import re
import os
def cleanhtml(raw_html):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
cleantext = cleantext.replace(' ','')
return cleantext
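# Illustrative example of cleanhtml() above on a made-up feed snippet:
#   '<p>AWS Lambda now supports X</p>' -> 'AWS Lambda now supports X'
# (HTML tags are stripped and literal ' ' entities are removed).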
def lambda_handler(event, context):
aws_rss_url = "https://aws.amazon.com/new/feed/"
aws_feed_tbl = os.environ['awslaunchdetails_tbl']
ttl_days = int(os.environ['retention_value'])
feed = feedparser.parse( aws_rss_url )
dynamo_tbl = boto3.resource('dynamodb').Table(aws_feed_tbl)
filter_date = datetime.now() - timedelta(days=1)
expiry_date = datetime.now() + timedelta(days=ttl_days)
expiry_epoch = long(time.mktime(expiry_date.timetuple()))
for item in feed[ "items" ]:
record={}
record["guid"]=item["guid"]
record["title"]=item[ "title" ]
record["description"]=cleanhtml(item["description"])
record["url"]=item["link"]
record["catagories"]=[]
record["ttl"]=expiry_epoch
for tag in item["tags"]:
categories=tag["term"].split(",")
for everyCat in categories:
result=everyCat.partition("aws-")
if not result[2]:
result=everyCat.partition("amazon-")
if result[2]:
text = result[2].replace("-"," ")
record["catagories"].append(text)
offset_str = item["published"].rpartition(' ')
pub_datetime = datetime.strptime(offset_str[0], '%a, %d %b %Y %H:%M:%S')
record["pub_date"]=pub_datetime.strftime("%Y-%m-%d")
if pub_datetime > filter_date:
dynamo_tbl.put_item(Item=record)
| 34.865385
| 80
| 0.629344
|
197b5cc2e0d87cee5363678b49f4fd12c0b9f776
| 709
|
py
|
Python
|
Desafios/desafio-79.py
|
marielitonmb/Curso-Python3
|
26215c47c4d1eadf940b8024305b7e9ff600883b
|
[
"MIT"
] | null | null | null |
Desafios/desafio-79.py
|
marielitonmb/Curso-Python3
|
26215c47c4d1eadf940b8024305b7e9ff600883b
|
[
"MIT"
] | null | null | null |
Desafios/desafio-79.py
|
marielitonmb/Curso-Python3
|
26215c47c4d1eadf940b8024305b7e9ff600883b
|
[
"MIT"
] | null | null | null |
# Lesson 17 - Challenge 79: Unique values in a list
# Read several numbers and put them into a list; if a number is already in the list, it is not added again.
# At the end, show all the unique values read, in ascending order.
lista = []
while True:
num = int(input('Digite um numero: '))
if num in lista:
print(f'{num} ja foi registrado, informe outro numero.')
else:
lista.append(num)
continua = ' '
while continua not in 'SN':
continua = str(input('Deseja continuar [S/N]? ')).upper().strip()[0]
if continua == 'N':
break
elif continua == 'S':
pass
lista.sort()
print('-'*30)
print(f'Lista final em ordem crescente:')
print(f'{lista}')
| 28.36
| 103
| 0.631876
|
adae10f0bb6ad773d2b04364222c419d1e0a0b99
| 1,169
|
py
|
Python
|
nipype/interfaces/camino/tests/test_auto_Image2Voxel.py
|
abelalez/nipype
|
878271bd906768f11c4cabd04e5d1895551ce8a7
|
[
"Apache-2.0"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
nipype/interfaces/camino/tests/test_auto_Image2Voxel.py
|
abelalez/nipype
|
878271bd906768f11c4cabd04e5d1895551ce8a7
|
[
"Apache-2.0"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
nipype/interfaces/camino/tests/test_auto_Image2Voxel.py
|
abelalez/nipype
|
878271bd906768f11c4cabd04e5d1895551ce8a7
|
[
"Apache-2.0"
] | 1
|
2020-07-17T12:49:49.000Z
|
2020-07-17T12:49:49.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..convert import Image2Voxel
def test_Image2Voxel_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
in_file=dict(
argstr='-4dimage %s',
mandatory=True,
position=1,
),
out_file=dict(
argstr='> %s',
genfile=True,
position=-1,
),
out_type=dict(
argstr='-outputdatatype %s',
position=2,
usedefault=True,
),
)
inputs = Image2Voxel.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Image2Voxel_outputs():
output_map = dict(voxel_order=dict(), )
outputs = Image2Voxel.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 28.512195
| 67
| 0.569718
|
c0021df11fcd215faf14844ac879de890b38be36
| 9,595
|
py
|
Python
|
homeassistant/components/brother/const.py
|
dummys/home-assistant
|
dd908caebade15adf061fade686355b94ed2f43a
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/brother/const.py
|
dummys/home-assistant
|
dd908caebade15adf061fade686355b94ed2f43a
|
[
"Apache-2.0"
] | 70
|
2020-07-23T07:13:50.000Z
|
2022-03-31T06:01:52.000Z
|
homeassistant/components/brother/const.py
|
dummys/home-assistant
|
dd908caebade15adf061fade686355b94ed2f43a
|
[
"Apache-2.0"
] | 6
|
2018-02-04T03:48:55.000Z
|
2022-01-24T20:37:04.000Z
|
"""Constants for Brother integration."""
from __future__ import annotations
from typing import Final
from homeassistant.components.sensor import ATTR_STATE_CLASS, STATE_CLASS_MEASUREMENT
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
)
from .model import SensorDescription
ATTR_BELT_UNIT_REMAINING_LIFE: Final = "belt_unit_remaining_life"
ATTR_BLACK_DRUM_COUNTER: Final = "black_drum_counter"
ATTR_BLACK_DRUM_REMAINING_LIFE: Final = "black_drum_remaining_life"
ATTR_BLACK_DRUM_REMAINING_PAGES: Final = "black_drum_remaining_pages"
ATTR_BLACK_INK_REMAINING: Final = "black_ink_remaining"
ATTR_BLACK_TONER_REMAINING: Final = "black_toner_remaining"
ATTR_BW_COUNTER: Final = "b/w_counter"
ATTR_COLOR_COUNTER: Final = "color_counter"
ATTR_COUNTER: Final = "counter"
ATTR_CYAN_DRUM_COUNTER: Final = "cyan_drum_counter"
ATTR_CYAN_DRUM_REMAINING_LIFE: Final = "cyan_drum_remaining_life"
ATTR_CYAN_DRUM_REMAINING_PAGES: Final = "cyan_drum_remaining_pages"
ATTR_CYAN_INK_REMAINING: Final = "cyan_ink_remaining"
ATTR_CYAN_TONER_REMAINING: Final = "cyan_toner_remaining"
ATTR_DRUM_COUNTER: Final = "drum_counter"
ATTR_DRUM_REMAINING_LIFE: Final = "drum_remaining_life"
ATTR_DRUM_REMAINING_PAGES: Final = "drum_remaining_pages"
ATTR_DUPLEX_COUNTER: Final = "duplex_unit_pages_counter"
ATTR_ENABLED: Final = "enabled"
ATTR_FUSER_REMAINING_LIFE: Final = "fuser_remaining_life"
ATTR_LABEL: Final = "label"
ATTR_LASER_REMAINING_LIFE: Final = "laser_remaining_life"
ATTR_MAGENTA_DRUM_COUNTER: Final = "magenta_drum_counter"
ATTR_MAGENTA_DRUM_REMAINING_LIFE: Final = "magenta_drum_remaining_life"
ATTR_MAGENTA_DRUM_REMAINING_PAGES: Final = "magenta_drum_remaining_pages"
ATTR_MAGENTA_INK_REMAINING: Final = "magenta_ink_remaining"
ATTR_MAGENTA_TONER_REMAINING: Final = "magenta_toner_remaining"
ATTR_MANUFACTURER: Final = "Brother"
ATTR_PAGE_COUNTER: Final = "page_counter"
ATTR_PF_KIT_1_REMAINING_LIFE: Final = "pf_kit_1_remaining_life"
ATTR_PF_KIT_MP_REMAINING_LIFE: Final = "pf_kit_mp_remaining_life"
ATTR_REMAINING_PAGES: Final = "remaining_pages"
ATTR_STATUS: Final = "status"
ATTR_UNIT: Final = "unit"
ATTR_UPTIME: Final = "uptime"
ATTR_YELLOW_DRUM_COUNTER: Final = "yellow_drum_counter"
ATTR_YELLOW_DRUM_REMAINING_LIFE: Final = "yellow_drum_remaining_life"
ATTR_YELLOW_DRUM_REMAINING_PAGES: Final = "yellow_drum_remaining_pages"
ATTR_YELLOW_INK_REMAINING: Final = "yellow_ink_remaining"
ATTR_YELLOW_TONER_REMAINING: Final = "yellow_toner_remaining"
DATA_CONFIG_ENTRY: Final = "config_entry"
DOMAIN: Final = "brother"
UNIT_PAGES: Final = "p"
PRINTER_TYPES: Final = ["laser", "ink"]
SNMP: Final = "snmp"
ATTRS_MAP: Final[dict[str, tuple[str, str]]] = {
ATTR_DRUM_REMAINING_LIFE: (ATTR_DRUM_REMAINING_PAGES, ATTR_DRUM_COUNTER),
ATTR_BLACK_DRUM_REMAINING_LIFE: (
ATTR_BLACK_DRUM_REMAINING_PAGES,
ATTR_BLACK_DRUM_COUNTER,
),
ATTR_CYAN_DRUM_REMAINING_LIFE: (
ATTR_CYAN_DRUM_REMAINING_PAGES,
ATTR_CYAN_DRUM_COUNTER,
),
ATTR_MAGENTA_DRUM_REMAINING_LIFE: (
ATTR_MAGENTA_DRUM_REMAINING_PAGES,
ATTR_MAGENTA_DRUM_COUNTER,
),
ATTR_YELLOW_DRUM_REMAINING_LIFE: (
ATTR_YELLOW_DRUM_REMAINING_PAGES,
ATTR_YELLOW_DRUM_COUNTER,
),
}
SENSOR_TYPES: Final[dict[str, SensorDescription]] = {
ATTR_STATUS: {
ATTR_ICON: "mdi:printer",
ATTR_LABEL: ATTR_STATUS.title(),
ATTR_UNIT: None,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: None,
},
ATTR_PAGE_COUNTER: {
ATTR_ICON: "mdi:file-document-outline",
ATTR_LABEL: ATTR_PAGE_COUNTER.replace("_", " ").title(),
ATTR_UNIT: UNIT_PAGES,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_BW_COUNTER: {
ATTR_ICON: "mdi:file-document-outline",
ATTR_LABEL: ATTR_BW_COUNTER.replace("_", " ").title(),
ATTR_UNIT: UNIT_PAGES,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_COLOR_COUNTER: {
ATTR_ICON: "mdi:file-document-outline",
ATTR_LABEL: ATTR_COLOR_COUNTER.replace("_", " ").title(),
ATTR_UNIT: UNIT_PAGES,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_DUPLEX_COUNTER: {
ATTR_ICON: "mdi:file-document-outline",
ATTR_LABEL: ATTR_DUPLEX_COUNTER.replace("_", " ").title(),
ATTR_UNIT: UNIT_PAGES,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_DRUM_REMAINING_LIFE: {
ATTR_ICON: "mdi:chart-donut",
ATTR_LABEL: ATTR_DRUM_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_BLACK_DRUM_REMAINING_LIFE: {
ATTR_ICON: "mdi:chart-donut",
ATTR_LABEL: ATTR_BLACK_DRUM_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_CYAN_DRUM_REMAINING_LIFE: {
ATTR_ICON: "mdi:chart-donut",
ATTR_LABEL: ATTR_CYAN_DRUM_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_MAGENTA_DRUM_REMAINING_LIFE: {
ATTR_ICON: "mdi:chart-donut",
ATTR_LABEL: ATTR_MAGENTA_DRUM_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_YELLOW_DRUM_REMAINING_LIFE: {
ATTR_ICON: "mdi:chart-donut",
ATTR_LABEL: ATTR_YELLOW_DRUM_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_BELT_UNIT_REMAINING_LIFE: {
ATTR_ICON: "mdi:current-ac",
ATTR_LABEL: ATTR_BELT_UNIT_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_FUSER_REMAINING_LIFE: {
ATTR_ICON: "mdi:water-outline",
ATTR_LABEL: ATTR_FUSER_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_LASER_REMAINING_LIFE: {
ATTR_ICON: "mdi:spotlight-beam",
ATTR_LABEL: ATTR_LASER_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_PF_KIT_1_REMAINING_LIFE: {
ATTR_ICON: "mdi:printer-3d",
ATTR_LABEL: ATTR_PF_KIT_1_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_PF_KIT_MP_REMAINING_LIFE: {
ATTR_ICON: "mdi:printer-3d",
ATTR_LABEL: ATTR_PF_KIT_MP_REMAINING_LIFE.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_BLACK_TONER_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_BLACK_TONER_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_CYAN_TONER_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_CYAN_TONER_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_MAGENTA_TONER_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_MAGENTA_TONER_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_YELLOW_TONER_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_YELLOW_TONER_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_BLACK_INK_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_BLACK_INK_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_CYAN_INK_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_CYAN_INK_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_MAGENTA_INK_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_MAGENTA_INK_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_YELLOW_INK_REMAINING: {
ATTR_ICON: "mdi:printer-3d-nozzle",
ATTR_LABEL: ATTR_YELLOW_INK_REMAINING.replace("_", " ").title(),
ATTR_UNIT: PERCENTAGE,
ATTR_ENABLED: True,
ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
},
ATTR_UPTIME: {
ATTR_ICON: None,
ATTR_LABEL: ATTR_UPTIME.title(),
ATTR_UNIT: None,
ATTR_ENABLED: False,
ATTR_STATE_CLASS: None,
ATTR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
},
}
| 37.189922
| 85
| 0.704534
|
102c2b6217d8bde53eef005cebce9240fdf8d4d7
| 29,388
|
py
|
Python
|
probatus/feature_elimination/feature_elimination.py
|
maastrichtlawtech/probatus
|
fe0442acc2e51b6c5116b5a97005a548c381f662
|
[
"MIT"
] | null | null | null |
probatus/feature_elimination/feature_elimination.py
|
maastrichtlawtech/probatus
|
fe0442acc2e51b6c5116b5a97005a548c381f662
|
[
"MIT"
] | null | null | null |
probatus/feature_elimination/feature_elimination.py
|
maastrichtlawtech/probatus
|
fe0442acc2e51b6c5116b5a97005a548c381f662
|
[
"MIT"
] | 1
|
2021-03-01T19:53:56.000Z
|
2021-03-01T19:53:56.000Z
|
from probatus.utils import (
preprocess_data,
shap_calc,
calculate_shap_importance,
BaseFitComputePlotClass,
preprocess_labels,
get_single_scorer,
)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, check_cv
from sklearn.base import clone, is_classifier
from joblib import Parallel, delayed
import warnings
class ShapRFECV(BaseFitComputePlotClass):
"""
This class performs Backwards Recursive Feature Elimination, using SHAP feature importance. At each round, for a
given feature set, starting from all available features, the following steps are applied:
    1. (Optional) Tune the hyperparameters of the model using [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
or [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html?highlight=randomized#sklearn.model_selection.RandomizedSearchCV),
2. Apply Cross-validation (CV) to estimate the SHAP feature importance on the provided dataset. In each CV
iteration, the model is fitted on the train folds, and applied on the validation fold to estimate
SHAP feature importance.
3. Remove `step` lowest SHAP importance features from the dataset.
At the end of the process, the user can plot the performance of the model for each iteration, and select the
optimal number of features and the features set.
The functionality is similar to [RFECV](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html).
The main difference is removing the lowest importance features based on SHAP features importance. It also
supports the use of [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
and [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html)
    passed as the `clf`, thanks to which you can perform hyperparameter optimization of the model at each round,
    tuning it for each features set. Lastly, it supports
categorical features (object and category dtype) and missing values in the data, as long as the model supports
them.
We recommend using [LGBMClassifier](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html),
because by default it handles missing values and categorical features. In case of other models, make sure to
    handle these issues for your dataset and consider the impact it might have on features importance.
Example:
```python
import numpy as np
import pandas as pd
from probatus.feature_elimination import ShapRFECV
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
feature_names = ['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20']
# Prepare two samples
X, y = make_classification(n_samples=200, class_sep=0.05, n_informative=6, n_features=20,
random_state=0, n_redundant=10, n_clusters_per_class=1)
X = pd.DataFrame(X, columns=feature_names)
# Prepare model and parameter search space
clf = RandomForestClassifier(max_depth=5, class_weight='balanced')
param_grid = {
'n_estimators': [5, 7, 10],
'min_samples_leaf': [3, 5, 7, 10],
}
search = RandomizedSearchCV(clf, param_grid)
# Run feature elimination
shap_elimination = ShapRFECV(
clf=search, step=0.2, cv=10, scoring='roc_auc', n_jobs=3)
report = shap_elimination.fit_compute(X, y)
# Make plots
performance_plot = shap_elimination.plot()
# Get final feature set
final_features_set = shap_elimination.get_reduced_features_set(num_features=3)
```
<img src="../img/shaprfecv.png" width="500" />
"""
def __init__(
self,
clf,
step=1,
min_features_to_select=1,
cv=None,
scoring="roc_auc",
n_jobs=-1,
verbose=0,
random_state=None,
):
"""
This method initializes the class:
Args:
clf (binary classifier, GridSearchCV or RandomizedSearchCV):
A model that will be optimized and trained at each round of features elimination. The recommended model
is [LGBMClassifier](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html),
because it by default handles the missing values and categorical variables. This parameter also supports
[GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
and [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html).
step (int or float, optional):
Number of lowest importance features removed each round. If it is an int, then each round such number of
features is discarded. If float, such percentage of remaining features (rounded down) is removed each
iteration. It is recommended to use float, since it is faster for a large number of features, and slows
                down and becomes more precise as fewer features remain. Note: the last round may remove fewer features in
order to reach min_features_to_select.
If columns_to_keep parameter is specified in the fit method, step is the number of features to remove after
keeping those columns.
min_features_to_select (int, optional):
Minimum number of features to be kept. This is a stopping criterion of the feature elimination. By
default the process stops when one feature is left. If columns_to_keep is specified in the fit method,
                it may be overridden: the effective stopping criterion becomes the maximum of this value and the length of columns_to_keep.
cv (int, cross-validation generator or an iterable, optional):
Determines the cross-validation splitting strategy. Compatible with sklearn
[cv parameter](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html).
If None, then cv of 5 is used.
scoring (string or probatus.utils.Scorer, optional):
Metric for which the model performance is calculated. It can be either a metric name aligned with
predefined [classification scorers names in sklearn](https://scikit-learn.org/stable/modules/model_evaluation.html).
Another option is using probatus.utils.Scorer to define a custom metric.
n_jobs (int, optional):
Number of cores to run in parallel while fitting across folds. None means 1 unless in a
`joblib.parallel_backend` context. -1 means using all processors.
verbose (int, optional):
Controls verbosity of the output:
                - 0 - neither prints nor warnings are shown
- 1 - 50 - only most important warnings
- 51 - 100 - shows other warnings and prints
- above 100 - presents all prints and all warnings (including SHAP warnings).
random_state (int, optional):
Random state set at each round of feature elimination. If it is None, the results will not be
                reproducible and, when using random search, different hyperparameters might be tested at each iteration. For
                reproducible results set it to an integer.
"""
self.clf = clf
if isinstance(self.clf, RandomizedSearchCV) or isinstance(
self.clf, GridSearchCV
):
self.search_clf = True
else:
self.search_clf = False
if (isinstance(step, int) or isinstance(step, float)) and step > 0:
self.step = step
else:
raise (
ValueError(
f"The current value of step = {step} is not allowed. "
f"It needs to be a positive integer or positive float."
)
)
if isinstance(min_features_to_select, int) and min_features_to_select > 0:
self.min_features_to_select = min_features_to_select
else:
raise (
ValueError(
f"The current value of min_features_to_select = {min_features_to_select} is not allowed. "
f"It needs to be a greater than or equal to 0."
)
)
self.cv = cv
self.scorer = get_single_scorer(scoring)
self.random_state = random_state
self.n_jobs = n_jobs
self.report_df = pd.DataFrame([])
self.verbose = verbose
def _get_current_features_to_remove(self, shap_importance_df, columns_to_keep=None):
"""
Implements the logic used to determine which features to remove. If step is a positive integer,
at each round step lowest SHAP importance features are selected. If it is a float, such percentage
        of remaining features (rounded down) is removed each iteration. It is recommended to use float, since it is
        faster for a large set of features, and slows down and becomes more precise as fewer features remain.
Args:
shap_importance_df (pd.DataFrame):
                DataFrame presenting SHAP importance of remaining features.
            columns_to_keep (list of str, optional):
                Features that are never removed; they are excluded before the number of features to remove is
                computed.
Returns:
(list):
List of features to be removed at a given round.
"""
        # By default no features are removed.
num_features_to_remove = 0
# If columns_to_keep is not None, exclude those columns and
# calculate features to remove.
if columns_to_keep is not None:
mask = shap_importance_df.index.isin(columns_to_keep)
shap_importance_df = shap_importance_df[~mask]
# If the step is an int remove n features.
if isinstance(self.step, int):
num_features_to_remove = self._calculate_number_of_features_to_remove(
current_num_of_features=shap_importance_df.shape[0],
num_features_to_remove=self.step,
min_num_features_to_keep=self.min_features_to_select,
)
# If the step is a float remove n * number features that are left, rounded down
elif isinstance(self.step, float):
current_step = int(np.floor(shap_importance_df.shape[0] * self.step))
# The step after rounding down should be at least 1
if current_step < 1:
current_step = 1
num_features_to_remove = self._calculate_number_of_features_to_remove(
current_num_of_features=shap_importance_df.shape[0],
num_features_to_remove=current_step,
min_num_features_to_keep=self.min_features_to_select,
)
if num_features_to_remove == 0:
return []
else:
return shap_importance_df.iloc[-num_features_to_remove:].index.tolist()
@staticmethod
def _calculate_number_of_features_to_remove(
current_num_of_features, num_features_to_remove, min_num_features_to_keep
):
"""
Calculates the number of features to be removed, and makes sure that after removal at least
min_num_features_to_keep are kept
Args:
current_num_of_features (int):
Current number of features in the data.
num_features_to_remove (int):
Number of features to be removed at this stage.
min_num_features_to_keep (int):
Minimum number of features to be left after removal.
Returns:
(int):
Number of features to be removed.
"""
num_features_after_removal = current_num_of_features - num_features_to_remove
if num_features_after_removal >= min_num_features_to_keep:
num_to_remove = num_features_to_remove
else:
# take all available features minus number of them that should stay
num_to_remove = current_num_of_features - min_num_features_to_keep
return num_to_remove
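    # Illustrative note, not part of the original probatus code: a worked example of the
    # removal arithmetic above, assuming step=0.2 and min_features_to_select=5.
    # With 20 remaining features and a float step, floor(20 * 0.2) = 4 features are
    # dropped, leaving 16. With 6 remaining features, floor(6 * 0.2) = 1, so one is dropped.
    # With 7 remaining features and an int step of 5, removing 5 would leave 2 < 5, so
    # _calculate_number_of_features_to_remove clamps the count to 7 - 5 = 2.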
def _report_current_results(
self,
round_number,
current_features_set,
features_to_remove,
train_metric_mean,
train_metric_std,
val_metric_mean,
val_metric_std,
):
"""
This function adds the results from a current iteration to the report.
Args:
round_number (int):
Current number of the round.
current_features_set (list of str):
Current list of features.
features_to_remove (list of str):
List of features to be removed at the end of this iteration.
train_metric_mean (float or int):
Mean scoring metric measured on train set during CV.
train_metric_std (float or int):
Std scoring metric measured on train set during CV.
val_metric_mean (float or int):
Mean scoring metric measured on validation set during CV.
val_metric_std (float or int):
Std scoring metric measured on validation set during CV.
"""
current_results = {
"num_features": len(current_features_set),
"features_set": None,
"eliminated_features": None,
"train_metric_mean": train_metric_mean,
"train_metric_std": train_metric_std,
"val_metric_mean": val_metric_mean,
"val_metric_std": val_metric_std,
}
current_row = pd.DataFrame(current_results, index=[round_number])
current_row["features_set"] = [current_features_set]
current_row["eliminated_features"] = [features_to_remove]
self.report_df = pd.concat([self.report_df, current_row], axis=0)
@staticmethod
def _get_feature_shap_values_per_fold(
X, y, clf, train_index, val_index, scorer, verbose=0
):
"""
        This function calculates the SHAP values on the validation fold, as well as the train and validation scores.
Args:
X (pd.DataFrame):
Dataset used in CV.
y (pd.Series):
Binary labels for X.
clf (binary classifier):
Model to be fitted on the train folds.
train_index (np.array):
Positions of train folds samples.
val_index (np.array):
Positions of validation fold samples.
scorer (string, callable or None):
A string (see sklearn [model scoring](https://scikit-learn.org/stable/modules/model_evaluation.html)) or
a scorer callable object, function with the signature `scorer(estimator, X, y)`.
verbose (int, optional):
Controls verbosity of the output:
- 0 - neither prints nor warnings are shown
- 1 - 50 - only most important warnings regarding data properties are shown (excluding SHAP warnings)
- 51 - 100 - shows most important warnings, prints of the feature removal process
- above 100 - presents all prints and all warnings (including SHAP warnings).
Returns:
(np.array, float, float):
Tuple with the results: Shap Values on validation fold, train score, validation score.
"""
X_train, X_val = X.iloc[train_index, :], X.iloc[val_index, :]
y_train, y_val = y.iloc[train_index], y.iloc[val_index]
# Fit model with train folds
clf = clf.fit(X_train, y_train)
# Score the model
score_train = scorer(clf, X_train, y_train)
score_val = scorer(clf, X_val, y_val)
# Compute SHAP values
shap_values = shap_calc(clf, X_val, verbose=verbose)
return shap_values, score_train, score_val
def fit(self, X, y, columns_to_keep=None, column_names=None):
"""
Fits the object with the provided data. The algorithm starts with the entire dataset, and then sequentially
eliminates features. If [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
or [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html)
        object is assigned as clf, the hyperparameter optimization is applied first. Then, the SHAP feature importance
is calculated using Cross-Validation, and `step` lowest importance features are removed.
Args:
X (pd.DataFrame):
Provided dataset.
y (pd.Series):
Binary labels for X.
columns_to_keep (list of str, optional):
List of column names to keep. If given, these columns will not be eliminated by the feature elimination process.
                However, these features will still be used for the calculation of the SHAP values.
column_names (list of str, optional):
List of feature names of the provided samples. If provided it will be used to overwrite the existing
feature names. If not provided the existing feature names are used or default feature names are
generated.
Returns:
(ShapRFECV): Fitted object.
"""
# Set seed for results reproducibility
if self.random_state is not None:
np.random.seed(self.random_state)
        # If columns_to_keep is not provided, treat it as an empty list.
        # If provided, check that all the elements in columns_to_keep are strings.
if columns_to_keep is None:
len_columns_to_keep = 0
else:
if all(isinstance(x, str) for x in columns_to_keep):
len_columns_to_keep = len(columns_to_keep)
else:
raise (
ValueError(
"The current values of columns_to_keep are not allowed.All the elements should be strings."
)
)
        # If the column_names parameter is provided, check that it covers the column names in X.
if column_names is not None:
if all(x in column_names for x in list(X.columns)):
pass
else:
raise (
ValueError(
"The column names in parameter columns_to_keep and column_names are not macthing."
)
)
# Check that the total number of columns to select is less than total number of columns in the data.
# only when both parameters are provided.
if column_names is not None and columns_to_keep is not None:
            if (self.min_features_to_select + len_columns_to_keep) > len(
                column_names
            ):
raise ValueError(
"Minimum features to select is greater than number of features."
"Lower the value for min_features_to_select or number of columns in columns_to_keep"
)
self.X, self.column_names = preprocess_data(
X, X_name="X", column_names=column_names, verbose=self.verbose
)
self.y = preprocess_labels(
y, y_name="y", index=self.X.index, verbose=self.verbose
)
self.cv = check_cv(self.cv, self.y, classifier=is_classifier(self.clf))
remaining_features = current_features_set = self.column_names
round_number = 0
# Stop when stopping criteria is met.
stopping_criteria = np.max([self.min_features_to_select, len_columns_to_keep])
# Setting up the min_features_to_select parameter.
if columns_to_keep is None:
pass
else:
self.min_features_to_select = 0
            # This ensures that, if columns_to_keep is provided, the last features remaining are only the columns_to_keep.
if self.verbose > 50:
warnings.warn(f"Minimum features to select : {stopping_criteria}")
while len(current_features_set) > stopping_criteria:
round_number += 1
# Get current dataset info
current_features_set = remaining_features
if columns_to_keep is None:
remaining_removeable_features = list(set(current_features_set))
else:
remaining_removeable_features = list(
set(current_features_set) | set(columns_to_keep)
)
current_X = self.X[remaining_removeable_features]
# Set seed for results reproducibility
if self.random_state is not None:
np.random.seed(self.random_state)
# Optimize parameters
if self.search_clf:
current_search_clf = clone(self.clf).fit(current_X, self.y)
current_clf = current_search_clf.estimator.set_params(
**current_search_clf.best_params_
)
else:
current_clf = clone(self.clf)
# Perform CV to estimate feature importance with SHAP
results_per_fold = Parallel(n_jobs=self.n_jobs)(
delayed(self._get_feature_shap_values_per_fold)(
X=current_X,
y=self.y,
clf=current_clf,
train_index=train_index,
val_index=val_index,
scorer=self.scorer.scorer,
verbose=self.verbose,
)
for train_index, val_index in self.cv.split(current_X, self.y)
)
shap_values = np.vstack(
[current_result[0] for current_result in results_per_fold]
)
scores_train = [current_result[1] for current_result in results_per_fold]
scores_val = [current_result[2] for current_result in results_per_fold]
# Calculate the shap features with remaining features and features to keep.
shap_importance_df = calculate_shap_importance(
shap_values, remaining_removeable_features
)
# Get features to remove
features_to_remove = self._get_current_features_to_remove(
shap_importance_df, columns_to_keep=columns_to_keep
)
remaining_features = list(
set(current_features_set) - set(features_to_remove)
)
# Report results
self._report_current_results(
round_number=round_number,
current_features_set=current_features_set,
features_to_remove=features_to_remove,
train_metric_mean=np.round(np.mean(scores_train), 3),
train_metric_std=np.round(np.std(scores_train), 3),
val_metric_mean=np.round(np.mean(scores_val), 3),
val_metric_std=np.round(np.std(scores_val), 3),
)
if self.verbose > 50:
print(
f"Round: {round_number}, Current number of features: {len(current_features_set)}, "
f'Current performance: Train {self.report_df.loc[round_number]["train_metric_mean"]} '
f'+/- {self.report_df.loc[round_number]["train_metric_std"]}, CV Validation '
f'{self.report_df.loc[round_number]["val_metric_mean"]} '
f'+/- {self.report_df.loc[round_number]["val_metric_std"]}. \n'
f"Features left: {remaining_features}. "
f"Removed features at the end of the round: {features_to_remove}"
)
self.fitted = True
return self
def compute(self):
"""
        Checks if fit() method has been run and computes the DataFrame with results of feature elimination for each
round.
Returns:
(pd.DataFrame):
DataFrame with results of feature elimination for each round.
"""
self._check_if_fitted()
return self.report_df
def fit_compute(self, X, y, columns_to_keep=None, column_names=None):
"""
Fits the object with the provided data. The algorithm starts with the entire dataset, and then sequentially
eliminates features. If [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
or [RandomizedSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html)
        object is assigned as clf, the hyperparameter optimization is applied first. Then, the SHAP feature importance
is calculated using Cross-Validation, and `step` lowest importance features are removed. At the end, the
report containing results from each iteration is computed and returned to the user.
Args:
X (pd.DataFrame):
Provided dataset.
y (pd.Series):
Binary labels for X.
columns_to_keep (list of str, optional):
List of columns to keep. If given, these columns will not be eliminated.
column_names (list of str, optional):
List of feature names of the provided samples. If provided it will be used to overwrite the existing
feature names. If not provided the existing feature names are used or default feature names are
generated.
Returns:
(pd.DataFrame):
DataFrame containing results of feature elimination from each iteration.
"""
self.fit(X, y, columns_to_keep=columns_to_keep, column_names=column_names)
return self.compute()
def get_reduced_features_set(self, num_features):
"""
Gets the features set after the feature elimination process, for a given number of features.
Args:
num_features (int):
Number of features in the reduced features set.
Returns:
(list of str):
Reduced features set.
"""
self._check_if_fitted()
if num_features not in self.report_df.num_features.tolist():
raise (
ValueError(
f"The provided number of features has not been achieved at any stage of the process. "
f"You can select one of the following: {self.report_df.num_features.tolist()}"
)
)
else:
return self.report_df[self.report_df.num_features == num_features][
"features_set"
].values[0]
def plot(self, show=True, **figure_kwargs):
"""
Generates plot of the model performance for each iteration of feature elimination.
Args:
show (bool, optional):
If True, the plots are showed to the user, otherwise they are not shown. Not showing plot can be useful,
when you want to edit the returned axis, before showing it.
**figure_kwargs:
Keyword arguments that are passed to the plt.figure, at its initialization.
Returns:
(plt.axis):
Axis containing the performance plot.
"""
x_ticks = list(reversed(self.report_df["num_features"].tolist()))
plt.figure(**figure_kwargs)
plt.plot(
self.report_df["num_features"],
self.report_df["train_metric_mean"],
label="Train Score",
)
plt.fill_between(
pd.to_numeric(self.report_df.num_features, errors="coerce"),
self.report_df["train_metric_mean"] - self.report_df["train_metric_std"],
self.report_df["train_metric_mean"] + self.report_df["train_metric_std"],
alpha=0.3,
)
plt.plot(
self.report_df["num_features"],
self.report_df["val_metric_mean"],
label="Validation Score",
)
plt.fill_between(
pd.to_numeric(self.report_df.num_features, errors="coerce"),
self.report_df["val_metric_mean"] - self.report_df["val_metric_std"],
self.report_df["val_metric_mean"] + self.report_df["val_metric_std"],
alpha=0.3,
)
plt.xlabel("Number of features")
plt.ylabel(f"Performance {self.scorer.metric_name}")
plt.title("Backwards Feature Elimination using SHAP & CV")
plt.legend(loc="lower left")
ax = plt.gca()
ax.invert_xaxis()
ax.set_xticks(x_ticks)
if show:
plt.show()
else:
plt.close()
return ax
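# Illustrative sketch, not part of the original probatus module: how the columns_to_keep
# argument documented above might be used. The model choice, the data (X, y) and the
# column names 'f1'/'f2' are assumptions, prepared elsewhere.
#
#     from lightgbm import LGBMClassifier
#     from probatus.feature_elimination import ShapRFECV
#
#     clf = LGBMClassifier(n_estimators=100)
#     shap_elimination = ShapRFECV(clf=clf, step=0.2, cv=5, scoring="roc_auc", n_jobs=3)
#     # 'f1' and 'f2' are hypothetical column names that must never be eliminated.
#     report = shap_elimination.fit_compute(X, y, columns_to_keep=["f1", "f2"])
#     shap_elimination.plot()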
| 43.732143
| 195
| 0.628896
|
24d2e42274d97e1d3799685f33e2354c4c033cdf
| 10,569
|
py
|
Python
|
dnacentersdk/api/v1_2_10/fabric_wired.py
|
nonstdout/dnacentersdk
|
dbbbc4baa5300aa9e5c9193f2ea71438018095f5
|
[
"MIT"
] | null | null | null |
dnacentersdk/api/v1_2_10/fabric_wired.py
|
nonstdout/dnacentersdk
|
dbbbc4baa5300aa9e5c9193f2ea71438018095f5
|
[
"MIT"
] | null | null | null |
dnacentersdk/api/v1_2_10/fabric_wired.py
|
nonstdout/dnacentersdk
|
dbbbc4baa5300aa9e5c9193f2ea71438018095f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DNA Center Fabric Wired API wrapper.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
)
class FabricWired(object):
"""DNA Center Fabric Wired API (version: 1.2.10).
Wraps the DNA Center Fabric Wired
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new FabricWired
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the DNA Center service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(FabricWired, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def gets_border_device_detail(self,
device_ip_address,
sda_border_device,
headers=None,
**request_parameters):
"""**Beta** - Gets border device detail from SDA Fabric.
Args:
sda_border_device(basestring): sda/border-device path
parameter.
device_ip_address(basestring): device-ip-address path
parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(sda_border_device, basestring,
may_be_none=False)
check_type(device_ip_address, basestring,
may_be_none=False)
if headers is not None:
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
params = {
}
params.update(request_parameters)
params = dict_from_items_with_values(params)
path_params = {
'sda/border-device': sda_border_device,
'device-ip-address': device_ip_address,
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/${sda/border-'
+ 'device}/${device-ip-address}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.get(endpoint_full_url, params=params,
headers=_headers)
else:
json_data = self._session.get(endpoint_full_url, params=params)
return self._object_factory('bpm_98a39bf4485a9871_v1_2_10', json_data)
def adds_border_device(self,
sda_border_device,
headers=None,
payload=None,
active_validation=True,
**request_parameters):
"""Adds border device in SDA Fabric.
Args:
sda_border_device(basestring): sda/border-device path
parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
payload(list): A JSON serializable Python object to send in the
body of the Request.
active_validation(bool): Enable/Disable payload validation.
Defaults to True.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(payload, list)
check_type(sda_border_device, basestring,
may_be_none=False)
if headers is not None:
if '__runsync' in headers:
check_type(headers.get('__runsync'),
bool)
if '__runsynctimeout' in headers:
check_type(headers.get('__runsynctimeout'),
int)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
params = {
}
params.update(request_parameters)
params = dict_from_items_with_values(params)
path_params = {
'sda/border-device': sda_border_device,
}
_payload = payload or []
if active_validation:
self._request_validator('jsd_bead7b3443b996a7_v1_2_10')\
.validate(_payload)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/${sda/border-device}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.post(endpoint_full_url, params=params,
json=_payload,
headers=_headers)
else:
json_data = self._session.post(endpoint_full_url, params=params,
json=_payload)
return self._object_factory('bpm_bead7b3443b996a7_v1_2_10', json_data)
def deletes_border_device(self,
device_ip_address,
sda_border_device,
headers=None,
**request_parameters):
"""Deletes border device from SDA Fabric.
Args:
sda_border_device(basestring): sda/border-device path
parameter.
device_ip_address(basestring): device-ip-address path
parameter.
            headers(dict): Dictionary of HTTP Headers to send with the Request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
MyDict: JSON response. Access the object's properties by using
the dot notation or the bracket notation.
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the DNA Center cloud returns an error.
"""
check_type(headers, dict)
check_type(sda_border_device, basestring,
may_be_none=False)
check_type(device_ip_address, basestring,
may_be_none=False)
if headers is not None:
if '__runsync' in headers:
check_type(headers.get('__runsync'),
bool)
if '__runsynctimeout' in headers:
check_type(headers.get('__runsynctimeout'),
int)
if 'X-Auth-Token' in headers:
check_type(headers.get('X-Auth-Token'),
basestring, may_be_none=False)
params = {
}
params.update(request_parameters)
params = dict_from_items_with_values(params)
path_params = {
'sda/border-device': sda_border_device,
'device-ip-address': device_ip_address,
}
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
e_url = ('/dna/intent/api/v1/business/${sda/border-'
+ 'device}/${device-ip-address}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
json_data = self._session.delete(endpoint_full_url, params=params,
headers=_headers)
else:
json_data = self._session.delete(endpoint_full_url, params=params)
return self._object_factory('bpm_cb81b93540baaab0_v1_2_10', json_data)
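# Illustrative sketch, not part of the original module: these wrapper methods are normally
# reached through the SDK's top-level client rather than instantiated directly. The
# constructor arguments, credentials and the `fabric_wired` accessor name below are
# assumptions based on the SDK's usual layout, not taken from this file.
#
#     from dnacentersdk import DNACenterAPI
#
#     api = DNACenterAPI(base_url="https://sandboxdnac.cisco.com",
#                        username="user", password="password", version="1.2.10")
#     detail = api.fabric_wired.gets_border_device_detail(
#         device_ip_address="10.0.0.1", sda_border_device="border-device")
#     print(detail)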
| 37.478723
| 78
| 0.599016
|
31898b2a348795ed40c5adfa1b57c00c38cee3a4
| 2,707
|
py
|
Python
|
escape_game/db.py
|
jpochetedmead/Flask-Adventure-Game
|
43fe7d01e9c5df0e9939876bff7eb0daba7865fc
|
[
"MIT"
] | null | null | null |
escape_game/db.py
|
jpochetedmead/Flask-Adventure-Game
|
43fe7d01e9c5df0e9939876bff7eb0daba7865fc
|
[
"MIT"
] | null | null | null |
escape_game/db.py
|
jpochetedmead/Flask-Adventure-Game
|
43fe7d01e9c5df0e9939876bff7eb0daba7865fc
|
[
"MIT"
] | null | null | null |
import sqlite3
import click
# g is a special object that is unique for each request. It is used to store data that might be accessed by multiple functions during the request. The connection is stored and reused instead of creating a new connection if get_db is called a second time in the same request.
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
if 'db' not in g:
# sqlite3.connect() establishes a connection to the file pointed at by the DATABASE configuration key. This file doesn’t have to exist yet, and won’t until you initialize the database later.
g.db = sqlite3.connect(
#current_app is another special object that points to the Flask application handling the request. Since you used an application factory, there is no application object when writing the rest of your code. get_db will be called when the application has been created and is handling a request, so current_app can be used.
current_app.config['DATABASE'],
detect_types=sqlite3.PARSE_DECLTYPES
)
# sqlite3.Row tells the connection to return rows that behave like dicts. This allows accessing the columns by name.
g.db.row_factory = sqlite3.Row
return g.db
# close_db checks if a connection was created by checking if g.db was set. If the connection exists, it is closed. Further down you will tell your application about the close_db function in the application factory so that it is called after each request.
def close_db(e=None):
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
db = get_db()
# open_resource() opens a file relative to the flaskr package, which is useful since you won’t necessarily know where that location is when deploying the application later. get_db returns a database connection, which is used to execute the commands read from the file.
with current_app.open_resource('schema.sql') as f:
db.executescript(f.read().decode('utf8'))
# click.command() defines a command line command called init-db that calls the init_db function and shows a success message to the user. You can read Command Line Interface to learn more about writing commands.
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo('Initialized the database.')
def init_app(app):
app.teardown_appcontext(close_db) # app.teardown_appcontext() tells Flask to call that function when cleaning up after returning the response.
app.cli.add_command(init_db_command) # app.cli.add_command() adds a new command that can be called with the flask command.
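# Illustrative sketch, not part of this module: how init_app is typically wired into the
# application factory. The factory name, config mapping and database filename are
# assumptions; only db.init_app(app) comes from the code above.
#
#     import os
#     from flask import Flask
#
#     def create_app():
#         app = Flask(__name__, instance_relative_config=True)
#         app.config.from_mapping(
#             DATABASE=os.path.join(app.instance_path, 'escape_game.sqlite'),
#         )
#         from . import db
#         db.init_app(app)  # registers close_db and the init-db CLI command
#         return app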
| 57.595745
| 330
| 0.74843
|
5f4f9190f030a4d4d8a0c9fd84252de41567890a
| 17,978
|
py
|
Python
|
evaluation.py
|
coallaoh/wsolevaluation
|
f645640f18eb3e7075ea146bc048cc07d090bfdb
|
[
"MIT"
] | null | null | null |
evaluation.py
|
coallaoh/wsolevaluation
|
f645640f18eb3e7075ea146bc048cc07d090bfdb
|
[
"MIT"
] | null | null | null |
evaluation.py
|
coallaoh/wsolevaluation
|
f645640f18eb3e7075ea146bc048cc07d090bfdb
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2020-present NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import cv2
import numpy as np
import os
import torch.utils.data as torchdata
from data_loaders import configure_metadata
from data_loaders import get_image_ids
from data_loaders import get_bounding_boxes
from data_loaders import get_image_sizes
from data_loaders import get_mask_paths
from util import check_scoremap_validity
from util import check_box_convention
from util import t2n
_IMAGENET_MEAN = [0.485, .456, .406]
_IMAGENET_STDDEV = [.229, .224, .225]
_RESIZE_LENGTH = 224
def calculate_multiple_iou(box_a, box_b):
"""
Args:
box_a: numpy.ndarray(dtype=np.int, shape=(num_a, 4))
x0y0x1y1 convention.
box_b: numpy.ndarray(dtype=np.int, shape=(num_b, 4))
x0y0x1y1 convention.
Returns:
ious: numpy.ndarray(dtype=np.int, shape(num_a, num_b))
"""
num_a = box_a.shape[0]
num_b = box_b.shape[0]
check_box_convention(box_a, 'x0y0x1y1')
check_box_convention(box_b, 'x0y0x1y1')
# num_a x 4 -> num_a x num_b x 4
box_a = np.tile(box_a, num_b)
box_a = np.expand_dims(box_a, axis=1).reshape((num_a, num_b, -1))
# num_b x 4 -> num_b x num_a x 4
box_b = np.tile(box_b, num_a)
box_b = np.expand_dims(box_b, axis=1).reshape((num_b, num_a, -1))
# num_b x num_a x 4 -> num_a x num_b x 4
box_b = np.transpose(box_b, (1, 0, 2))
# num_a x num_b
min_x = np.maximum(box_a[:, :, 0], box_b[:, :, 0])
min_y = np.maximum(box_a[:, :, 1], box_b[:, :, 1])
max_x = np.minimum(box_a[:, :, 2], box_b[:, :, 2])
max_y = np.minimum(box_a[:, :, 3], box_b[:, :, 3])
# num_a x num_b
area_intersect = (np.maximum(0, max_x - min_x + 1)
* np.maximum(0, max_y - min_y + 1))
area_a = ((box_a[:, :, 2] - box_a[:, :, 0] + 1) *
(box_a[:, :, 3] - box_a[:, :, 1] + 1))
area_b = ((box_b[:, :, 2] - box_b[:, :, 0] + 1) *
(box_b[:, :, 3] - box_b[:, :, 1] + 1))
denominator = area_a + area_b - area_intersect
degenerate_indices = np.where(denominator <= 0)
denominator[degenerate_indices] = 1
ious = area_intersect / denominator
ious[degenerate_indices] = 0
return ious
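# Worked example, added for illustration and not in the original file: for
# box_a = [[0, 0, 9, 9]] and box_b = [[5, 5, 14, 14]], each box covers
# 10 * 10 = 100 pixels under the inclusive x0y0x1y1 convention used above.
# The overlap spans x in [5, 9] and y in [5, 9], i.e. 5 * 5 = 25 pixels, so
# calculate_multiple_iou returns [[25 / (100 + 100 - 25)]] ~= [[0.143]].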
def resize_bbox(box, image_size, resize_size):
"""
Args:
box: iterable (ints) of length 4 (x0, y0, x1, y1)
image_size: iterable (ints) of length 2 (width, height)
resize_size: iterable (ints) of length 2 (width, height)
Returns:
new_box: iterable (ints) of length 4 (x0, y0, x1, y1)
"""
check_box_convention(np.array(box), 'x0y0x1y1')
box_x0, box_y0, box_x1, box_y1 = map(float, box)
image_w, image_h = map(float, image_size)
new_image_w, new_image_h = map(float, resize_size)
newbox_x0 = box_x0 * new_image_w / image_w
newbox_y0 = box_y0 * new_image_h / image_h
newbox_x1 = box_x1 * new_image_w / image_w
newbox_y1 = box_y1 * new_image_h / image_h
return int(newbox_x0), int(newbox_y0), int(newbox_x1), int(newbox_y1)
def compute_bboxes_from_scoremaps(scoremap, scoremap_threshold_list):
"""
Args:
scoremap: numpy.ndarray(dtype=np.float32, size=(H, W)) between 0 and 1
scoremap_threshold_list: iterable
Returns:
boxes: list of estimated boxes (list of ints) at each cam threshold
"""
check_scoremap_validity(scoremap)
height, width = scoremap.shape
scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2)
def scoremap2bbox(threshold):
_, thr_gray_heatmap = cv2.threshold(
src=scoremap_image,
thresh=int(threshold * np.max(scoremap_image)),
maxval=255,
type=cv2.THRESH_BINARY)
        # Note: indexing [1] assumes the OpenCV 3.x return signature of
        # findContours, i.e. (image, contours, hierarchy); OpenCV 4.x
        # returns (contours, hierarchy) instead.
        contours = cv2.findContours(
            image=thr_gray_heatmap,
            mode=cv2.RETR_TREE,
            method=cv2.CHAIN_APPROX_SIMPLE)[1]
if len(contours) == 0:
return [0, 0, 0, 0]
c = max(contours, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(c)
x0, y0, x1, y1 = x, y, x + w, y + h
x1 = min(x1, width - 1)
y1 = min(y1, height - 1)
return [x0, y0, x1, y1]
estimated_bbox = [scoremap2bbox(threshold)
for threshold in scoremap_threshold_list]
return estimated_bbox
class CamDataset(torchdata.Dataset):
def __init__(self, scoremap_path, image_ids):
self.scoremap_path = scoremap_path
self.image_ids = image_ids
def _load_cam(self, image_id):
scoremap_file = os.path.join(self.scoremap_path, image_id + '.npy')
return np.load(scoremap_file)
def __getitem__(self, index):
image_id = self.image_ids[index]
cam = self._load_cam(image_id)
return cam, image_id
def __len__(self):
return len(self.image_ids)
class LocalizationEvaluator(object):
""" Abstract class for localization evaluation over score maps.
The class is designed to operate in a for loop (e.g. batch-wise cam
score map computation). At initialization, __init__ registers paths to
annotations and data containers for evaluation. At each iteration,
each score map is passed to the accumulate() method along with its image_id.
After the for loop is finalized, compute() is called to compute the final
localization performance.
"""
def __init__(self, metadata, dataset_name, split, threshold_list,
mask_root):
self.metadata = metadata
self.threshold_list = threshold_list
self.dataset_name = dataset_name
self.split = split
self.mask_root = mask_root
def accumulate(self, scoremap, image_id):
raise NotImplementedError
def compute(self):
raise NotImplementedError
class BoxEvaluator(LocalizationEvaluator):
_IOU_THRESHOLD = 0.5
def __init__(self, **kwargs):
super(BoxEvaluator, self).__init__(**kwargs)
self.image_ids = get_image_ids(metadata=self.metadata)
self.resize_length = _RESIZE_LENGTH
self.cnt = 0
self.num_correct = np.zeros(len(self.threshold_list))
self.original_bboxes = get_bounding_boxes(self.metadata)
self.image_sizes = get_image_sizes(self.metadata)
self.gt_bboxes = self._load_resized_boxes(self.original_bboxes)
def _load_resized_boxes(self, original_bboxes):
resized_bbox = {image_id: [
resize_bbox(bbox, self.image_sizes[image_id],
(self.resize_length, self.resize_length))
for bbox in original_bboxes[image_id]]
for image_id in self.image_ids}
return resized_bbox
def accumulate(self, scoremap, image_id):
"""
From a score map, a box is inferred (compute_bboxes_from_scoremaps).
The box is compared against GT boxes. Count a scoremap as a correct
prediction if the IOU against at least one box is greater than a certain
threshold (_IOU_THRESHOLD).
Args:
scoremap: numpy.ndarray(size=(H, W), dtype=np.float)
image_id: string.
"""
boxes_at_thresholds = compute_bboxes_from_scoremaps(
scoremap=scoremap,
scoremap_threshold_list=self.threshold_list)
multiple_iou = calculate_multiple_iou(
np.array(boxes_at_thresholds),
np.array(self.gt_bboxes[image_id]))
correct_threshold_indices = np.where(multiple_iou.max(1)
>= self._IOU_THRESHOLD)[0]
self.num_correct[correct_threshold_indices] += 1
self.cnt += 1
def compute(self):
"""
Returns:
max_localization_accuracy: float. The ratio of images where the
box prediction is correct. The best scoremap threshold is taken
for the final performance.
"""
localization_accuracies = self.num_correct * 100. / float(self.cnt)
max_localization_accuracy = localization_accuracies.max()
print("MaxBoxAcc on split {}: {}"
.format(self.split, max_localization_accuracy))
return max_localization_accuracy
def load_mask_image(file_path, resize_size):
"""
Args:
file_path: string.
resize_size: tuple of ints (height, width)
Returns:
mask: numpy.ndarray(dtype=numpy.float32, shape=(height, width))
"""
mask = np.float32(cv2.imread(file_path, cv2.IMREAD_GRAYSCALE))
mask = cv2.resize(mask, resize_size, interpolation=cv2.INTER_NEAREST)
return mask
def get_mask(mask_root, mask_paths, ignore_path):
"""
Ignore mask is set as the ignore box region \setminus the ground truth
foreground region.
Args:
mask_root: string.
mask_paths: iterable of strings.
ignore_path: string.
Returns:
mask: numpy.ndarray(size=(224, 224), dtype=np.uint8)
"""
mask_all_instances = []
for mask_path in mask_paths:
mask_file = os.path.join(mask_root, mask_path)
mask = load_mask_image(mask_file, (_RESIZE_LENGTH, _RESIZE_LENGTH))
mask_all_instances.append(mask > 0.5)
mask_all_instances = np.stack(mask_all_instances, axis=0).any(axis=0)
ignore_file = os.path.join(mask_root, ignore_path)
ignore_box_mask = load_mask_image(ignore_file,
(_RESIZE_LENGTH, _RESIZE_LENGTH))
ignore_box_mask = ignore_box_mask > 0.5
ignore_mask = np.logical_and(ignore_box_mask,
np.logical_not(mask_all_instances))
if np.logical_and(ignore_mask, mask_all_instances).any():
raise RuntimeError("Ignore and foreground masks intersect.")
return (mask_all_instances.astype(np.uint8) +
255 * ignore_mask.astype(np.uint8))
class MaskEvaluator(LocalizationEvaluator):
def __init__(self, **kwargs):
super(MaskEvaluator, self).__init__(**kwargs)
if self.dataset_name != "OpenImages":
raise ValueError("Mask evaluation must be performed on OpenImages.")
self.mask_paths, self.ignore_paths = get_mask_paths(self.metadata)
# threshold_list is given as [0, bw, 2bw, ..., 1-bw]
# Set bins as [0, bw), [bw, 2bw), ..., [1-bw, 1), [1, 2), [2, 3)
self.num_bins = len(self.threshold_list) + 2
self.threshold_list_right_edge = np.append(self.threshold_list,
[1.0, 2.0, 3.0])
self.gt_true_score_hist = np.zeros(self.num_bins, dtype=np.float)
self.gt_false_score_hist = np.zeros(self.num_bins, dtype=np.float)
def accumulate(self, scoremap, image_id):
"""
Score histograms over the score map values at GT positive and negative
pixels are computed.
Args:
scoremap: numpy.ndarray(size=(H, W), dtype=np.float)
image_id: string.
"""
check_scoremap_validity(scoremap)
gt_mask = get_mask(self.mask_root,
self.mask_paths[image_id],
self.ignore_paths[image_id])
gt_true_scores = scoremap[gt_mask == 1]
gt_false_scores = scoremap[gt_mask == 0]
# histograms in ascending order
gt_true_hist, _ = np.histogram(gt_true_scores,
bins=self.threshold_list_right_edge)
self.gt_true_score_hist += gt_true_hist.astype(np.float)
gt_false_hist, _ = np.histogram(gt_false_scores,
bins=self.threshold_list_right_edge)
self.gt_false_score_hist += gt_false_hist.astype(np.float)
def compute(self):
"""
Arrays are arranged in the following convention (bin edges):
gt_true_score_hist: [0.0, eps), ..., [1.0, 2.0), [2.0, 3.0)
gt_false_score_hist: [0.0, eps), ..., [1.0, 2.0), [2.0, 3.0)
tp, fn, tn, fp: >=2.0, >=1.0, ..., >=0.0
Returns:
auc: float. The area-under-curve of the precision-recall curve.
Also known as average precision (AP).
"""
num_gt_true = self.gt_true_score_hist.sum()
tp = self.gt_true_score_hist[::-1].cumsum()
fn = num_gt_true - tp
num_gt_false = self.gt_false_score_hist.sum()
fp = self.gt_false_score_hist[::-1].cumsum()
tn = num_gt_false - fp
if ((tp + fn) <= 0).all():
raise RuntimeError("No positive ground truth in the eval set.")
if ((tp + fp) <= 0).all():
raise RuntimeError("No positive prediction in the eval set.")
non_zero_indices = (tp + fp) != 0
precision = tp / (tp + fp)
recall = tp / (tp + fn)
auc = (precision[1:] * np.diff(recall))[non_zero_indices[1:]].sum()
auc *= 100
print("Mask AUC on split {}: {}".format(self.split, auc))
return auc
def _get_cam_loader(image_ids, scoremap_path):
return torchdata.DataLoader(
CamDataset(scoremap_path, image_ids),
batch_size=128,
shuffle=False,
num_workers=4,
pin_memory=True)
def evaluate_wsol(scoremap_root, metadata_root, mask_root, dataset_name, split,
cam_curve_interval=.001):
"""
Compute WSOL performances of predicted heatmaps against ground truth
boxes (CUB, ILSVRC) or masks (OpenImages). For boxes, we compute the
gt-known box accuracy (IoU>=0.5) at the optimal heatmap threshold.
For masks, we compute the area-under-curve of the pixel-wise precision-
recall curve.
Args:
scoremap_root: string. Score maps for each eval image are saved under
the output_path, with the name corresponding to their image_ids.
For example, the heatmap for the image "123/456.JPEG" is expected
to be located at "{output_path}/123/456.npy".
The heatmaps must be numpy arrays of type np.float, with 2
dimensions corresponding to height and width. The height and width
must be identical to those of the original image. The heatmap values
must be in the [0, 1] range. The map must attain values 0.0 and 1.0.
See check_scoremap_validity() in util.py for the exact requirements.
metadata_root: string.
mask_root: string.
dataset_name: string. Supports [CUB, ILSVRC, and OpenImages].
split: string. Supports [train, val, test].
cam_curve_interval: float. Default 0.001. At which threshold intervals
will the heatmaps be evaluated?
Returns:
performance: float. For CUB and ILSVRC, maxboxacc is returned.
For OpenImages, area-under-curve of the precision-recall curve
is returned.
"""
print("Loading and evaluating cams.")
metadata = configure_metadata(metadata_root)
image_ids = get_image_ids(metadata)
threshold_list = list(np.arange(0, 1, cam_curve_interval))
evaluator = {"OpenImages": MaskEvaluator,
"CUB": BoxEvaluator,
"ILSVRC": BoxEvaluator
}[dataset_name](metadata=metadata,
dataset_name=dataset_name,
split=split,
threshold_list=threshold_list,
mask_root=mask_root)
cam_loader = _get_cam_loader(image_ids, scoremap_root)
for cams, image_ids in cam_loader:
for cam, image_id in zip(cams, image_ids):
evaluator.accumulate(t2n(cam), image_id)
performance = evaluator.compute()
return performance
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--scoremap_root', type=str,
default='train_log/scoremaps/',
help="The root folder for score maps to be evaluated.")
parser.add_argument('--metadata_root', type=str, default='metadata/',
help="Root folder of metadata.")
parser.add_argument('--mask_root', type=str, default='dataset/',
help="Root folder of masks (OpenImages).")
parser.add_argument('--dataset_name', type=str,
help="One of [CUB, ImageNet, OpenImages].")
parser.add_argument('--split', type=str,
help="One of [val, test]. They correspond to "
"train-fullsup and test, respectively.")
parser.add_argument('--cam_curve_interval', type=float, default=0.01,
help="At which threshold intervals will the score maps "
"be evaluated?.")
args = parser.parse_args()
evaluate_wsol(scoremap_root=args.scoremap_root,
metadata_root=args.metadata_root,
mask_root=args.mask_root,
dataset_name=args.dataset_name,
split=args.split,
cam_curve_interval=args.cam_curve_interval)
if __name__ == "__main__":
main()
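# Example invocation, added for illustration; the paths and dataset choice are assumptions:
#
#     python evaluation.py --scoremap_root train_log/scoremaps/ \
#         --metadata_root metadata/ --mask_root dataset/ \
#         --dataset_name CUB --split test --cam_curve_interval 0.01
#
# For CUB and ILSVRC this prints and returns MaxBoxAcc; for OpenImages it returns the
# pixel-wise precision-recall AUC, as documented in evaluate_wsol above.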
| 37.768908
| 80
| 0.634498
|
78dea3cf77dd847188c81be2021dae27d371116a
| 44
|
py
|
Python
|
models/__init__.py
|
lclbm/xrx_pvp_server
|
f40cdbdc5de8c04b8113611c3174c84a200402b1
|
[
"MIT"
] | 1
|
2022-03-31T06:27:03.000Z
|
2022-03-31T06:27:03.000Z
|
models/__init__.py
|
lclbm/xrx_pvp_server
|
f40cdbdc5de8c04b8113611c3174c84a200402b1
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
lclbm/xrx_pvp_server
|
f40cdbdc5de8c04b8113611c3174c84a200402b1
|
[
"MIT"
] | 1
|
2022-03-10T07:39:24.000Z
|
2022-03-10T07:39:24.000Z
|
from .models import PlayerInfo, ActivityInfo
| 44
| 44
| 0.863636
|
e96b5495e2ad22243fe261fefa6316e478fc003d
| 1,337
|
py
|
Python
|
flavio/physics/betadecays/common.py
|
AlexandreCarvunis/flavio
|
c21936a5ff004283b08cab543e2c880e35526bb6
|
[
"MIT"
] | 61
|
2016-03-09T16:19:39.000Z
|
2022-03-30T00:55:51.000Z
|
flavio/physics/betadecays/common.py
|
AlexandreCarvunis/flavio
|
c21936a5ff004283b08cab543e2c880e35526bb6
|
[
"MIT"
] | 167
|
2016-03-15T15:25:57.000Z
|
2022-02-27T22:19:22.000Z
|
flavio/physics/betadecays/common.py
|
AlexandreCarvunis/flavio
|
c21936a5ff004283b08cab543e2c880e35526bb6
|
[
"MIT"
] | 57
|
2016-03-15T14:24:23.000Z
|
2022-01-14T01:00:03.000Z
|
"""Common functions for beta decays."""
import flavio
from flavio.physics.edms.common import proton_charges
from flavio.physics.bdecays.wilsoncoefficients import get_wceff_fccc_std, get_CVLSM
from math import sqrt
def wc_eff(par, wc_obj, scale, nu):
r"""Lee-Yang effective couplings.
    See Eqs. (2) and (9) of arXiv:1803.08732."""
flavio.citations.register("Gonzalez-Alonso:2018omy")
# wilson coefficients
wc = get_wceff_fccc_std(wc_obj, par, 'du', 'e', nu, None, scale, nf=3)
# proton charges
g = proton_charges(par, scale)
gV = g['gV_u-d']
gA = g['gA_u-d']
gS = g['gS_u-d']
gP = g['gP_u-d']
gT = g['gT_u-d']
# radiative corrections
# Note: CVLSM is the universal Marciano-Sirlin result that needs to be
# divided out since it's already contained in the Deltas
CVLSM = get_CVLSM(par, scale, nf=3)
DeltaRV = par['DeltaRV']
DeltaRA = DeltaRV # not needed for superallowed, for neutron difference absorbed in lambda
rV = sqrt(1 + DeltaRV) / CVLSM
rA = sqrt(1 + DeltaRA) / CVLSM
# effective couplings
# note that C_i' = C_i
C = {}
C['V'] = gV * (wc['VL'] * rV + wc['VR'])
C['A'] = -gA * (wc['VL'] * rA - wc['VR'])
C['S'] = gS * (wc['SL'] + wc['SR'])
C['P'] = gP * (wc['SL'] - wc['SR'])
C['T'] = 4 * gT * (wc['T'])
return C
| 33.425
| 95
| 0.608826
|
2ae67bf34529e2821d3d090b120577d94a39235f
| 228
|
py
|
Python
|
readthedocs/organizations/constants.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,054
|
2015-01-01T00:58:07.000Z
|
2019-06-28T05:50:49.000Z
|
readthedocs/organizations/constants.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,282
|
2015-01-01T21:38:49.000Z
|
2019-06-28T15:41:00.000Z
|
readthedocs/organizations/constants.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 3,224
|
2015-01-01T07:38:45.000Z
|
2019-06-28T09:19:10.000Z
|
"""Constants for ACL."""
from django.utils.translation import gettext_lazy as _
READ_ONLY_ACCESS = 'readonly'
ADMIN_ACCESS = 'admin'
ACCESS_LEVELS = (
(READ_ONLY_ACCESS, _('Read-only')),
(ADMIN_ACCESS, _('Admin')),
)
| 19
| 54
| 0.697368
|
96a12b45102d112891ff494d60d5d99b9972df2e
| 1,921
|
py
|
Python
|
bromelia/config.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | null | null | null |
bromelia/config.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | null | null | null |
bromelia/config.py
|
post-cyberlabs/bromelia
|
a21c6af591fa3ebe5335f2f34b13b60f03a48b55
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
bromelia.config
~~~~~~~~~~~~~~~
This module contains configuration structures.
:copyright: (c) 2020-present Henrique Marques Ribeiro.
:license: MIT, see LICENSE for more details.
"""
import logging
import os
BASEDIR = os.getcwd()
#: Configs for statemachine.py module
STATE_MACHINE_TICKER = 0.0001
CLOSED = "Closed"
WAIT_CONN_ACK = "Wait-Conn-Ack"
WAIT_I_CEA = "Wait-I-CEA"
WAIT_CONN_ACK_ELECT = "Wait-Conn-Ack/Elect"
WAIT_RETURNS = "Wait-Returns"
I_OPEN = "I-Open"
R_OPEN = "R-Open"
OPEN = "Open"
CLOSING = "Closing"
#: Configs for setup.py module
SEND_BUFFER_MAXIMUM_SIZE = 4096*64
LISTENING_TICKER = 0.01
WAITING_CONN_TIMER = 2
SLEEP_TIMER = 4
#: Configs for bromelia.py module
BROMELIA_TICKER = STATE_MACHINE_TICKER
BROMELIA_LOADING_TICKER = 0.1
SEND_THRESHOLD_TICKER = 0.05
PROCESS_TIMER = 0.001
REQUEST_THRESHOLD = 10
ANSWER_THRESHOLD = 10
SEND_THRESHOLD = 30
#: Configs for transport.py module
TRACKING_SOCKET_EVENTS_TIMEOUT = 1
class Config(dict):
def __init__(self, defaults=None):
dict.__init__(self, defaults or {})
class DiameterLogging(object):
LOGGING_FORMAT = "%(asctime)s [%(levelname)s] [%(process)d] "\
"[%(thread)d:%(threadName)s] %(module)s [%(name)s] "\
"[%(funcName)s]: %(message)s"
LOGGING_DATE_FMT = "%Y-%m-%d %H:%M:%S,uuu"
LOGGING_PATH = os.path.join(BASEDIR, f"dsa_{os.getpid()}.log")
def __init__(self, debug=False, is_logging=False):
if debug:
DiameterLogging.LOGGING_LEVEL = logging.DEBUG
else:
DiameterLogging.LOGGING_LEVEL = logging.INFO
if is_logging:
logging.basicConfig(level=DiameterLogging.LOGGING_LEVEL,
format=DiameterLogging.LOGGING_FORMAT,
filename=DiameterLogging.LOGGING_PATH,
filemode="a")
| 26.315068
| 73
| 0.652264
|
5b19e14479887bd909674bb8978293a72e0023b8
| 4,093
|
py
|
Python
|
synthdnm/swap.py
|
james-guevara/synthdnm
|
8510cfd91438452da553d35894b63c5d75cdd47e
|
[
"MIT"
] | 6
|
2021-02-22T08:29:49.000Z
|
2021-11-26T21:24:49.000Z
|
build/lib/synthdnm/swap.py
|
james-guevara/synthdnm
|
8510cfd91438452da553d35894b63c5d75cdd47e
|
[
"MIT"
] | 1
|
2021-10-04T19:22:34.000Z
|
2021-11-16T21:22:12.000Z
|
synthdnm/swap.py
|
james-guevara/synthdnm
|
8510cfd91438452da553d35894b63c5d75cdd47e
|
[
"MIT"
] | 1
|
2020-11-06T18:57:57.000Z
|
2020-11-06T18:57:57.000Z
|
from collections import OrderedDict
# Skip families that don't have both parents
def skip_families(ped_filename):
num_parents = {}
f = open(ped_filename,"r")
for line in f:
linesplit = line.rstrip().split("\t")
fid, iid, iid_father, iid_mother, sex = linesplit[0],linesplit[1],linesplit[2],linesplit[3],linesplit[4]
if iid_father == "0" and iid_mother == "0": # this means it's one of the parents
if fid not in num_parents: num_parents[fid] = 1
else: num_parents[fid] += 1
return num_parents
# fam/ped file: FID, IID, IID_Father, IID_Mother, Sex, Phenotype
def make_family_ordered_dict(ped_filename, num_parents):
fids_od = OrderedDict()
f = open(ped_filename,"r")
for line in f:
linesplit = line.rstrip().split("\t")
fid, iid, iid_father, iid_mother, sex = linesplit[0],linesplit[1],linesplit[2],linesplit[3],linesplit[4]
if iid_father == "0" and iid_mother == "0": # this means it's one of the parents
if num_parents[fid] != 2: continue # don't use this family if there aren't 2 parents
if fid not in fids_od: fids_od[fid] = [None]*2
if sex == "1": # male (father)
fids_od[fid][0] = iid
elif sex == "2": # female (mother)
fids_od[fid][1] = iid
return fids_od
def get_keys(fids_od):
fid1_key = None
fid1_val = None
fid2_key = None
fid2_val = None
swapped_parents = []
new_families_set = set() # the set of families used in the swap
new_families_dict = {}
for i, (key,val) in enumerate(fids_od.items()):
if not fid1_key: # if fid1_key is None, make this family fid1
fid1_key = key
fid1_val = val
continue
elif not fid2_key: # if fid2_key is None, make this family fid2
fid2_key = key
fid2_val = val
        if fid1_key and fid2_key: # swap the parents of these 2 families and then make the keys None
swapped_parents.append("{}\t{}\t{}\t{}\t{}\t{}".format(fid1_key,fid2_val[0], "0", "0", "1", "0"))
swapped_parents.append("{}\t{}\t{}\t{}\t{}\t{}".format(fid1_key,fid2_val[1], "0", "0", "2", "0"))
swapped_parents.append("{}\t{}\t{}\t{}\t{}\t{}".format(fid2_key,fid1_val[0], "0", "0", "1", "0"))
swapped_parents.append("{}\t{}\t{}\t{}\t{}\t{}".format(fid2_key,fid1_val[1], "0", "0", "2", "0"))
new_families_set.add(fid1_key)
new_families_set.add(fid2_key)
new_families_dict[fid1_key] = fid2_val
new_families_dict[fid2_key] = fid1_val
fid1_key = None
fid2_key = None
return swapped_parents, new_families_set,new_families_dict
def print_new_ped(ped_filename, swapped_parents,new_families_set, new_families_dict,fout):
f = open(ped_filename,"r")
for line in f:
linesplit = line.rstrip().split("\t")
fid, iid, iid_father, iid_mother, sex, phen = linesplit[0],linesplit[1],linesplit[2],linesplit[3],linesplit[4],linesplit[5]
if fid not in new_families_set: continue
if not (iid_father == "0" and iid_mother == "0"): # this means it's *not* one of the parents
iid_father_new, iid_mother_new = new_families_dict[fid][0], new_families_dict[fid][1]
print("{}\t{}\t{}\t{}\t{}\t{}".format(fid,iid,iid_father_new,iid_mother_new,sex,phen), file=fout)
for elem in swapped_parents:
print(elem, file=fout)
def swap_ped(ped_filename):
num_parents = skip_families(ped_filename)
fids_od = make_family_ordered_dict(ped_filename, num_parents)
swapped_parents,new_families_set,new_families_dict = get_keys(fids_od)
from pathlib import Path
ped_stem = Path(ped_filename).stem
ped_parent = str(Path(ped_filename).parent) + "/"
ped_swapped_absolute_path = ped_parent + ped_stem + ".swapped.ped"
fout = open(ped_swapped_absolute_path,"w")
print_new_ped(ped_filename,swapped_parents,new_families_set, new_families_dict,fout)
fout.close()
return ped_swapped_absolute_path
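# Hedged usage sketch (not part of the original module): given a tab-separated
# .ped file, swap_ped() writes "<stem>.swapped.ped" next to it and returns that
# path. The command-line handling and the default filename below are
# illustrative assumptions only.
if __name__ == "__main__":
    import sys
    ped_path = sys.argv[1] if len(sys.argv) > 1 else "trio_cohort.ped"  # hypothetical input file
    swapped_path = swap_ped(ped_path)
    print("Wrote swapped pedigree to:", swapped_path)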
| 45.477778
| 131
| 0.637185
|
6b2bcc2500649cd053016b5daedc16848742ebc0
| 3,115
|
py
|
Python
|
__data3__analyzeImbalanceOfData.py
|
mtkier94/Surrender_Analysis
|
a346099b39f2063e3ceb88c125d754a94cfdd5fa
|
[
"MIT"
] | null | null | null |
__data3__analyzeImbalanceOfData.py
|
mtkier94/Surrender_Analysis
|
a346099b39f2063e3ceb88c125d754a94cfdd5fa
|
[
"MIT"
] | null | null | null |
__data3__analyzeImbalanceOfData.py
|
mtkier94/Surrender_Analysis
|
a346099b39f2063e3ceb88c125d754a94cfdd5fa
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import pickle5 as pickle
import os
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from functions.sub_surrender_profiles import get_risk_drivers
from global_vars import path_tables, getDataPath
def reviewDataBalance(surrender_profile):
path_data = getDataPath(surrender_profile)
# import Training and Test Data
X_train = pd.read_csv(os.path.join(path_data,r'X_train.csv'), index_col= 0)
X_test = pd.read_csv(os.path.join(path_data, r'X_test.csv'), index_col= 0)
y_train = pd.read_csv(os.path.join(path_data, r'y_train.csv'), index_col= 0).values.flatten()
y_test = pd.read_csv(os.path.join(path_data, r'y_test.csv'), index_col= 0 ).values.flatten()
# restrict data to relevant features -> assume proper exploratory data analysis
cache = get_risk_drivers(profile=surrender_profile)
features_profile_lst = []
for el in cache:
if el != 'Premium_freq':
features_profile_lst.append(el)
else:
features_profile_lst.append('Premium_freq_0')
features_profile_lst.append('Premium_freq_1')
    X_train, X_test = X_train[features_profile_lst], X_test[features_profile_lst]
# Load Scaling range used in 'Lapse_data_preparation' for later visualization
with open(os.path.join(path_data,'dict_range_scale_{}.pkl'.format(surrender_profile)), 'rb') as f:
dict_range_scale = pickle.load(f)
# Load beta0 of latent surrender model for later visualization
with open(os.path.join(path_data,r'beta0.pkl'), 'rb') as f:
beta0 = pickle.load(f)
# RUS resampling
_, y_train_rus = RandomUnderSampler(sampling_strategy= 'majority').fit_resample(X_train,y_train)
# SMOTE resampling
_, y_train_smote = SMOTE().fit_resample(X_train,y_train)
# analyze balance of data
dict_data = {'numb_train': len(y_train), 'imb_train': np.round_(sum(y_train)/len(y_train),4), 'numb_rus': len(y_train_rus), 'numb_smote': len(y_train_smote), 'numb_test': len(y_test), 'imb_test': np.round_(sum(y_test)/len(y_test),4)}
# save result for profile i
with open(os.path.join(path_tables, r'{}_DataCounts.pkl'.format(surrender_profile)), 'wb') as f:
pickle.dump(dict_data, f, pickle.HIGHEST_PROTOCOL)
print('Profile ', surrender_profile, ' analyzed.')
def createSummaryOfDataBalance():
data = {}
for i in range(4):
if os.path.exists(os.path.join(path_tables, r'{}_DataCounts.pkl'.format(i))):
with open(os.path.join(path_tables, r'{}_DataCounts.pkl'.format(i)), 'rb') as f:
data[i] = pickle.load(f)
df_data = pd.DataFrame.from_dict(data, orient = 'index')
print(df_data)
with open(os.path.join(path_tables,r'Data_review.tex'),'w') as f:
f.write(df_data.to_latex())
if __name__ == '__main__':
for i in [0,1,2,3]:
# analyze data of profile i
reviewDataBalance(i)
# create LaTeX-table for data-balance of all four surrender-profiles
createSummaryOfDataBalance()
| 39.935897
| 237
| 0.701445
|
356f7172ee3c7f6d2033af268bf78182bc349c59
| 495
|
py
|
Python
|
dodo.py
|
pmav99/geoviews
|
ad6e3f028b0532fdf604a92ffca3238c2bd8ef4f
|
[
"BSD-3-Clause"
] | 172
|
2019-11-18T17:30:53.000Z
|
2022-03-31T21:36:41.000Z
|
dodo.py
|
martinfleis/geoviews
|
30abec5741f5173e746630f7b9e4fc6f7adfa102
|
[
"BSD-3-Clause"
] | 166
|
2019-11-14T05:21:03.000Z
|
2022-03-07T17:20:00.000Z
|
dodo.py
|
martinfleis/geoviews
|
30abec5741f5173e746630f7b9e4fc6f7adfa102
|
[
"BSD-3-Clause"
] | 24
|
2019-11-29T06:20:31.000Z
|
2021-12-23T00:58:06.000Z
|
import os
if "PYCTDEV_ECOSYSTEM" not in os.environ:
os.environ["PYCTDEV_ECOSYSTEM"] = "conda"
from pyctdev import * # noqa: api
def task_pip_on_conda():
"""Experimental: provide pip build env via conda"""
return {'actions':[
# some ecosystem=pip build tools must be installed with conda when using conda...
'conda install -y pip twine wheel',
# ..and some are only available via conda-forge
'conda install -y -c conda-forge tox virtualenv'
]}
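# Hedged usage note (not part of the original file): with doit/pyctdev available,
# the task above would typically be run from the repository root as
#     doit pip_on_conda
# so that the pip-ecosystem build tooling is provisioned through conda first.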
| 30.9375
| 89
| 0.666667
|
319fd8bf5e84b63937cfa642985a647930a190e3
| 519
|
py
|
Python
|
stix_shifter/stix_transmission/src/modules/base/base_ping.py
|
kant/stix-shifter
|
164ea13c4fc34815df786897c8d882dcdc499680
|
[
"Apache-2.0"
] | 1
|
2020-01-26T04:07:55.000Z
|
2020-01-26T04:07:55.000Z
|
stix_shifter/stix_transmission/src/modules/base/base_ping.py
|
kant/stix-shifter
|
164ea13c4fc34815df786897c8d882dcdc499680
|
[
"Apache-2.0"
] | null | null | null |
stix_shifter/stix_transmission/src/modules/base/base_ping.py
|
kant/stix-shifter
|
164ea13c4fc34815df786897c8d882dcdc499680
|
[
"Apache-2.0"
] | 2
|
2019-06-26T19:23:52.000Z
|
2019-07-09T15:33:16.000Z
|
from abc import ABCMeta, abstractmethod
class BasePing(object, metaclass=ABCMeta):
@abstractmethod
def ping(self):
"""
        Sends a basic request to the datasource to confirm we are connected and authenticated.
Returns:
dict: The return value.
keys:
success (bool): True or False
error (str): error message (when success=False)
"""
pass
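# Hedged sketch (not part of the original module): a minimal concrete subclass
# might look like ExamplePing below. The api_client object and its get_status()
# method are illustrative assumptions, not part of any real connector.
class ExamplePing(BasePing):
    def __init__(self, api_client):
        # api_client is a hypothetical client exposing a get_status() method
        self.api_client = api_client
    def ping(self):
        try:
            self.api_client.get_status()
            return {"success": True}
        except Exception as err:
            return {"success": False, "error": str(err)}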
| 25.95
| 93
| 0.564547
|
3aba0ecc6ef9b0eb71fdc028c54bc38199718570
| 764
|
py
|
Python
|
test/test_issue1160.py
|
donbowman/rdflib
|
c1be731c8e6bbe997cc3f25890bbaf685499c517
|
[
"BSD-3-Clause"
] | 1,424
|
2015-01-04T13:10:22.000Z
|
2022-03-29T15:12:38.000Z
|
test/test_issue1160.py
|
donbowman/rdflib
|
c1be731c8e6bbe997cc3f25890bbaf685499c517
|
[
"BSD-3-Clause"
] | 1,148
|
2015-01-01T18:26:18.000Z
|
2022-03-31T21:51:53.000Z
|
test/test_issue1160.py
|
sa-bpelakh/rdflib
|
42d0ca9af7f3c75a463423444aa42a1b60cfabc8
|
[
"BSD-3-Clause"
] | 459
|
2015-01-03T14:41:34.000Z
|
2022-03-14T22:06:47.000Z
|
import unittest
from unittest import mock
import rdflib
from rdflib import ConjunctiveGraph
from rdflib.parser import URLInputSource
QUERY = """
SELECT DISTINCT ?g
FROM NAMED <http://ns.example.com/named#>
WHERE {
GRAPH ?g {
?s ?p ?o .
}
}
"""
class NamedGraphWithFragmentTest(unittest.TestCase):
def test_named_graph_with_fragment(self):
"""Test that fragment part of the URL is not erased."""
graph = ConjunctiveGraph()
with mock.patch("rdflib.parser.URLInputSource") as load_mock:
# We have to expect an exception here.
self.assertRaises(Exception, graph.query, QUERY)
load_mock.assert_called_with(
rdflib.URIRef("http://ns.example.com/named#"),
"nt",
)
| 23.151515
| 69
| 0.660995
|
c0b6a1e35596489d36fd0dc555843cfd9d5fabb4
| 28,832
|
py
|
Python
|
asset/views.py
|
1049759078/autoops
|
c3618a8c931a16573df7fe422333ff5d151acf85
|
[
"Apache-2.0"
] | null | null | null |
asset/views.py
|
1049759078/autoops
|
c3618a8c931a16573df7fe422333ff5d151acf85
|
[
"Apache-2.0"
] | null | null | null |
asset/views.py
|
1049759078/autoops
|
c3618a8c931a16573df7fe422333ff5d151acf85
|
[
"Apache-2.0"
] | 1
|
2018-09-18T09:01:42.000Z
|
2018-09-18T09:01:42.000Z
|
from django.shortcuts import render, redirect, HttpResponse, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from asset.models import asset, system_users, performance, web_history, data_centers
from .form import AssetForm, SystemUserForm
from names.password_crypt import encrypt_p, decrypt_p, pyecharts_add
from django.contrib.auth.models import User, Group
from guardian.shortcuts import assign_perm, get_perms
from guardian.core import ObjectPermissionChecker
from guardian.decorators import permission_required_or_403
from guardian.shortcuts import get_objects_for_user, get_objects_for_group
from django.contrib.auth.models import Permission
from guardian.models import UserObjectPermission, GroupObjectPermission
from django.views.generic import TemplateView, ListView, View, CreateView, UpdateView, DeleteView, DetailView
from django.urls import reverse_lazy
from tasks.views import ssh
from autoops import settings
from django.db.models import Q
import xlwt, time, json
from django.template import loader
from pyecharts import Gauge, Line
import threading, time, datetime
from tasks.ansible_2420.runner import AdHocRunner, CommandRunner
from tasks.ansible_2420.inventory import BaseInventory
class AssetListAll(TemplateView):
template_name = 'asset/asset.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AssetListAll, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = {
"asset_active": "active",
"asset_list_active": "active",
"Webssh": getattr(settings, 'Webssh_ip'),
"Webssh_port": getattr(settings, 'Webssh_port'),
'asset_list': get_objects_for_user(self.request.user, 'asset.read_asset')
}
kwargs.update(context)
return super(AssetListAll, self).get_context_data(**kwargs)
def post(self, request):
query = request.POST.get("name")
user = User.objects.get(username=request.user)
if user.is_superuser == 1:
ret = asset.objects.filter(Q(network_ip=query) | Q(manage_ip=query) | Q(hostname=query) | Q(
inner_ip=query) | Q(model=query) | Q(
eth0=query) | Q(eth1=query) | Q(eth2=query) | Q(eth3=query) |
Q(system=query) | Q(system_user__username=query) | Q(
data_center__data_center_list=query) | Q(
cabinet=query) |
Q(position=query) | Q(sn=query)
| Q(uplink_port=query) | Q(product_line__name=query)
)
else:
product1 = Group.objects.get(user=user)
            # restrict non-superusers to their own product line, then match any of the search fields
            ret = asset.objects.filter(Q(product_line__name=product1) & (Q(network_ip=query) | Q(manage_ip=query) | Q(hostname=query) | Q(inner_ip=query) | Q(model=query) | Q(eth0=query) | Q(eth1=query) | Q(eth2=query) | Q(eth3=query) |
                                        Q(system=query) | Q(system_user__username=query)
                                        | Q(data_center__data_center_list=query) | Q(cabinet=query) | Q(position=query) | Q(sn=query) | Q(uplink_port=query)))
return render(request, 'asset/asset.html',
{"Webssh": getattr(settings, 'Webssh_ip'),
"Webssh_port": getattr(settings, 'Webssh_port'),
"asset_active": "active",
"asset_list_active": "active", "asset_list": ret})
class AssetAdd(CreateView):
model = asset
form_class = AssetForm
template_name = 'asset/asset-add.html'
success_url = reverse_lazy('asset:asset_list')
@method_decorator(login_required)
@method_decorator(permission_required_or_403('asset.add_asset'))
def dispatch(self, *args, **kwargs):
return super(AssetAdd, self).dispatch(*args, **kwargs)
def form_valid(self, form):
self.asset_save = asset_save = form.save()
myproduct = form.cleaned_data['product_line']
mygroup = Group.objects.get(name=myproduct)
GroupObjectPermission.objects.assign_perm("read_asset", mygroup, obj=asset_save)
GroupObjectPermission.objects.assign_perm("add_asset", mygroup, obj=asset_save, )
GroupObjectPermission.objects.assign_perm("change_asset", mygroup, obj=asset_save)
GroupObjectPermission.objects.assign_perm("delete_asset", mygroup, obj=asset_save)
GroupObjectPermission.objects.assign_perm("task_asset", mygroup, obj=asset_save)
return super(AssetAdd, self).form_valid(form)
def get_success_url(self):
return super(AssetAdd, self).get_success_url()
def get_context_data(self, **kwargs):
context = {
"asset_active": "active",
"asset_list_active": "active",
}
kwargs.update(context)
return super(AssetAdd, self).get_context_data(**kwargs)
class AssetUpdate(UpdateView):
model = asset
form_class = AssetForm
template_name = 'asset/asset-update.html'
success_url = reverse_lazy('asset:asset_list')
@method_decorator(login_required)
@method_decorator(permission_required_or_403('asset.add_asset', (asset, 'id', 'pk')))
def dispatch(self, *args, **kwargs):
return super(AssetUpdate, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = {
"asset_active": "active",
"asset_list_active": "active",
}
kwargs.update(context)
return super(AssetUpdate, self).get_context_data(**kwargs)
def form_invalid(self, form):
print(form.errors)
return super(AssetUpdate, self).form_invalid(form)
def form_valid(self, form):
pk = self.kwargs.get(self.pk_url_kwarg, None)
old_myproduct = asset.objects.get(id=pk).product_line
old_mygroup = Group.objects.get(name=old_myproduct)
new_mygroup = Group.objects.get(name=form.cleaned_data['product_line'])
self.object = form.save()
if old_mygroup != new_mygroup:
GroupObjectPermission.objects.remove_perm("read_asset", old_mygroup, obj=self.object)
GroupObjectPermission.objects.remove_perm("add_asset", old_mygroup, obj=self.object)
GroupObjectPermission.objects.remove_perm("change_asset", old_mygroup, obj=self.object)
GroupObjectPermission.objects.remove_perm("delete_asset", old_mygroup, obj=self.object)
GroupObjectPermission.objects.remove_perm("task_asset", old_mygroup, obj=self.object)
GroupObjectPermission.objects.assign_perm("read_asset", new_mygroup, obj=self.object)
GroupObjectPermission.objects.assign_perm("add_asset", new_mygroup, obj=self.object)
GroupObjectPermission.objects.assign_perm("change_asset", new_mygroup, obj=self.object)
GroupObjectPermission.objects.assign_perm("delete_asset", new_mygroup, obj=self.object)
GroupObjectPermission.objects.assign_perm("task_asset", new_mygroup, obj=self.object)
return super(AssetUpdate, self).form_valid(form)
def get_success_url(self):
return super(AssetUpdate, self).get_success_url()
class AssetDetail(DetailView):
model = asset
template_name = 'asset/asset-detail.html'
@method_decorator(login_required)
@method_decorator(permission_required_or_403('asset.read_asset', (asset, 'id', 'pk')))
def dispatch(self, *args, **kwargs):
return super(AssetDetail, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs,):
pk = self.kwargs.get(self.pk_url_kwarg, None)
detail = asset.objects.get(id=pk)
context = {
"asset_active": "active",
"asset_list_active": "active",
"assets": detail,
"nid": pk,
}
kwargs.update(context)
return super(AssetDetail, self).get_context_data(**kwargs)
class AssetDel(View):
model = asset
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AssetDel, self).dispatch(*args, **kwargs)
def post(self, request):
ret = {'status': True, 'error': None, }
try:
id = request.POST.get('nid', None)
user = User.objects.get(username=request.user)
checker = ObjectPermissionChecker(user)
assets = asset.objects.get(id=id)
if checker.has_perm('delete_asset', assets, ) == True:
assets.delete()
except Exception as e:
ret = {
"static": False,
"error": '删除请求错误,没有权限{}'.format(e)
}
finally:
return HttpResponse(json.dumps(ret))
class AssetAllDel(View):
model = asset
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AssetAllDel, self).dispatch(*args, **kwargs)
def post(self, request):
ret = {'status': True, 'error': None, }
try:
ids = request.POST.getlist('id', None)
ids1 = []
for i in ids:
user = User.objects.get(username=request.user)
checker = ObjectPermissionChecker(user)
assets = asset.objects.get(id=i)
if checker.has_perm('delete_asset', assets, ) == True:
ids1.append(i)
idstring = ','.join(ids1)
asset.objects.extra(where=['id IN (' + idstring + ')']).delete()
except Exception as e:
ret['status'] = False
ret['error'] = '删除请求错误,没有权限{}'.format(e)
finally:
return HttpResponse(json.dumps(ret))
@login_required(login_url="/login.html")
def asset_hardware_update(request):
ret = {'status': True, 'error': None, 'data': None}
if request.method == 'POST':
try:
id = request.POST.get('nid', None)
obj = asset.objects.get(id=id)
ip = obj.network_ip
port = obj.port
username = obj.system_user.username
password1 = obj.system_user.password
password = decrypt_p(password1)
assets = [
{
"hostname": 'host',
"ip": ip,
"port": port,
"username": username,
"password": password,
},
]
inventory = BaseInventory(assets)
runner = AdHocRunner(inventory)
tasks = [
{"action": {"module": "setup", "args": ""}, "name": "setup"},
]
result = runner.run(tasks, "all")
data = result.results_raw['ok']['host']['setup']['ansible_facts']
hostname = data['ansible_nodename']
system = data['ansible_distribution'] + " " + data['ansible_distribution_version']
try:
a2 = "parted -l | grep \"Disk \/dev\/[a-z]d\" | awk -F\"[ ]\" '{print $3}' | awk -F\"GB\" '{print $1}'"
s = ssh(ip=ip, port=port, username=username, password=password, cmd=a2)
disk1 = s['data']
disk2 = disk1.rstrip().split("\n")
disk = "+".join(map(str, disk2)) + " 共计:{} GB".format(round(sum(map(float, disk2))))
except Exception as e:
disk = " 共计{}".format(str(sum([int(data["ansible_devices"][i]["sectors"]) * \
int(data["ansible_devices"][i]["sectorsize"]) / 1024 / 1024 / 1024 \
for i in data["ansible_devices"] if
i[0:2] in ("vd", "ss", "sd")])) + str(" GB"))
try:
a1 = "dmidecode | grep -P -A5 \"Memory\ Device\" | grep Size | grep -v \"No Module Installed\" | grep -v \"0\" | awk -F\":\" \'{print $2}\' | awk -F\" \" \'{print $1}\'"
s = ssh(ip=ip, port=port, username=username, password=password, cmd=a1)
memory1 = s['data']
if memory1 == "":
memory0 = []
memory0.append(int(round((data['ansible_memtotal_mb']) / 1000)))
else:
memory2 = memory1.rstrip().split("\n")
memory0 = []
for i in range(len(memory2)):
memory0.append((int(int(memory2[i]) / 1024)))
memory = "+".join(map(str, memory0)) + ' 共计:{} GB'.format((sum(map(int, memory0))))
except Exception as e:
memory = ' 共计:{} GB'.format(round((data['ansible_memtotal_mb'] / 1000)))
sn = data['ansible_product_serial']
model = data["ansible_system_vendor"] + " " + data['ansible_product_name']
cpu = data['ansible_processor'][1] + " {}核心".format(
data['ansible_processor_count'] * data["ansible_processor_cores"])
try:
a = "ipmitool lan print | grep -w \"IP Address \" | awk -F\":\" \ '{print $2}\'"
s = ssh(ip=ip, port=port, username=username, password=password, cmd=a)
manage = s['data']
except Exception as e:
manage = None
net = data["ansible_interfaces"][1:]
net.sort()
try:
eth0 = data['ansible_{}'.format(net[0])]['macaddress']
except Exception as e:
eth0 = None
try:
eth1 = data['ansible_{}'.format(net[1])]['macaddress']
except Exception as e:
eth1 = None
try:
eth2 = data['ansible_{}'.format(net[2])]['macaddress']
except Exception as e:
eth2 = None
try:
eth3 = data['ansible_{}'.format(net[3])]['macaddress']
except Exception as e:
eth3 = None
ass = asset.objects.filter(id=id).first()
ass.hostname = hostname
ass.manage_ip = manage
ass.system = system
ass.memory = memory
ass.disk = disk
ass.sn = sn
ass.model = model
ass.cpu = cpu
ass.eth0 = eth0
ass.eth1 = eth1
ass.eth2 = eth2
ass.eth3 = eth3
ass.save()
except Exception as e:
ret['status'] = False
            ret['error'] = '登陆账号权限不够| 请在被添加的主机安装 parted ipmitool dmidecode | 或者 删除 主服务器/root/.ssh/known_hosts 文件: {}'.format(e)
return HttpResponse(json.dumps(ret))
@login_required(login_url="/login.html")
def asset_web_ssh(request):
if request.method == 'POST':
id = request.POST.get('id', None)
obj = asset.objects.get(id=id)
a = asset.objects.get(id=id)
user = User.objects.get(username=request.user)
checker = ObjectPermissionChecker(user)
ret = {}
try:
if checker.has_perm('task_asset', a) == True:
ip = obj.network_ip
port = obj.port
username = obj.system_user.username
password = obj.system_user.password
ret = {"ip": ip,'port':port,"username": username, 'password': password, "static": True}
login_ip = request.META['REMOTE_ADDR']
web_history.objects.create(user=request.user, ip=login_ip, login_user=obj.system_user.username, host=ip)
except Exception as e:
ret['status'] = False
ret['error'] = '请求错误,{}'.format(e)
finally:
return HttpResponse(json.dumps(ret))
def Gauge_cpumem(attr, data):
bar = Gauge("", height=300)
bar.add("", attr, data)
return bar
def Line_network(d, title, title1, date, network_in, network_put):
bar = Line(d, width=1600, height=500)
bar.add(title, date, network_in,is_datazoom_show=True)
bar.add(title1, date, network_put,is_datazoom_show=True)
return bar
@login_required(login_url="/login.html")
@permission_required_or_403('asset.read_asset', (asset, 'id', 'nid'))
def asset_performance(request, nid):
template = loader.get_template('asset/asset-performance.html')
now = datetime.datetime.now()
last_time = now + datetime.timedelta(days=-7)
all = performance.objects.filter(cdate__gt=last_time)
date, cpu_use, mem_use, in_use, out_use = [], [], [], [], []
for i in all:
if i.server_id == int(nid):
date.append(i.cdate.strftime("%m-%d %H:%M"))
cpu_use.append(i.cpu_use)
mem_use.append(i.mem_use)
in_use.append(i.in_use)
out_use.append(i.out_use)
if cpu_use:
cpu_data = cpu_use[-1]
mem_data = mem_use[-1]
else:
cpu_data = 0
mem_data = 0
cpu = Gauge_cpumem(attr="CPU", data=cpu_data)
mem = Gauge_cpumem(attr="内存", data=mem_data)
network = Line_network(d="kb/s", title="进流量", title1="出流量", date=date, network_in=in_use, network_put=mem_use)
history_cpumem = Line_network(d="%", title="CPU", title1="内存", date=date, network_in=cpu_use, network_put=mem_use)
context = dict(
cpu=pyecharts_add(cpu.render_embed())[0],
mem=pyecharts_add(mem.render_embed())[0],
network=pyecharts_add(network.render_embed())[0],
history_cpumem=pyecharts_add(history_cpumem.render_embed())[0],
script_list=cpu.get_js_dependencies(),
asset_active="active",
asset_list_active="active",
onresize=" <script> window.onresize = function () { %s %s %s %s }; </script>" % (
pyecharts_add(cpu.render_embed())[1], pyecharts_add(mem.render_embed())[1],
pyecharts_add(network.render_embed())[1], pyecharts_add(history_cpumem.render_embed())[1],)
)
return HttpResponse(template.render(context, request))
class SystemUserListAll(TemplateView):
template_name = 'asset/system-user.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(SystemUserListAll, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = {
"asset_active": "active",
"system_user_list_active": "active",
'user_list': get_objects_for_user(self.request.user, 'asset.read_system_users')
}
kwargs.update(context)
return super(SystemUserListAll, self).get_context_data(**kwargs)
@login_required(login_url="/login.html")
@permission_required_or_403('add_system_users')
def system_user_add(request):
if request.method == 'POST':
form = SystemUserForm(request.POST)
if form.is_valid():
system_save = form.save()
password1 = encrypt_p(form.cleaned_data['password'])
system_save.password = password1
system_save.save()
myproduct = system_users.objects.get(name=form.cleaned_data['name']).product_line
mygroup = Group.objects.get(name=myproduct)
GroupObjectPermission.objects.assign_perm("read_system_users", mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("add_system_users", mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("change_system_users", mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("delete_system_users", mygroup, obj=system_save)
form = SystemUserForm()
return render(request, 'asset/system-user.html',
{"asset_active": "active",
"system_user_list_active": "active",
'user_list': get_objects_for_user(request.user, 'asset.read_system_users')})
else:
form = SystemUserForm()
return render(request, 'asset/system-user-add.html',
{'form': form, "asset_active": "active", "system_user_list_active": "active", })
@login_required(login_url="/login.html")
@permission_required_or_403('change_system_users', (system_users, 'id', 'nid'))
def system_user_update(request, nid):
system_user = get_object_or_404(system_users, id=nid)
if request.method == 'POST':
form = SystemUserForm(request.POST, instance=system_user)
old_product_line = system_users.objects.get(id=nid).product_line
old_mygroup = Group.objects.get(name=old_product_line)
if form.is_valid():
password = form.cleaned_data['password']
myproduct = form.cleaned_data['product_line']
if password != None:
system_save = form.save()
password1 = encrypt_p(form.cleaned_data['password'])
system_save.password = password1
system_save.save()
if old_product_line != myproduct:
mygroup = Group.objects.get(name=myproduct)
GroupObjectPermission.objects.remove_perm("read_system_users", old_mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("add_system_users", old_mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("change_system_users", old_mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("delete_system_users", old_mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("read_system_users", mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("add_system_users", mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("change_system_users", mygroup, obj=system_save)
GroupObjectPermission.objects.assign_perm("delete_system_users", mygroup, obj=system_save)
form = AssetForm()
else:
s = system_users.objects.get(id=nid)
password_old = system_users.objects.get(id=nid).password
old_product_line = system_users.objects.get(id=nid).product_line
s = form.save()
s.password = password_old
s.save()
if old_product_line != myproduct:
mygroup = Group.objects.get(name=myproduct)
GroupObjectPermission.objects.remove_perm("read_system_users", old_mygroup, obj=s)
GroupObjectPermission.objects.assign_perm("add_system_users", old_mygroup, obj=s)
GroupObjectPermission.objects.assign_perm("change_system_users", old_mygroup, obj=s)
GroupObjectPermission.objects.assign_perm("delete_system_users", old_mygroup, obj=s)
GroupObjectPermission.objects.assign_perm("read_system_users", mygroup, obj=s)
GroupObjectPermission.objects.assign_perm("add_system_users", mygroup, obj=s)
GroupObjectPermission.objects.assign_perm("change_system_users", mygroup, obj=s)
GroupObjectPermission.objects.assign_perm("delete_system_users", mygroup, obj=s)
form = AssetForm()
return redirect('system-user.html')
form = SystemUserForm(instance=system_user)
return render(request, 'asset/system-user-update.html', {'form': form, 'nid': nid, "asset_active": "active",
"system_user_list_active": "active",
})
class SystemUserDelete(View):
model = system_users
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(SystemUserDelete, self).dispatch(*args, **kwargs)
def post(self, request):
ret = {'status': True, 'error': None, }
try:
id = request.POST.get("nid", None)
user = User.objects.get(username=request.user)
checker = ObjectPermissionChecker(user)
system_u = system_users.objects.get(id=id)
if checker.has_perm('delete_system_users', system_u) == True:
system_u.delete()
except Exception as e:
ret['status'] = False
ret['error'] = '删除请求错误,没有权限{}'.format(e)
finally:
return HttpResponse(json.dumps(ret))
@login_required(login_url="/login.html")
@permission_required_or_403('read_system_users', (system_users, 'id', 'nid'))
def system_user_detail(request, nid):
detail = system_users.objects.get(id=nid)
return render(request, "asset/system-user-detail.html",
{"system_users": detail, "nid": nid, "asset_active": "active",
"system_user_list_active": "active"})
@login_required(login_url="/login.html")
@permission_required_or_403('read_system_users', (system_users, 'id', 'nid'))
def system_user_asset(request, nid):
obj = asset.objects.filter(system_user=nid)
return render(request, "asset/system-user-asset.html", {"nid": nid, "asset_list": obj,
"asset_active": "active",
"system_user_list_active": "active"})
class AssetUpload(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AssetUpload, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
with open('{}'.format(request.path[1:]), 'rb') as f:
url = request.path
urls = url.split("/")[-1]
response = HttpResponse(f, content_type='application/octet-stream')
response['Content-Disposition'] = 'attachment; filename={}'.format(urls)
return response
@login_required(login_url="/login.html")
@permission_required_or_403('asset.read_asset')
def export(request):
if request.method == "GET":
a = asset.objects.all()
bt = ['主机名', '外网IP', '管理IP', '内网IP', 'ssh端口', '型号', '系统版本', "网卡1mac地址", "网卡2mac地址", "网卡3mac地址", "网卡4mac地址",
'登陆用户', '数据中心', '机柜', '位置', '序列号', 'CPU', '内存', "硬盘", "上联端口", "出厂时间", "到保时间", '产品线', '是否启用', "备注"
, '创建时间', '更新时间', ]
wb = xlwt.Workbook(encoding='utf-8')
sh = wb.add_sheet("详情")
dateFormat = xlwt.XFStyle()
dateFormat.num_format_str = 'yyyy/mm/dd'
for i in range(len(bt)):
sh.write(0, i, bt[i])
for i in range(len(a)):
sh.write(i + 1, 0, a[i].hostname)
sh.write(i + 1, 1, a[i].network_ip)
sh.write(i + 1, 2, a[i].manage_ip)
sh.write(i + 1, 3, a[i].inner_ip)
sh.write(i + 1, 4, a[i].port)
sh.write(i + 1, 5, a[i].model)
sh.write(i + 1, 6, a[i].system)
sh.write(i + 1, 7, a[i].eth0)
sh.write(i + 1, 8, a[i].eth1)
sh.write(i + 1, 9, a[i].eth2)
sh.write(i + 1, 10, a[i].eth3)
sh.write(i + 1, 11, a[i].system_user.name)
sh.write(i + 1, 12, a[i].data_center.data_center_list)
sh.write(i + 1, 13, a[i].cabinet)
sh.write(i + 1, 14, a[i].position)
sh.write(i + 1, 15, a[i].sn)
sh.write(i + 1, 16, a[i].cpu)
sh.write(i + 1, 17, a[i].memory)
sh.write(i + 1, 18, a[i].disk)
sh.write(i + 1, 19, a[i].uplink_port)
sh.write(i + 1, 20, a[i].ship_time, dateFormat)
sh.write(i + 1, 21, a[i].end_time, dateFormat)
sh.write(i + 1, 22, a[i].product_line.name)
sh.write(i + 1, 23, a[i].is_active)
sh.write(i + 1, 24, a[i].ps)
sh.write(i + 1, 25, a[i].ctime, dateFormat)
sh.write(i + 1, 26, a[i].utime, dateFormat)
response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename=asset' + time.strftime('%Y%m%d', time.localtime(
time.time())) + '.xls'
wb.save(response)
return response
| 41.967977
| 238
| 0.579113
|
c8dde288f313489063dc24c363366afbec40396e
| 24,366
|
py
|
Python
|
cinder/tests/unit/backup/drivers/test_backup_google.py
|
aarunsai81/netapp
|
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
|
[
"Apache-2.0"
] | 11
|
2015-08-25T13:11:18.000Z
|
2020-10-15T11:29:20.000Z
|
cinder/tests/unit/backup/drivers/test_backup_google.py
|
aarunsai81/netapp
|
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
|
[
"Apache-2.0"
] | 5
|
2018-01-25T11:31:56.000Z
|
2019-05-06T23:13:35.000Z
|
cinder/tests/unit/backup/drivers/test_backup_google.py
|
aarunsai81/netapp
|
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
|
[
"Apache-2.0"
] | 11
|
2015-02-20T18:48:24.000Z
|
2021-01-30T20:26:18.000Z
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2016 Vedams Inc.
# Copyright (C) 2016 Google Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Google Backup code.
"""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import zlib
import mock
from oslo_utils import units
from cinder.backup.drivers import google as google_dr
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_google_client
from cinder.tests.unit.backup import fake_google_client2
from cinder.tests.unit import fake_constants as fake
class FakeMD5(object):
def __init__(self, *args, **kwargs):
pass
@classmethod
def digest(self):
return 'gcscindermd5'
@classmethod
def hexdigest(self):
return 'gcscindermd5'
class FakeObjectName(object):
@classmethod
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup.id)
volume = 'volume_%s' % (backup.volume_id)
prefix = volume + '_' + backup_name
return prefix
def gcs_client(func):
@mock.patch.object(google_dr.client, 'GoogleCredentials',
fake_google_client.FakeGoogleCredentials)
@mock.patch.object(google_dr.discovery, 'build',
fake_google_client.FakeGoogleDiscovery.Build)
@mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload',
fake_google_client.FakeGoogleMediaIoBaseDownload)
@mock.patch.object(hashlib, 'md5', FakeMD5)
def func_wrapper(self, *args, **kwargs):
return func(self, *args, **kwargs)
return func_wrapper
def gcs_client2(func):
@mock.patch.object(google_dr.client, 'GoogleCredentials',
fake_google_client2.FakeGoogleCredentials)
@mock.patch.object(google_dr.discovery, 'build',
fake_google_client2.FakeGoogleDiscovery.Build)
@mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload',
fake_google_client2.FakeGoogleMediaIoBaseDownload)
@mock.patch.object(google_dr.GoogleBackupDriver,
'_generate_object_name_prefix',
FakeObjectName._fake_generate_object_name_prefix)
@mock.patch.object(hashlib, 'md5', FakeMD5)
def func_wrapper(self, *args, **kwargs):
return func(self, *args, **kwargs)
return func_wrapper
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
def fake_delete(self, backup):
raise exception.BackupOperationError()
def _fake_delete_object(self, bucket_name, object_name):
raise AssertionError('delete_object method should not be called.')
class GoogleBackupDriverTestCase(test.TestCase):
"""Test Case for Google"""
_DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df'
def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
vol = {'id': volume_id,
'size': 1,
'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self,
volume_id=_DEFAULT_VOLUME_ID,
container=google_dr.CONF.backup_gcs_bucket,
parent_id=None,
service_metadata=None):
try:
db.volume_get(self.ctxt, volume_id)
except exception.NotFound:
self._create_volume_db_entry(volume_id=volume_id)
kwargs = {'size': 1,
'container': container,
'volume_id': volume_id,
'parent_id': parent_id,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'service_metadata': service_metadata,
}
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def setUp(self):
super(GoogleBackupDriverTestCase, self).setUp()
self.flags(backup_gcs_bucket='gcscinderbucket')
self.flags(backup_gcs_credential_file='test-file')
self.flags(backup_gcs_project_id='test-gcs')
self.ctxt = context.get_admin_context()
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
for _i in range(0, 64):
self.volume_file.write(os.urandom(units.Ki))
@gcs_client
def test_backup(self):
volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2'
container_name = 'test-bucket'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
result = service.backup(backup, self.volume_file)
self.assertIsNone(result)
@gcs_client
def test_backup_uncompressed(self):
volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@gcs_client
def test_backup_bz2(self):
volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@gcs_client
def test_backup_zlib(self):
volume_id = '5cea0535-b6fb-4531-9a38-000000bea094'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@gcs_client
def test_backup_default_container(self):
volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=None)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertEqual('gcscinderbucket', backup.container)
@gcs_client
@mock.patch('httplib2.proxy_info_from_url')
def test_backup_proxy_configured(self, mock_proxy_info):
google_dr.CONF.set_override("backup_gcs_proxy_url",
"http://myproxy.example.com")
google_dr.GoogleBackupDriver(self.ctxt)
mock_proxy_info.assert_called_with("http://myproxy.example.com")
@gcs_client
@mock.patch('httplib2.proxy_info_from_environment')
def test_backup_proxy_environment(self, mock_proxy_env):
google_dr.GoogleBackupDriver(self.ctxt)
mock_proxy_env.assert_called_once_with()
@gcs_client
@mock.patch('cinder.backup.drivers.google.GoogleBackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.google.GoogleBackupDriver.'
'_send_progress_notification')
def test_backup_default_container_notify(self, _send_progress,
_send_progress_end):
volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=None)
# If the backup_object_number_per_notification is set to 1,
# the _send_progress method will be called for sure.
google_dr.CONF.set_override("backup_object_number_per_notification", 1)
google_dr.CONF.set_override("backup_gcs_enable_progress_timer", False)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the backup_object_number_per_notification is increased to
# another value, the _send_progress method will not be called.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
google_dr.CONF.set_override("backup_object_number_per_notification",
10)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the timer is enabled, the _send_progress will be called,
# since the timer can trigger the progress notification.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
google_dr.CONF.set_override("backup_object_number_per_notification",
10)
google_dr.CONF.set_override("backup_gcs_enable_progress_timer", True)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
@gcs_client
def test_backup_custom_container(self):
volume_id = '1da9859e-77e5-4731-bd58-000000ca119e'
container_name = 'fake99'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertEqual(container_name, backup.container)
@gcs_client2
def test_backup_shafile(self):
volume_id = '6465dad4-22af-48f7-8a1a-000000218907'
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertEqual(container_name, backup.container)
# Verify sha contents
content1 = service._read_sha256file(backup)
self.assertEqual(64 * units.Ki / content1['chunk_size'],
len(content1['sha256s']))
@gcs_client2
def test_backup_cmp_shafiles(self):
volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2'
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service1 = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
self.assertEqual(container_name, backup.container)
# Create incremental backup with no change to contents
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
parent_id=backup.id)
service2 = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
self.assertEqual(container_name, deltabackup.container)
# Compare shas from both files
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
@gcs_client2
def test_backup_delta_two_objects_change(self):
volume_id = '30dab288-265a-4583-9abe-000000d42c67'
self.flags(backup_gcs_object_size=8 * units.Ki)
self.flags(backup_gcs_block_size=units.Ki)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service1 = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
self.assertEqual(container_name, backup.container)
        # Modify two object-sized regions, then create an incremental backup
self.volume_file.seek(2 * 8 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(4 * 8 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
parent_id=backup.id)
service2 = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
self.assertEqual(container_name, deltabackup.container)
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
# Verify that two shas are changed at index 16 and 32
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32])
@gcs_client2
def test_backup_delta_two_blocks_in_object_change(self):
volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba'
self.flags(backup_gcs_object_size=8 * units.Ki)
self.flags(backup_gcs_block_size=units.Ki)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service1 = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
self.assertEqual(container_name, backup.container)
        # Modify two blocks within one object, then create an incremental backup
self.volume_file.seek(16 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(20 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
parent_id=backup.id)
service2 = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
self.assertEqual(container_name, deltabackup.container)
# Verify that two shas are changed at index 16 and 20
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
@gcs_client
def test_create_backup_fail(self):
volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec3'
container_name = 'gcs_api_failure'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
self.assertRaises(exception.GCSApiFailure,
service.backup,
backup, self.volume_file)
@gcs_client
def test_create_backup_fail2(self):
volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec4'
container_name = 'gcs_oauth2_failure'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
self.assertRaises(exception.GCSOAuth2Failure,
service.backup,
backup, self.volume_file)
@gcs_client
@mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata',
fake_backup_metadata)
def test_backup_backup_metadata_fail(self):
"""Test of when an exception occurs in backup().
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process of an
exception handler.
"""
volume_id = '020d9142-339c-4876-a445-000000f1520c'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
# We expect that an exception be notified directly.
self.assertRaises(exception.BackupDriverException,
service.backup,
backup, self.volume_file)
@gcs_client
@mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata',
fake_backup_metadata)
@mock.patch.object(google_dr.GoogleBackupDriver, 'delete', fake_delete)
def test_backup_backup_metadata_fail2(self):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete().
"""
volume_id = '2164421d-f181-4db7-b9bd-000000eeb628'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
# We expect that the second exception is notified.
self.assertRaises(exception.BackupOperationError,
service.backup,
backup, self.volume_file)
@gcs_client
def test_restore(self):
volume_id = 'c2a81f09-f480-4325-8424-00000071685b'
backup = self._create_backup_db_entry(volume_id=volume_id)
service = google_dr.GoogleBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
service.restore(backup, volume_id, volume_file)
@gcs_client
def test_restore_fail(self):
volume_id = 'c2a81f09-f480-4325-8424-00000071685b'
container_name = 'gcs_connection_failure'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = google_dr.GoogleBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(exception.GCSConnectionFailure,
service.restore,
backup, volume_id, volume_file)
@gcs_client2
def test_restore_delta(self):
volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e'
self.flags(backup_gcs_object_size=8 * units.Ki)
self.flags(backup_gcs_block_size=units.Ki)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service1 = google_dr.GoogleBackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
        # Modify two blocks, then create an incremental backup
self.volume_file.seek(16 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(20 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
parent_id=backup.id)
self.volume_file.seek(0)
service2 = google_dr.GoogleBackupDriver(self.ctxt)
service2.backup(deltabackup, self.volume_file, True)
with tempfile.NamedTemporaryFile() as restored_file:
service2.restore(deltabackup, volume_id,
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@gcs_client
def test_delete(self):
volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31'
object_prefix = 'test_prefix'
backup = self._create_backup_db_entry(volume_id=volume_id,
service_metadata=object_prefix)
service = google_dr.GoogleBackupDriver(self.ctxt)
service.delete(backup)
@gcs_client
@mock.patch.object(google_dr.GoogleBackupDriver, 'delete_object',
_fake_delete_object)
def test_delete_without_object_prefix(self):
volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1'
backup = self._create_backup_db_entry(volume_id=volume_id)
service = google_dr.GoogleBackupDriver(self.ctxt)
service.delete(backup)
@gcs_client
def test_get_compressor(self):
service = google_dr.GoogleBackupDriver(self.ctxt)
compressor = service._get_compressor('None')
self.assertIsNone(compressor)
compressor = service._get_compressor('zlib')
self.assertEqual(zlib, compressor)
compressor = service._get_compressor('bz2')
self.assertEqual(bz2, compressor)
self.assertRaises(ValueError, service._get_compressor, 'fake')
@gcs_client
def test_prepare_output_data_effective_compression(self):
service = google_dr.GoogleBackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
result = service._prepare_output_data(fake_data)
self.assertEqual('zlib', result[0])
        self.assertGreater(len(fake_data), len(result[1]))
@gcs_client
    def test_prepare_output_data_no_compression(self):
self.flags(backup_compression_algorithm='none')
service = google_dr.GoogleBackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
result = service._prepare_output_data(fake_data)
self.assertEqual('none', result[0])
self.assertEqual(fake_data, result[1])
@gcs_client
def test_prepare_output_data_ineffective_compression(self):
service = google_dr.GoogleBackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
# Pre-compress so that compression in the driver will be ineffective.
already_compressed_data = service.compressor.compress(fake_data)
result = service._prepare_output_data(already_compressed_data)
self.assertEqual('none', result[0])
self.assertEqual(already_compressed_data, result[1])
| 41.794168
| 79
| 0.649306
|
8c2e5494d8a9ba54d7f2f92155450b2909954a6f
| 9,190
|
py
|
Python
|
fastmot/videoio.py
|
viniciusguigo/FastMOT
|
9f544c89e5d3e6dca711abee90ac604ed661397f
|
[
"MIT"
] | null | null | null |
fastmot/videoio.py
|
viniciusguigo/FastMOT
|
9f544c89e5d3e6dca711abee90ac604ed661397f
|
[
"MIT"
] | null | null | null |
fastmot/videoio.py
|
viniciusguigo/FastMOT
|
9f544c89e5d3e6dca711abee90ac604ed661397f
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from enum import Enum
from collections import deque
from urllib.parse import urlparse
import subprocess
import threading
import logging
import cv2
LOGGER = logging.getLogger(__name__)
WITH_GSTREAMER = True
class Protocol(Enum):
IMAGE = 0
VIDEO = 1
CSI = 2
V4L2 = 3
RTSP = 4
HTTP = 5
class VideoIO:
"""
Class for capturing from a video file, an image sequence, or a camera, and saving video output.
Encoding, decoding, and scaling can be accelerated using the GStreamer backend.
Parameters
----------
size : (int, int)
Width and height of each frame to output.
config : Dict
Camera and buffer configuration.
input_uri : string
URI to an input video file or capturing device.
output_uri : string
URI to an output video file.
proc_fps : int
Estimated processing speed. This depends on compute and scene complexity.
"""
def __init__(self, size, config, input_uri, output_uri=None, proc_fps=30):
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.proc_fps = proc_fps
self.camera_resolution = config['camera_resolution']
self.frame_rate = config['frame_rate']
self.buffer_size = config['buffer_size']
self.protocol = self._parse_uri(self.input_uri)
self.is_file = self.protocol == Protocol.IMAGE or self.protocol == Protocol.VIDEO
if WITH_GSTREAMER:
self.cap = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
else:
self.cap = cv2.VideoCapture(self.input_uri)
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.capture_thread = threading.Thread(target=self._capture_frames)
ret, frame = self.cap.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.cap_fps = self.cap.get(cv2.CAP_PROP_FPS)
self.do_resize = (width, height) != self.size
if self.cap_fps == 0:
self.cap_fps = self.frame_rate # fallback to config if unknown
LOGGER.info('%dx%d stream @ %d FPS', width, height, self.cap_fps)
if self.output_uri is not None:
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
output_fps = 1 / self.cap_dt
if WITH_GSTREAMER:
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), cv2.CAP_GSTREAMER, 0,
output_fps, self.size, True)
else:
fourcc = cv2.VideoWriter_fourcc(*'avc1')
self.writer = cv2.VideoWriter(self.output_uri, fourcc, output_fps, self.size, True)
@property
def cap_dt(self):
        # for live sources, cap the effective capture rate at the processing speed
return 1 / self.cap_fps if self.is_file else 1 / min(self.cap_fps, self.proc_fps)
def start_capture(self):
"""
Start capturing from video file or device.
"""
if not self.cap.isOpened():
self.cap.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.capture_thread.is_alive():
self.capture_thread.start()
def stop_capture(self):
"""
Stop capturing from video file or device.
"""
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.capture_thread.join()
def read(self):
"""
Returns the next video frame.
Returns None if there are no more frames.
"""
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
if self.do_resize:
frame = cv2.resize(frame, self.size)
return frame
def write(self, frame):
"""
Writes the next video frame.
"""
assert hasattr(self, 'writer')
self.writer.write(frame)
def release(self):
"""
Closes video file or capturing device.
"""
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.cap.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.protocol != Protocol.V4L2:
# format conversion for hardware decoder
cvt_pipeline = (
'nvvidconv interpolation-method=5 ! '
'video/x-raw, width=%d, height=%d, format=BGRx !'
'videoconvert ! appsink sync=false'
% self.size
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=%d, height=%d !'
'videoconvert ! appsink sync=false'
% self.size
)
if self.protocol == Protocol.IMAGE:
pipeline = (
'multifilesrc location=%s index=1 caps="image/%s,framerate=%d/1" ! decodebin ! '
% (
self.input_uri,
self._img_format(self.input_uri),
self.frame_rate
)
)
elif self.protocol == Protocol.VIDEO:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=%d, height=%d, '
'format=NV12, framerate=%d/1 ! '
% (
self.input_uri[6:],
*self.camera_resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=%d, height=%d, '
'format=YUY2, framerate=%d/1 ! '
% (
self.input_uri,
*self.camera_resolution,
self.frame_rate
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.protocol == Protocol.RTSP:
pipeline = 'rtspsrc location=%s latency=0 ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.HTTP:
pipeline = 'souphttpsrc location=%s ! decodebin ! ' % self.input_uri
return pipeline + cvt_pipeline
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
# use hardware encoder if found
if 'omxh264enc' in gst_elements:
h264_encoder = 'omxh264enc'
elif 'x264enc' in gst_elements:
h264_encoder = 'x264enc'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
pipeline = (
'appsrc ! autovideoconvert ! %s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
return pipeline
def _capture_frames(self):
while not self.exit_event.is_set():
ret, frame = self.cap.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# keep unprocessed frames in the buffer for file
if self.is_file:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
result = urlparse(uri)
if result.scheme == 'csi':
protocol = Protocol.CSI
elif result.scheme == 'rtsp':
protocol = Protocol.RTSP
elif result.scheme == 'http':
protocol = Protocol.HTTP
else:
if '/dev/video' in result.path:
protocol = Protocol.V4L2
elif '%' in result.path:
protocol = Protocol.IMAGE
else:
protocol = Protocol.VIDEO
return protocol
@staticmethod
def _img_format(uri):
suffix = Path(uri).suffix[1:]
return 'jpeg' if suffix == 'jpg' else suffix
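# Example of the URI-to-protocol mapping implemented by _parse_uri above
# (illustrative only; 'VideoIO' stands in for the enclosing class name):
#   VideoIO._parse_uri('csi://0')            -> Protocol.CSI
#   VideoIO._parse_uri('rtsp://host/stream') -> Protocol.RTSP
#   VideoIO._parse_uri('http://host/cam')    -> Protocol.HTTP
#   VideoIO._parse_uri('/dev/video0')        -> Protocol.V4L2
#   VideoIO._parse_uri('img_%06d.jpg')       -> Protocol.IMAGE
#   VideoIO._parse_uri('video.mp4')          -> Protocol.VIDEO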
| 35.076336
| 99
| 0.549075
|
fd594bc09f776354affa4e2156598895b231a969
| 2,706
|
py
|
Python
|
odoons/utils/git.py
|
jiksaa/odoons
|
6845b5cd1ad73a1d44f04772b63431c5a54cfca7
|
[
"MIT"
] | null | null | null |
odoons/utils/git.py
|
jiksaa/odoons
|
6845b5cd1ad73a1d44f04772b63431c5a54cfca7
|
[
"MIT"
] | 3
|
2021-10-05T14:29:51.000Z
|
2021-10-17T21:52:41.000Z
|
odoons/utils/git.py
|
jiksaa/odoons
|
6845b5cd1ad73a1d44f04772b63431c5a54cfca7
|
[
"MIT"
] | null | null | null |
import os
import subprocess
from . import printing
class Git:
def __init__(self, path, url, branch=None, commit=None):
self._path = path
self._url = url
self._branch = str(branch) if branch else None
self._commit = str(commit) if commit else None
def is_git_directory(self):
command = ["git", "-C", self._path, "status"]
process = subprocess.call(command, stderr=subprocess.STDOUT, stdout=open(os.devnull, "w"))
return process == 0
def is_frozen(self):
return bool(self._commit)
def clone(self):
if self.is_git_directory():
return self.update()
command = ["git", "clone", "--depth", "1"]
if not self.is_frozen():
command += ["-b", self._branch]
command += [self._url, self._path]
printing.debug("Running command:" + str(command))
printing.next_muted()
sp = subprocess.Popen(command)
sp.wait()
printing.reset()
if not self.is_frozen() or sp.returncode != 0:
return sp.returncode
printing.info("Repository is frozen to: {}".format(self._commit))
return self.checkout()
def update(self):
if not self.is_git_directory():
return self.clone()
path = os.path.abspath(self._path)
git_command = ["git", "-C", path]
printing.next_muted()
subprocess.run(git_command + ["fetch", "origin"])
if not self.is_frozen():
process = subprocess.run(git_command + ["reset", "--hard", "origin/" + self._branch])
printing.reset()
return process.returncode
return self.checkout()
def checkout(self):
if not self.is_git_directory():
return self.clone()
fetch_return = self.fetch_commit()
if fetch_return != 0:
printing.error("Error fetching commit")
return fetch_return
checkout_return = self.checkout_commit()
return checkout_return
def checkout_commit(self):
checkout_command = ["git", "-C", self._path, "reset", "--hard", self._commit]
printing.debug("Running command:" + str(checkout_command))
printing.next_muted()
sp = subprocess.Popen(checkout_command)
sp.wait()
printing.reset()
return sp.returncode
def fetch_commit(self):
checkout_command = ["git", "-C", self._path, "fetch", "--depth", "1", "origin", self._commit]
printing.debug("Running command:" + str(checkout_command))
printing.next_muted()
sp = subprocess.Popen(checkout_command)
sp.wait()
printing.reset()
return sp.returncode
| 31.465116
| 101
| 0.592757
|
5ca9aaa116e75b8f693589d1bcc0031d5ace0277
| 7,635
|
py
|
Python
|
.dev_scripts/visualize_lr.py
|
imabackstabber/mmcv
|
b272c09b463f00fd7fdd455f7bd4a055f9995521
|
[
"Apache-2.0"
] | null | null | null |
.dev_scripts/visualize_lr.py
|
imabackstabber/mmcv
|
b272c09b463f00fd7fdd455f7bd4a055f9995521
|
[
"Apache-2.0"
] | null | null | null |
.dev_scripts/visualize_lr.py
|
imabackstabber/mmcv
|
b272c09b463f00fd7fdd455f7bd4a055f9995521
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import json
import os
import os.path as osp
import time
import warnings
from collections import OrderedDict
from unittest.mock import patch
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import mmcv
from mmcv.runner import build_runner
from mmcv.utils import get_logger
def parse_args():
    parser = argparse.ArgumentParser(
        description='Visualize the given config '
        'of learning rate and momentum, and this '
        'script will overwrite the log_config')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--work-dir', default='./', help='the dir to save logs and models')
    parser.add_argument(
        '--num-iters', type=int, default=300, help='The number of iters per epoch')
    parser.add_argument(
        '--num-epochs', type=int, default=300, help='Only used in EpochBasedRunner')
parser.add_argument(
'--window-size',
default='12*14',
help='Size of the window to display images, in format of "$W*$H".')
    parser.add_argument(
        '--log-interval', type=int, default=10, help='The interval of TextLoggerHook')
args = parser.parse_args()
return args
class SimpleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def train_step(self, *args, **kwargs):
return dict()
def val_step(self, *args, **kwargs):
return dict()
def iter_train(self, data_loader, **kwargs):
self.mode = 'train'
self.data_loader = data_loader
self.call_hook('before_train_iter')
self.call_hook('after_train_iter')
self._inner_iter += 1
self._iter += 1
def epoch_train(self, data_loader, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loader = data_loader
self._max_iters = self._max_epochs * len(self.data_loader)
self.call_hook('before_train_epoch')
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
def log(self, runner):
cur_iter = self.get_iter(runner, inner_iter=True)
log_dict = OrderedDict(
mode=self.get_mode(runner),
epoch=self.get_epoch(runner),
iter=cur_iter)
# only record lr of the first param group
cur_lr = runner.current_lr()
if isinstance(cur_lr, list):
log_dict['lr'] = cur_lr[0]
else:
assert isinstance(cur_lr, dict)
log_dict['lr'] = {}
for k, lr_ in cur_lr.items():
assert isinstance(lr_, list)
log_dict['lr'].update({k: lr_[0]})
cur_momentum = runner.current_momentum()
if isinstance(cur_momentum, list):
log_dict['momentum'] = cur_momentum[0]
else:
assert isinstance(cur_momentum, dict)
log_dict['momentum'] = {}
for k, lr_ in cur_momentum.items():
assert isinstance(lr_, list)
log_dict['momentum'].update({k: lr_[0]})
log_dict = dict(log_dict, **runner.log_buffer.output)
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner)
return log_dict
@patch('torch.cuda.is_available', lambda: False)
@patch('mmcv.runner.EpochBasedRunner.train', epoch_train)
@patch('mmcv.runner.IterBasedRunner.train', iter_train)
@patch('mmcv.runner.hooks.TextLoggerHook.log', log)
def run(cfg, logger):
momentum_config = cfg.get('momentum_config')
lr_config = cfg.get('lr_config')
model = SimpleModel()
optimizer = SGD(model.parameters(), 0.1, momentum=0.8)
cfg.work_dir = cfg.get('work_dir', './')
workflow = [('train', 1)]
if cfg.get('runner') is None:
cfg.runner = {
'type': 'EpochBasedRunner',
'max_epochs': cfg.get('total_epochs', cfg.num_epochs)
}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
batch_size = 1
data = cfg.get('data')
if data:
batch_size = data.get('samples_per_gpu')
fake_dataloader = DataLoader(
list(range(cfg.num_iters)), batch_size=batch_size)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=None))
log_config = dict(
interval=cfg.log_interval, hooks=[
dict(type='TextLoggerHook'),
])
runner.register_training_hooks(lr_config, log_config=log_config)
runner.register_momentum_hook(momentum_config)
runner.run([fake_dataloader], workflow)
def plot_lr_curve(json_file, cfg):
data_dict = dict(LearningRate=[], Momentum=[])
assert os.path.isfile(json_file)
with open(json_file) as f:
for line in f:
log = json.loads(line.strip())
data_dict['LearningRate'].append(log['lr'])
data_dict['Momentum'].append(log['momentum'])
wind_w, wind_h = (int(size) for size in cfg.window_size.split('*'))
# if legend is None, use {filename}_{key} as legend
fig, axes = plt.subplots(2, 1, figsize=(wind_w, wind_h))
plt.subplots_adjust(hspace=0.5)
font_size = 20
for index, (updater_type, data_list) in enumerate(data_dict.items()):
ax = axes[index]
if cfg.runner.type == 'EpochBasedRunner':
ax.plot(data_list, linewidth=1)
ax.xaxis.tick_top()
ax.set_xlabel('Iters', fontsize=font_size)
ax.xaxis.set_label_position('top')
sec_ax = ax.secondary_xaxis(
'bottom',
functions=(lambda x: x / cfg.num_iters * cfg.log_interval,
lambda y: y * cfg.num_iters / cfg.log_interval))
sec_ax.tick_params(labelsize=font_size)
sec_ax.set_xlabel('Epochs', fontsize=font_size)
else:
# plt.subplot(2, 1, index + 1)
x_list = np.arange(len(data_list)) * cfg.log_interval
ax.plot(x_list, data_list)
ax.set_xlabel('Iters', fontsize=font_size)
ax.set_ylabel(updater_type, fontsize=font_size)
if updater_type == 'LearningRate':
if cfg.get('lr_config'):
title = cfg.lr_config.type
else:
title = 'No learning rate scheduler'
else:
if cfg.get('momentum_config'):
title = cfg.momentum_config.type
else:
title = 'No momentum scheduler'
ax.set_title(title, fontsize=font_size)
ax.grid()
# set tick font size
ax.tick_params(labelsize=font_size)
save_path = osp.join(cfg.work_dir, 'visualization-result')
plt.savefig(save_path)
print(f'The learning rate graph is saved at {save_path}.png')
plt.show()
def main():
args = parse_args()
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
cfg = mmcv.Config.fromfile(args.config)
cfg['num_iters'] = args.num_iters
cfg['num_epochs'] = args.num_epochs
cfg['log_interval'] = args.log_interval
cfg['window_size'] = args.window_size
log_path = osp.join(cfg.get('work_dir', './'), f'{timestamp}.log')
json_path = log_path + '.json'
logger = get_logger('mmcv', log_path)
run(cfg, logger)
plot_lr_curve(json_path, cfg)
if __name__ == '__main__':
main()
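# Example invocation (illustrative; the config path is hypothetical):
#   python .dev_scripts/visualize_lr.py configs/example_config.py --work-dir ./lr_vis --log-interval 10
# The learning-rate/momentum curves are written to <work-dir>/visualization-result.png.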
| 33.051948
| 78
| 0.62685
|
e44f2c0c47246a28632a2064e610615ffca3f608
| 943
|
py
|
Python
|
tests/test_code.py
|
thibaudcolas/draftjs_exporter_markdown
|
00d8ae4ff1c63a80d6a9bd92375d413c75090a74
|
[
"MIT"
] | 2
|
2020-07-30T20:35:27.000Z
|
2020-11-16T08:22:03.000Z
|
tests/test_code.py
|
thibaudcolas/draftjs_exporter_markdown
|
00d8ae4ff1c63a80d6a9bd92375d413c75090a74
|
[
"MIT"
] | 14
|
2018-09-16T12:13:16.000Z
|
2022-01-25T20:24:40.000Z
|
tests/test_code.py
|
thibaudcolas/draftjs_exporter_markdown
|
00d8ae4ff1c63a80d6a9bd92375d413c75090a74
|
[
"MIT"
] | 2
|
2018-09-14T23:05:22.000Z
|
2020-11-16T08:22:10.000Z
|
from unittest import TestCase
from draftjs_exporter.dom import DOM
from draftjs_exporter_markdown.code import code_element, code_wrapper
class test_code_element(TestCase):
def test_works(self):
self.assertEqual(DOM.render(code_element({
'block': {},
'children': 'test',
})), 'test\n')
def test_block_end(self):
block = {
'key': 'a',
'type': 'code-block',
'text': 'test',
'depth': 0,
}
self.assertEqual(DOM.render(code_element({
'block': block,
'blocks': [
dict(block, **{'key': 'b'}),
block,
],
'children': 'test',
})), 'test\n```\n\n')
class test_code_wrapper(TestCase):
def test_works(self):
self.assertEqual(DOM.render(code_wrapper({
'block': {},
'children': 'test',
})), '```\n')
| 24.815789
| 69
| 0.501591
|
06730016feb1f529e847a6c05fcad1dcac24d49f
| 1,024
|
py
|
Python
|
examples/pytest_xdist.py
|
snguyenthanh/sanic
|
ae91852cd586c67ddfe10669eed18b3ded332d2f
|
[
"MIT"
] | 5
|
2018-05-10T19:50:27.000Z
|
2018-05-10T20:07:05.000Z
|
examples/pytest_xdist.py
|
snguyenthanh/sanic
|
ae91852cd586c67ddfe10669eed18b3ded332d2f
|
[
"MIT"
] | 11
|
2021-07-10T17:14:47.000Z
|
2022-02-24T07:32:36.000Z
|
examples/pytest_xdist.py
|
snguyenthanh/sanic
|
ae91852cd586c67ddfe10669eed18b3ded332d2f
|
[
"MIT"
] | 1
|
2020-04-03T14:20:38.000Z
|
2020-04-03T14:20:38.000Z
|
"""pytest-xdist example for sanic server
Install testing tools:
$ pip install pytest pytest-xdist
Run with xdist params:
$ pytest examples/pytest_xdist.py -n 8 # 8 workers
"""
import re
from sanic import Sanic
from sanic.response import text
from sanic.testing import PORT as PORT_BASE, SanicTestClient
import pytest
@pytest.fixture(scope="session")
def test_port(worker_id):
m = re.search(r'[0-9]+', worker_id)
if m:
num_id = m.group(0)
else:
num_id = 0
port = PORT_BASE + int(num_id)
return port
@pytest.fixture(scope="session")
def app():
app = Sanic()
@app.route('/')
async def index(request):
return text('OK')
return app
@pytest.fixture(scope="session")
def client(app, test_port):
return SanicTestClient(app, test_port)
@pytest.mark.parametrize('run_id', range(100))
def test_index(client, run_id):
request, response = client._sanic_endpoint_test('get', '/')
assert response.status == 200
assert response.text == 'OK'
| 20.48
| 63
| 0.676758
|
e666f207ec98d85515f6217f83d42feca37d48c2
| 307
|
py
|
Python
|
pymc/tests/test_realization.py
|
matthew-brett/pymc
|
3a31613f056e7993a449d89bafef5fdaa40d47e9
|
[
"MIT"
] | 5
|
2015-12-03T09:42:44.000Z
|
2021-06-06T19:23:29.000Z
|
pymc/tests/test_realization.py
|
matthew-brett/pymc
|
3a31613f056e7993a449d89bafef5fdaa40d47e9
|
[
"MIT"
] | 1
|
2016-09-27T02:00:41.000Z
|
2016-09-27T02:15:32.000Z
|
pymc/tests/test_realization.py
|
matthew-brett/pymc
|
3a31613f056e7993a449d89bafef5fdaa40d47e9
|
[
"MIT"
] | 1
|
2017-10-27T13:27:32.000Z
|
2017-10-27T13:27:32.000Z
|
from numpy.testing import *
from pymc.gp import *
from test_mean import M, x
from test_cov import C
from numpy import *
from copy import copy
# Impose observations on the GP
class test_realization(TestCase):
def test(self):
for i in range(3):
f = Realization(M, C)
f(x)
| 21.928571
| 33
| 0.664495
|
575c94823addbba44e6ea2f5ef5ed1d2383fda62
| 630
|
py
|
Python
|
ada/migrations/0012_auto_20210511_0657.py
|
praekeltfoundation/ndoh-hub
|
91d834ff8fe43b930a73d8debdaa0e6af78c5efc
|
[
"BSD-3-Clause"
] | null | null | null |
ada/migrations/0012_auto_20210511_0657.py
|
praekeltfoundation/ndoh-hub
|
91d834ff8fe43b930a73d8debdaa0e6af78c5efc
|
[
"BSD-3-Clause"
] | 126
|
2016-07-12T19:39:44.000Z
|
2022-03-24T13:39:38.000Z
|
ada/migrations/0012_auto_20210511_0657.py
|
praekeltfoundation/ndoh-hub
|
91d834ff8fe43b930a73d8debdaa0e6af78c5efc
|
[
"BSD-3-Clause"
] | 3
|
2016-09-28T13:16:11.000Z
|
2020-11-07T15:32:37.000Z
|
# Generated by Django 2.2.20 on 2021-05-11 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("ada", "0011_redirecturl_url")]
operations = [
migrations.RenameField(
model_name="redirecturlsentry", old_name="url", new_name="symptom_check_url"
),
migrations.AlterField(
model_name="redirecturl",
name="url",
field=models.URLField(
blank=True,
default="https://hub.momconnect.co.za/confirmredirect",
max_length=255,
),
),
]
| 26.25
| 88
| 0.577778
|
62d0abb5f6a96d28762b387f495497c577ff2cf7
| 505
|
py
|
Python
|
plans/templatetags/plans_tags.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 3
|
2020-04-26T06:28:50.000Z
|
2021-04-05T08:02:26.000Z
|
plans/templatetags/plans_tags.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 10
|
2020-06-05T17:36:10.000Z
|
2022-03-11T23:16:42.000Z
|
plans/templatetags/plans_tags.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 5
|
2021-04-08T08:43:49.000Z
|
2021-11-27T06:36:46.000Z
|
from django import template
from plans.functions import plan_authority_get_level_areas
register = template.Library()
@register.simple_tag
def plan_authority_level_areas(plan, authority, level, spliter=', '):
if not plan or not authority or not level:
return ''
level_areas = plan._current_log['level_areas']
my_level_areas = plan_authority_get_level_areas(authority, level_areas)
areas = my_level_areas.get(level) or []
return spliter.join([a['address'] for a in areas])
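# Usage in a Django template (illustrative; the context variables and level name are hypothetical):
#   {% load plans_tags %}
#   {% plan_authority_level_areas plan authority level ', ' %}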
| 28.055556
| 75
| 0.750495
|
d6347abaca0d1960f2ef96032bbb823f7cd889ae
| 392
|
py
|
Python
|
bootcamp/feeds/permission.py
|
Fadykhallaf/Signet
|
2a8edcd85324d24b50d7d14fc980c8e9f88bfd26
|
[
"MIT"
] | null | null | null |
bootcamp/feeds/permission.py
|
Fadykhallaf/Signet
|
2a8edcd85324d24b50d7d14fc980c8e9f88bfd26
|
[
"MIT"
] | null | null | null |
bootcamp/feeds/permission.py
|
Fadykhallaf/Signet
|
2a8edcd85324d24b50d7d14fc980c8e9f88bfd26
|
[
"MIT"
] | null | null | null |
# This is a custom permission that allows only the post owner to edit it.
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
message = "You Must Be The Owner Of This Post"
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return obj.user == request.user
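# Minimal usage sketch (illustrative; 'PostViewSet' and its model are hypothetical):
# from rest_framework import viewsets
#
# class PostViewSet(viewsets.ModelViewSet):
#     permission_classes = (IsOwnerOrReadOnly,)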
| 32.666667
| 67
| 0.729592
|
aae606f6ef4a0aaad6e011106aa84dfeb2d9da93
| 571
|
py
|
Python
|
pandoc-filter/debug.py
|
NMarkgraf/Quantitative-Methoden-der-W-Informatik
|
0b0be8d832eadce774a01047cd978f9599d29ca5
|
[
"CC0-1.0"
] | null | null | null |
pandoc-filter/debug.py
|
NMarkgraf/Quantitative-Methoden-der-W-Informatik
|
0b0be8d832eadce774a01047cd978f9599d29ca5
|
[
"CC0-1.0"
] | null | null | null |
pandoc-filter/debug.py
|
NMarkgraf/Quantitative-Methoden-der-W-Informatik
|
0b0be8d832eadce774a01047cd978f9599d29ca5
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import pip
print("\nsys.version:", sys.version, file=sys.stderr)
print("\nsys.platform:", sys.platform, file=sys.stderr)
print("\nsys.prefix:", sys.prefix, file=sys.stderr)
print("\nsys.path:", sys.path, file=sys.stderr)
#print("\nsys.path_importer_cache:", sys.path_importer_cache, file=sys.stderr)
print("\npip.__version__:", pip.__version__, file=sys.stderr)
print("\npip.__path__:", pip.__path__, file=sys.stderr)
import panflute as pf
print("\nsys.path nach import panflute:", sys.path, file=sys.stderr)
| 31.722222
| 78
| 0.725044
|
1550680f24301727ebb7feb5c1701969992d5758
| 1,146
|
py
|
Python
|
setup.py
|
XiaotingChen/tfmodisco
|
17cbafe806942304a02e8134fe10224bdff38b0c
|
[
"MIT"
] | null | null | null |
setup.py
|
XiaotingChen/tfmodisco
|
17cbafe806942304a02e8134fe10224bdff38b0c
|
[
"MIT"
] | null | null | null |
setup.py
|
XiaotingChen/tfmodisco
|
17cbafe806942304a02e8134fe10224bdff38b0c
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages
from distutils.core import setup
if __name__ == '__main__':
setup(include_package_data=True,
description='TF MOtif Discovery from Importance SCOres',
long_description="""Algorithm for discovering consolidated patterns from base-pair-level importance scores""",
url='https://github.com/kundajelab/tfmodisco',
version='0.5.9.2',
packages=find_packages(),
package_data={
'': ['cluster/phenograph/louvain/*convert*', 'cluster/phenograph/louvain/*community*', 'cluster/phenograph/louvain/*hierarchy*']
},
zip_safe=False,
setup_requires=[],
install_requires=['numpy>=1.9', 'joblib>=0.11',
'scikit-learn>=0.19',
'h5py>=2.5', 'leidenalg>=0.7.0',
'tqdm>=4.38.0', 'psutil>=5.4.8',
'matplotlib>=2.2.5'],
extras_require={
'tensorflow': ['tensorflow>=1.7'],
'tensorflow with gpu': ['tensorflow-gpu>=1.7']},
scripts=[],
name='modisco')
| 44.076923
| 144
| 0.550611
|
dc539aa9d001afcbaec9160b053a7c0e4b443e9d
| 3,283
|
py
|
Python
|
.history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210709221059.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210709221059.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210709221059.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
""" Global and local Scopes
Scopes and Namespaces
When an object is assigned to a variable # a = 10
that variable points to some object
and we say that the variable (name) is bound to that object
That object can be accessed using that name in various parts of our code
# ### I can't reference that (a) just anywhere in my code!
That variable name and it's binding (name and object) only "exist" in specific parts of our code
The porton of code where that name/binding is defined, is called the lexical scope of the variable
These bindings are stored in namespaces
(each scope has its own namespace)
The global scope
The global scope is essentially the module scope
It spans a single file only
There is no concept of a truly global (across all the modules in our app) scope in Python
The only exceptions to this are some of the built-in globally available objects, such as:
True False None dict print
The built-in global variables can be used anywhere inside our module
including inside any function
Global scopes are nested inside the built-in scope
    [Scope nesting diagram]  The built-in scope encloses every module's (global) scope;
    each scope has its own namespace mapping names to objects, e.g.
        Module 1's namespace:  var1  -> 0xA345E,   func1 -> 0xFF34A
        Module 2's namespace:  its own name -> object bindings
If I reference a variable name inside a scope and Python does not find it in that scope's namespace, it looks for it in the enclosing scope's namespace
Examples
module1.py Python does not find True or print in the current (module/global) scope
print(True)                 So, it looks for them in the enclosing scope -> built-in
Finds them there -> True
module2.py Python does not find a or print in the current (module/global) scope
print(a) So, it looks for them in the enclosing scope -> built-in
Find print, but not a -> run-time Name Error
module3.py
print = lambda x: 'hello {0}!'.format(x)
s = print('world') Python finds print in the module scope
So it uses it
s -> hello world
The Local Scope
When we create functions, we can create variable names inside those functions (using assignments)
e.g. a = 10
Variables defined inside a function are not created until the function is called
Every time the function is called, a new scope is created
Variables defined inside the function are assigned to the scope -> Function Local scope
-> Local scope
The actual object the variable references could be different each time the function is called
(this is why recursion works!)
Examples
        def my_func(a, b):          # local names: a, b, c
            c = a * b
            return c

        my_func('z', 2)    ->  this call's local scope:  a -> 'z',  b -> 2,  c -> 'zz'
        my_func(10, 5)     ->  this call's local scope:  a -> 10,   b -> 5,  c -> 50
        (same names, but a different local scope for each call)
"""
| 28.547826
| 102
| 0.576911
|
c62d2d6592920dd14408e915f3deb9dc9b0308f3
| 2,060
|
py
|
Python
|
maml_nonexclusive/maml_classification/data/miniImagenet/proc_images.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 7
|
2020-03-15T12:14:07.000Z
|
2021-12-01T07:01:09.000Z
|
maml_nonexclusive/maml_classification/data/miniImagenet/proc_images.py
|
JustinDurham/google-research
|
9049acf9246c1b75170f0c6757e62a8f619a9db6
|
[
"Apache-2.0"
] | 22
|
2020-07-08T13:10:46.000Z
|
2022-03-12T00:40:10.000Z
|
maml_nonexclusive/maml_classification/data/miniImagenet/proc_images.py
|
JustinDurham/google-research
|
9049acf9246c1b75170f0c6757e62a8f619a9db6
|
[
"Apache-2.0"
] | 4
|
2020-06-15T03:06:53.000Z
|
2021-08-06T16:38:33.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for converting the csv datafiles into one directory of images per class (which is how the data is loaded by the MAML code)
Acquire miniImagenet from Ravi & Larochelle '17, along with the train, val, and test csv files. Put the
csv files in the miniImagenet directory and put the images in the directory 'miniImagenet/images/'.
Then run this script from the miniImagenet directory:
cd data/miniImagenet/
python proc_images.py
"""
from __future__ import print_function
import csv
import glob
import os
from PIL import Image
path_to_images = 'mini_imagenet/'
all_images = glob.glob(path_to_images + '*')
# Resize images
for i, image_file in enumerate(all_images):
im = Image.open(image_file)
im = im.resize((84, 84), resample=Image.LANCZOS)
im.save(image_file)
if i % 500 == 0:
print(i)
# Put in correct directory
for datatype in ['train', 'val', 'test']:
os.system('mkdir ' + datatype)
with open(datatype + '.csv', 'r') as f:
reader = csv.reader(f, delimiter=',')
last_label = ''
for i, row in enumerate(reader):
if i == 0: # skip the headers
continue
label = row[1]
image_name = row[0]
if label != last_label:
cur_dir = datatype + '/' + label + '/'
os.system('mkdir ' + cur_dir)
last_label = label
os.system('mv mini_imagenet/' + image_name + ' ' + cur_dir)
| 33.225806
| 116
| 0.668932
|
454d98de87535edd7289ee422cf749b4017d988a
| 6,338
|
py
|
Python
|
plugins/games/hangman/hangman.py
|
wekko/bov
|
b446b267b3ad2881545a14440f4f01d300f46964
|
[
"MIT"
] | null | null | null |
plugins/games/hangman/hangman.py
|
wekko/bov
|
b446b267b3ad2881545a14440f4f01d300f46964
|
[
"MIT"
] | null | null | null |
plugins/games/hangman/hangman.py
|
wekko/bov
|
b446b267b3ad2881545a14440f4f01d300f46964
|
[
"MIT"
] | null | null | null |
from handler.base_plugin import BasePlugin
from random import choice
import json
class HangmanPlugin(BasePlugin):
__slots__ = ("commands_start", "save_data", "commands_stop", "commands_attempt", "prefixes", "games",
"words")
def __init__(self, commands_start=None, commands_stop=None, commands_attempt=None,
prefixes=(), words=None, save_data=False):
"""Game "Hangman"."""
super().__init__()
self.save_data = save_data
self.prefixes = prefixes
self.commands_start = commands_start if commands_start else ["виселица"]
self.commands_attempt = commands_attempt if commands_attempt else ["буква", "б"]
self.commands_stop = commands_stop if commands_stop else ["стоп"]
self.games = {}
self.words = words if words else ("любовь", "ненависть", "страсть", "жизнь", "счастье", "крот", "бегемот")
games_file = self.get_path("games.json")
try:
with open(games_file, "r") as outfile:
data = json.load(outfile)
for k, v in data.items():
self.games[int(k)] = v
except json.decoder.JSONDecodeError:
self.bot.logger.error("Failed to load games for \"Hangman\"")
except FileNotFoundError:
pass
        # sort longest-first so that longer command aliases are matched before shorter prefixes
        self.commands_start = sorted(self.commands_start, key=len, reverse=True)
        self.commands_attempt = sorted(self.commands_attempt, key=len, reverse=True)
        self.commands_stop = sorted(self.commands_stop, key=len, reverse=True)
self.description = [f"Виселица",
f"Игра \"Виселица\" - игроки вводят по букве и стараются угадать слово."
"Если не получится отгадать за 8 попыток - вы проиграете!",
f"{self.prefixes[0]}{self.commands_start[0]} - начать игру.",
f"{self.prefixes[0]}{self.commands_attempt[0]} [буква] - назвать букву [буква].",
f"{self.prefixes[0]}{self.commands_stop[0]} - остановить игру."]
def stop(self):
if not self.save_data:
return
games_file = self.get_path("games.json")
with open(games_file, "w") as outfile:
json.dump(self.games, outfile)
async def check_message(self, msg):
if msg.is_out:
return False
check_text = ""
for p in self.prefixes:
if msg.text.startswith(p):
check_text = msg.text.replace(p, "", 1)
break
if any(check_text.startswith(v.lower()) for v in self.commands_start):
msg.meta["_command"] = "start"
return True
if self in msg.occupied_by and any(check_text.startswith(v.lower()) for v in self.commands_stop):
msg.meta["_command"] = "stop"
return True
if self in msg.occupied_by:
for v in self.commands_attempt:
if check_text.startswith(v + " "):
msg.meta["_command"] = "attempt"
msg.meta["_letter"] = check_text[len(v) + 1:]
return True
return False
@staticmethod
def describe_game(current):
text = ["🙊 Слово: "]
for i in current[0]:
text.append(i if i in current[1] else "_")
text.append(" ")
text.pop(-1)
text.append("\n🙌 Открытые буквы: ")
for i in current[1]:
if i in current[0]:
text.append(i)
text.append(" ")
text.pop(-1)
text.append(f"\n❤ Осталось жизней: {current[2]}")
return " ".join(text)
async def global_before_message_checks(self, msg):
if self.games.get(msg.peer_id, False) is False:
return
msg.occupied_by.append(self)
return
# word, opened, lives
async def process_message(self, msg):
if msg.meta["_command"] == "stop":
current = self.games.get(msg.peer_id, [])
if current:
del self.games[msg.peer_id]
return await msg.answer("Ваша партия в \"виселицу\" закончена. Слово я вам не назову 😏")
return
if msg.meta["_command"] == "start":
current = self.games.get(msg.peer_id, [])
if current:
return await msg.answer(self.describe_game(self.games[msg.peer_id]))
if msg.occupied_by:
try:
reason = " Вы заняты плагином: " + msg.occupied_by[0].description[0]
except (AttributeError, IndexError):
reason = ""
return await msg.answer("Вы не можете сейчас начать игру!" + reason )
self.games[msg.peer_id] = [choice(self.words), "", 8]
tip = f"\n\n{self.prefixes[0]}{self.commands_attempt[0]} - назвать букву, " \
f"{self.prefixes[0]}{self.commands_stop[0]} - остановить игру"
return await msg.answer(self.describe_game(self.games[msg.peer_id]) + tip)
if msg.meta["_command"] == "attempt":
current = self.games.get(msg.peer_id, [])
if not current:
return
letter = msg.meta.get("_letter", "")
if len(letter) != 1 or not letter.isalpha():
return await msg.answer("Введите только одну букву!")
if letter in current[1]:
return await msg.answer("Вы уже вводили эту букву!")
current[1] += letter
if letter not in current[0]:
if current[2] == 1:
if msg.peer_id in self.games:
del self.games[msg.peer_id]
return await msg.answer("Вы проиграли! Слово я вам не назову 😏")
current[2] -= 1
return await msg.answer(f"Вы не угадали! У вас осталось {current[2]} жизней.\n"+
self.describe_game(self.games[msg.peer_id]))
for i in current[0]:
if i not in current[1]:
return await msg.answer(f"Верно! Продолжайте в том же духе!\n" +
self.describe_game(self.games[msg.peer_id]))
if msg.peer_id in self.games:
del self.games[msg.peer_id]
return await msg.answer(f"🎉 Верно! Ура\n👉 Слово: " + current[0])
| 33.712766
| 114
| 0.541022
|
78d137c95ead9f49e05a8d3c52e2191775d791be
| 2,043
|
py
|
Python
|
homeassistant/components/mochad/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 4
|
2019-07-03T22:36:57.000Z
|
2019-08-10T15:33:25.000Z
|
homeassistant/components/mochad/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 7
|
2019-08-23T05:26:02.000Z
|
2022-03-11T23:57:18.000Z
|
homeassistant/components/mochad/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 3
|
2019-04-28T16:35:45.000Z
|
2020-05-28T15:21:59.000Z
|
"""Support for CM15A/CM19A X10 Controller using mochad daemon."""
import logging
import threading
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.const import (CONF_HOST, CONF_PORT)
_LOGGER = logging.getLogger(__name__)
CONTROLLER = None
CONF_COMM_TYPE = 'comm_type'
DOMAIN = 'mochad'
REQ_LOCK = threading.Lock()
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST, default='localhost'): cv.string,
vol.Optional(CONF_PORT, default=1099): cv.port,
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the mochad component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
from pymochad import exceptions
global CONTROLLER
try:
CONTROLLER = MochadCtrl(host, port)
except exceptions.ConfigurationError:
        _LOGGER.exception("Failed to set up the mochad component")
return False
def stop_mochad(event):
"""Stop the Mochad service."""
CONTROLLER.disconnect()
def start_mochad(event):
"""Start the Mochad service."""
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mochad)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mochad)
return True
class MochadCtrl:
"""Mochad controller."""
def __init__(self, host, port):
"""Initialize a PyMochad controller."""
super(MochadCtrl, self).__init__()
self._host = host
self._port = port
from pymochad import controller
self.ctrl = controller.PyMochad(server=self._host, port=self._port)
@property
def host(self):
"""Return the server where mochad is running."""
return self._host
@property
def port(self):
"""Return the port mochad is running on."""
return self._port
def disconnect(self):
"""Close the connection to the mochad socket."""
self.ctrl.socket.close()
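# Example configuration.yaml entry accepted by CONFIG_SCHEMA above
# (the values shown are simply the schema defaults):
#
#   mochad:
#     host: localhost
#     port: 1099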
| 24.614458
| 75
| 0.675477
|
baea0ec88b425201e05422d10add67738ab37bff
| 19,874
|
py
|
Python
|
custom_model_runner/datarobot_drum/drum/typeschema_validation.py
|
andreakropp/datarobot-user-models
|
423ab8c703a545491ad6013a0b7efa3119e2c0fc
|
[
"Apache-2.0"
] | null | null | null |
custom_model_runner/datarobot_drum/drum/typeschema_validation.py
|
andreakropp/datarobot-user-models
|
423ab8c703a545491ad6013a0b7efa3119e2c0fc
|
[
"Apache-2.0"
] | null | null | null |
custom_model_runner/datarobot_drum/drum/typeschema_validation.py
|
andreakropp/datarobot-user-models
|
423ab8c703a545491ad6013a0b7efa3119e2c0fc
|
[
"Apache-2.0"
] | null | null | null |
import os
from abc import ABC, abstractmethod
import base64
import logging
from enum import auto
from enum import Enum as PythonNativeEnum
from io import BytesIO
import operator
from typing import List, Type, TypeVar, Union
from PIL import Image
from strictyaml import Map, Optional, Seq, Int, Enum, Str, YAML
import numpy as np
import pandas as pd
from datarobot_drum.drum.exceptions import DrumSchemaValidationException
logger = logging.getLogger("drum." + __name__)
T = TypeVar("T")
class BaseEnum(PythonNativeEnum):
def __str__(self) -> str:
return self.name
@classmethod
def from_string(cls: Type[T], enum_str: str) -> T:
for el in list(cls):
if str(el) == enum_str:
return el
raise ValueError(f"No enum value matches: {enum_str!r}")
class RequirementTypes(BaseEnum):
INPUT_REQUIREMENTS = auto()
OUTPUT_REQUIREMENTS = auto()
def __str__(self) -> str:
return self.name.lower()
class Conditions(BaseEnum):
"""All acceptable values for the 'condition' field."""
EQUALS = auto()
IN = auto()
NOT_EQUALS = auto()
NOT_IN = auto()
GREATER_THAN = auto()
LESS_THAN = auto()
NOT_GREATER_THAN = auto()
NOT_LESS_THAN = auto()
@classmethod
def non_numeric(cls) -> List["Conditions"]:
return [
cls.EQUALS,
cls.NOT_EQUALS,
cls.IN,
cls.NOT_IN,
]
@classmethod
def single_value_conditions(cls) -> List["Conditions"]:
return [
cls.EQUALS,
cls.NOT_EQUALS,
cls.GREATER_THAN,
cls.NOT_GREATER_THAN,
cls.LESS_THAN,
cls.NOT_LESS_THAN,
]
class Values(BaseEnum):
"""All acceptable values for the 'value' field. """
NUM = auto()
TXT = auto()
CAT = auto()
IMG = auto()
DATE = auto()
DATE_DURATION = auto()
COUNT_DICT = auto()
GEO = auto()
FORBIDDEN = auto()
SUPPORTED = auto()
REQUIRED = auto()
NEVER = auto()
DYNAMIC = auto()
ALWAYS = auto()
IDENTITY = auto()
@classmethod
def data_values(cls) -> List["Values"]:
return [
cls.NUM,
cls.TXT,
cls.IMG,
cls.DATE,
cls.CAT,
cls.DATE_DURATION,
cls.COUNT_DICT,
cls.GEO,
]
@classmethod
def input_values(cls) -> List["Values"]:
return [cls.FORBIDDEN, cls.SUPPORTED, cls.REQUIRED]
@classmethod
def output_values(cls) -> List["Values"]:
return [cls.NEVER, cls.DYNAMIC, cls.ALWAYS, cls.IDENTITY]
class Fields(BaseEnum):
DATA_TYPES = auto()
SPARSE = auto()
NUMBER_OF_COLUMNS = auto()
CONTAINS_MISSING = auto()
def __str__(self) -> str:
return self.name.lower()
def conditions(self) -> List[Conditions]:
conditions = {
Fields.SPARSE: [Conditions.EQUALS],
Fields.DATA_TYPES: Conditions.non_numeric(),
Fields.NUMBER_OF_COLUMNS: list(Conditions),
Fields.CONTAINS_MISSING: [Conditions.EQUALS],
}
return conditions[self]
def input_values(self) -> List[Values]:
values = {
Fields.DATA_TYPES: Values.data_values(),
Fields.SPARSE: Values.input_values(),
Fields.NUMBER_OF_COLUMNS: [],
Fields.CONTAINS_MISSING: [Values.FORBIDDEN, Values.SUPPORTED],
}
return values[self]
def output_values(self) -> List[Values]:
values = {
Fields.DATA_TYPES: Values.data_values(),
Fields.SPARSE: Values.output_values(),
Fields.NUMBER_OF_COLUMNS: [],
Fields.CONTAINS_MISSING: [Values.NEVER, Values.DYNAMIC],
}
return values[self]
def to_requirements(self, requirement_type: RequirementTypes) -> Map:
types = {
RequirementTypes.INPUT_REQUIREMENTS: _get_mapping(self, self.input_values()),
RequirementTypes.OUTPUT_REQUIREMENTS: _get_mapping(self, self.output_values()),
}
return types[requirement_type]
def to_validator_class(self) -> Type["BaseValidator"]:
classes = {
Fields.DATA_TYPES: DataTypes,
Fields.SPARSE: Sparsity,
Fields.NUMBER_OF_COLUMNS: NumColumns,
Fields.CONTAINS_MISSING: ContainsMissing,
}
return classes[self]
def _get_mapping(field: Fields, values: List[Values]) -> Map:
base_value_enum = Enum([str(el) for el in values])
if field == Fields.DATA_TYPES:
value_enum = base_value_enum | Seq(base_value_enum)
elif field == Fields.NUMBER_OF_COLUMNS:
value_enum = Int() | Seq(Int())
else:
value_enum = base_value_enum
conditions = Enum([str(el) for el in field.conditions()])
return Map({"field": Enum(str(field)), "condition": conditions, "value": value_enum})
def is_sparse(dataframe: pd.DataFrame) -> bool:
return dataframe.dtypes.apply(pd.api.types.is_sparse).any()
class BaseValidator(ABC):
def __init__(self, condition: Conditions, values: List[Union[str, int]]):
if len(values) > 1 and condition in Conditions.single_value_conditions():
raise DrumSchemaValidationException(
f"{condition} only accepts a single value for: {values}"
)
def convert_value(value):
if isinstance(value, int):
return value
return Values.from_string(value)
self.condition = condition
self.values = [convert_value(value) for value in values]
@abstractmethod
def validate(self, dataframe: pd.DataFrame):
raise NotImplementedError
class DataTypes(BaseValidator):
"""Validation related to data types. This is common between input and output."""
def __init__(self, condition, values):
# We currently do not support DRUM validation for these values, but they are supported in DataRobot
self._SKIP_VALIDATION = {
Values.DATE_DURATION.name,
Values.COUNT_DICT.name,
Values.GEO.name,
}
values = list(set(values) - self._SKIP_VALIDATION)
if len(values) == 0:
logger.info(
f"Values ({self.list_str(values)}) specified do not have runtime validation in DRUM, only within DataRobot."
)
super(DataTypes, self).__init__(condition, values)
@staticmethod
def list_str(l):
"""f-strings do not do a great job dealing with lists of objects. The __str__ method isn't called on the
contained objects, and the result is in []. This provides the nicely formatted representation we want
in the error message"""
return ", ".join(sorted([str(x) for x in l]))
@staticmethod
def is_text(x):
"""
Decide if a pandas series is text, using a very simple heuristic:
1. Count the number of elements in the series that contain 1 or more whitespace character
2. If >75% of the elements have whitespace, and either there are more than 60 unique values or
more than 5% of values are unique then the Series is considered to be text
Parameters
----------
x: pd.Series - Series to be analyzed for text
Returns
-------
boolean: True for is text, False for not text
"""
MIN_WHITESPACE_ROWS = 0.75 # percent
MIN_UNIQUE_VALUES = 0.05 # percent
if (
pd.api.types.is_string_dtype(x)
and pd.api.types.infer_dtype(x) != "boolean"
and pd.api.types.infer_dtype(x) != "bytes"
):
pct_rows_with_whitespace = (x.str.count(r"\s") > 0).sum() / x.shape[0]
unique = x.nunique()
pct_unique_values = unique / x.shape[0]
return pct_rows_with_whitespace >= MIN_WHITESPACE_ROWS and (
pct_unique_values >= MIN_UNIQUE_VALUES or unique >= 60
)
return False
@staticmethod
def is_img(x):
def convert(data):
return Image.open(BytesIO(base64.b64decode(data)))
try:
x.apply(convert)
return True
except:
return False
@staticmethod
def number_of_text_columns(X):
return len(X.columns[list(X.apply(DataTypes.is_text, result_type="expand"))])
@staticmethod
def number_of_img_columns(X):
return len(X.columns[list(X.apply(DataTypes.is_img, result_type="expand"))])
def validate(self, dataframe):
"""Perform validation of the dataframe against the supplied specification."""
if len(self.values) == 0:
logger.info("Skipping type validation")
return []
types = dict()
if is_sparse(dataframe):
# only numeric can be a csr or matrix market sparse matrix
types[Values.NUM] = True
types[Values.TXT] = False
types[Values.IMG] = False
types[Values.CAT] = False
types[Values.DATE] = False
else:
num_bool_columns = dataframe.select_dtypes("boolean").shape[1]
num_txt_columns = self.number_of_text_columns(dataframe)
num_img_columns = self.number_of_img_columns(dataframe)
num_obj_columns = dataframe.select_dtypes("O").shape[1]
# Note that boolean values will be sent as numeric in DataRobot
if num_bool_columns > 0:
logger.warning(
"Boolean values were present in the data, which are passed as numeric input in DataRobot. You may need to convert boolean values to integers/floats for your model"
)
types[Values.NUM] = (
dataframe.select_dtypes(np.number).shape[1] > 0 or num_bool_columns > 0
)
types[Values.TXT] = num_txt_columns > 0
types[Values.IMG] = num_img_columns > 0
types[Values.CAT] = num_obj_columns - (num_txt_columns + num_img_columns)
types[Values.DATE] = dataframe.select_dtypes("datetime").shape[1] > 0
types_present = [k for k, v in types.items() if v]
base_error = f"Datatypes incorrect. Data has types: {DataTypes.list_str(types_present)}"
errors = {
Conditions.EQUALS: f"{base_error}, but expected types to exactly match: {DataTypes.list_str(self.values)}",
Conditions.NOT_EQUALS: f"{base_error}, but expected {self.values[0]} to NOT be present.",
Conditions.IN: f"{base_error}, which includes values that are not in {DataTypes.list_str(self.values)}.",
Conditions.NOT_IN: f"{base_error}, but expected no types in: {DataTypes.list_str(self.values)} to be present",
}
tests = {
Conditions.EQUALS: lambda data_types: set(self.values) == set(data_types),
Conditions.NOT_EQUALS: lambda data_types: self.values[0] not in data_types,
Conditions.IN: lambda data_types: set(data_types).issubset(set(self.values)),
Conditions.NOT_IN: lambda data_types: all(el not in self.values for el in data_types),
}
if not tests[self.condition](types_present):
return [errors[self.condition]]
return []
class Sparsity(BaseValidator):
def __init__(self, condition, values):
super(Sparsity, self).__init__(condition, values)
def validate(self, dataframe):
_is_sparse = is_sparse(dataframe)
sparse_input_allowed_values = [Values.SUPPORTED, Values.REQUIRED]
sparse_output_allowed_values = [Values.DYNAMIC, Values.ALWAYS]
dense_input_allowed_values = [Values.FORBIDDEN, Values.SUPPORTED]
dense_output_allowed_values = [Values.NEVER, Values.DYNAMIC, Values.IDENTITY]
value = self.values[0]
if value in Values.input_values():
io_type = "input"
else:
io_type = "output"
if _is_sparse and value not in sparse_output_allowed_values + sparse_input_allowed_values:
return [
f"Sparse {io_type} data found, however value is set to {value}, expecting dense"
]
elif (
not _is_sparse and value not in dense_output_allowed_values + dense_input_allowed_values
):
return [
f"Dense {io_type} data found, however value is set to {value}, expecting sparse"
]
else:
return []
class NumColumns(BaseValidator):
def __init__(self, condition, values):
super(NumColumns, self).__init__(condition, values)
if not all([v >= 0 for v in values]):
raise ValueError("The value for number of columns can not be negative")
if 0 in values:
if condition not in [
Conditions.NOT_IN,
Conditions.NOT_EQUALS,
Conditions.NOT_LESS_THAN,
Conditions.GREATER_THAN,
]:
raise ValueError(f"Value of 0 is not supported for {condition}")
def validate(self, dataframe):
n_columns = len(dataframe.columns)
conditions_map = {
Conditions.EQUALS: operator.eq,
Conditions.NOT_EQUALS: operator.ne,
Conditions.IN: lambda a, b: a in b,
Conditions.NOT_IN: lambda a, b: a not in b,
Conditions.GREATER_THAN: operator.gt,
Conditions.NOT_GREATER_THAN: operator.le,
Conditions.LESS_THAN: operator.lt,
Conditions.NOT_LESS_THAN: operator.ge,
}
test_value = self.values
if self.condition in Conditions.single_value_conditions():
test_value = self.values[0]
passes = conditions_map[self.condition](n_columns, test_value)
if not passes:
return [
f"Incorrect number of columns. {n_columns} received. However, the schema dictates that number of columns should be {self.condition} {test_value}"
]
return []
class ContainsMissing(BaseValidator):
def __init__(self, condition, values):
super(ContainsMissing, self).__init__(condition, values)
def validate(self, dataframe):
missing_output_disallowed = Values.NEVER
missing_input_disallowed = Values.FORBIDDEN
if is_sparse(dataframe):
# sparse but not NA...
any_missing = False
else:
any_missing = dataframe.isna().any().any()
value = self.values[0]
if value in Values.input_values():
io_type = "Input"
else:
io_type = "Output"
if any_missing and value in [missing_output_disallowed, missing_input_disallowed]:
return [
f"{io_type} contains missing values, which the supplied task schema does not allow"
]
return []
def get_type_schema_yaml_validator() -> Map:
seq_validator = Seq(
Map(
{
"field": Enum([str(el) for el in Fields]),
"condition": Str(),
"value": Str() | Seq(Str()),
}
)
)
return Map(
{
Optional(str(RequirementTypes.INPUT_REQUIREMENTS)): seq_validator,
Optional(str(RequirementTypes.OUTPUT_REQUIREMENTS)): seq_validator,
}
)
def revalidate_typeschema(type_schema: YAML):
"""THIS MUTATES `type_schema`! calling the function would change {"number_of_columns": {"value": "1"}}
to {"number_of_columns": {"value": 1}}
    Perform validation on each dictionary in both lists. This is required due to limitations in strictyaml. See
the strictyaml documentation on revalidation for details. This checks that the provided values
are valid together while the initial validation only checks that the map is in the right general format."""
    for requirement_type in RequirementTypes:
        for req in type_schema.get(str(requirement_type), []):
            field = Fields.from_string(req.data["field"])
            req.revalidate(field.to_requirements(requirement_type))
class SchemaValidator:
"""
SchemaValidator transforms the typeschema definition into usable validation objects to be used to verify the data
meets the schema requirements. Two methods, validate_inputs and validate_outputs are provided to perform the
actual validation on the respective dataframes.
"""
_DEFAULT_TYPE_SCHEMA_CODEDIR_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "resource", "default_typeschema")
)
assert os.path.exists(_DEFAULT_TYPE_SCHEMA_CODEDIR_PATH)
def __init__(
self, type_schema: dict, strict=True, use_default_type_schema=False, verbose=False
):
"""
Parameters
----------
type_schema: dict
YAML type schema converted to dict
strict: bool
Whether to error if data does not match type schema
use_default_type_schema: bool
Whether to use the default type schema which matches DataRobot's defaults when no type schema is present.
type_schema must not be provided for the default to be used.
verbose: bool
Whether to print messages to the user
"""
self._using_default_type_schema = False
if not type_schema and use_default_type_schema:
from datarobot_drum.drum.common import (
read_model_metadata_yaml,
) # local import to prevent cyclic dependency
type_schema = read_model_metadata_yaml(
SchemaValidator._DEFAULT_TYPE_SCHEMA_CODEDIR_PATH
)["typeSchema"]
self._using_default_type_schema = True
self._input_validators = [
self._get_validator(schema) for schema in type_schema.get("input_requirements", [])
]
self._output_validators = [
self._get_validator(schema) for schema in type_schema.get("output_requirements", [])
]
self.strict = strict
self._verbose = verbose
def _get_validator(self, schema):
field = Fields.from_string(schema["field"])
condition = Conditions.from_string(schema["condition"])
values = schema["value"]
if not isinstance(values, list):
values = [values]
return field.to_validator_class()(condition, values)
def validate_inputs(self, dataframe):
# Validate that the input values are of the type and shape the user specified in the schema
return self._run_validate(dataframe, self._input_validators, "input")
def validate_outputs(self, dataframe):
# Validate that the output values are of the type and shape the user specified in the schema
return self._run_validate(dataframe, self._output_validators, "output")
def _run_validate(self, dataframe, validators, step_label):
errors = []
for validator in validators:
errors.extend(validator.validate(dataframe))
if len(validators) == 0:
if self._verbose:
logger.info("No type schema for {} provided.".format(step_label))
return True
elif len(errors) == 0:
if self._verbose:
logger.info("Schema validation completed for task {}.".format(step_label))
return True
else:
logger.error(
"Schema validation found mismatch between {} dataset and the supplied schema".format(
step_label
)
)
for error in errors:
logger.error(error)
if self.strict:
raise DrumSchemaValidationException(
"schema validation failed for {}:\n {}".format(step_label, errors)
)
return False
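# --- Minimal usage sketch (added for illustration; the schema below is a hypothetical
# --- example, not one of the DRUM defaults) ---
if __name__ == "__main__":
    example_schema = {
        "input_requirements": [
            {"field": "data_types", "condition": "IN", "value": ["NUM", "CAT"]},
            {"field": "number_of_columns", "condition": "NOT_LESS_THAN", "value": 2},
        ]
    }
    validator = SchemaValidator(example_schema, strict=False, verbose=True)
    ok = validator.validate_inputs(pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}))
    print("input requirements satisfied:", ok)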
| 35.237589
| 184
| 0.618798
|
b457bfd784103466aed0e58f5f116a2e60c4b172
| 8,433
|
py
|
Python
|
pyalgotrade/bitstamp/livefeed.py
|
richwu/pyalgotrade
|
52f801cb5280a7037f33f953e3abff515c789c39
|
[
"Apache-2.0"
] | 2
|
2016-12-10T05:39:49.000Z
|
2016-12-11T04:34:18.000Z
|
pyalgotrade/bitstamp/livefeed.py
|
richwu/pyalgotrade
|
52f801cb5280a7037f33f953e3abff515c789c39
|
[
"Apache-2.0"
] | null | null | null |
pyalgotrade/bitstamp/livefeed.py
|
richwu/pyalgotrade
|
52f801cb5280a7037f33f953e3abff515c789c39
|
[
"Apache-2.0"
] | 1
|
2020-12-10T07:06:48.000Z
|
2020-12-10T07:06:48.000Z
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import datetime
import time
import Queue
from pyalgotrade import bar
from pyalgotrade import dataseries
from pyalgotrade import barfeed
from pyalgotrade import observer
from pyalgotrade.bitstamp import common
from pyalgotrade.bitstamp import wsclient
class TradeBar(bar.Bar):
# Optimization to reduce memory footprint.
__slots__ = ('__dateTime', '__tradeId', '__price', '__amount')
def __init__(self, dateTime, trade):
self.__dateTime = dateTime
self.__tradeId = trade.getId()
self.__price = trade.getPrice()
self.__amount = trade.getAmount()
def __setstate__(self, state):
(self.__dateTime, self.__tradeId, self.__price, self.__amount) = state
def __getstate__(self):
return (self.__dateTime, self.__tradeId, self.__price, self.__amount)
def setUseAdjustedValue(self, useAdjusted):
if useAdjusted:
raise Exception("Adjusted close is not available")
def getTradeId(self):
return self.__tradeId
def getFrequency(self):
return bar.Frequency.TRADE
def getDateTime(self):
return self.__dateTime
def getOpen(self, adjusted=False):
return self.__price
def getHigh(self, adjusted=False):
return self.__price
def getLow(self, adjusted=False):
return self.__price
def getClose(self, adjusted=False):
return self.__price
def getVolume(self):
return self.__amount
def getAdjClose(self):
return None
def getTypicalPrice(self):
return self.__price
def getPrice(self):
return self.__price
def getUseAdjValue(self):
return False
class LiveTradeFeed(barfeed.BaseBarFeed):
"""A real-time BarFeed that builds bars from live trades.
:param maxLen: The maximum number of values that the :class:`pyalgotrade.dataseries.bards.BarDataSeries` will hold.
Once a bounded length is full, when new items are added, a corresponding number of items are discarded
from the opposite end.
:type maxLen: int.
.. note::
Note that a Bar will be created for every trade, so open, high, low and close values will all be the same.
"""
QUEUE_TIMEOUT = 0.01
def __init__(self, maxLen=dataseries.DEFAULT_MAX_LEN):
if not isinstance(maxLen, int):
raise Exception("Invalid type for maxLen parameter")
barfeed.BaseBarFeed.__init__(self, bar.Frequency.TRADE, maxLen)
self.__barDicts = []
self.registerInstrument(common.btc_symbol)
self.__prevTradeDateTime = None
self.__thread = None
self.__initializationOk = None
self.__enableReconnection = True
self.__stopped = False
self.__orderBookUpdateEvent = observer.Event()
# Factory method for testing purposes.
def buildWebSocketClientThread(self):
return wsclient.WebSocketClientThread()
def getCurrentDateTime(self):
return wsclient.get_current_datetime()
def enableReconection(self, enableReconnection):
self.__enableReconnection = enableReconnection
def __initializeClient(self):
self.__initializationOk = None
common.logger.info("Initializing websocket client.")
try:
# Start the thread that runs the client.
self.__thread = self.buildWebSocketClientThread()
self.__thread.start()
except Exception, e:
self.__initializationOk = False
common.logger.error("Error connecting : %s" % str(e))
# Wait for initialization to complete.
while self.__initializationOk is None and self.__thread.is_alive():
self.__dispatchImpl([wsclient.WebSocketClient.ON_CONNECTED])
if self.__initializationOk:
common.logger.info("Initialization ok.")
else:
common.logger.error("Initialization failed.")
return self.__initializationOk
def __onConnected(self):
self.__initializationOk = True
def __onDisconnected(self):
if self.__enableReconnection:
initialized = False
while not self.__stopped and not initialized:
common.logger.info("Reconnecting")
initialized = self.__initializeClient()
if not initialized:
time.sleep(5)
else:
self.__stopped = True
def __dispatchImpl(self, eventFilter):
ret = False
try:
eventType, eventData = self.__thread.getQueue().get(True, LiveTradeFeed.QUEUE_TIMEOUT)
if eventFilter is not None and eventType not in eventFilter:
return False
ret = True
if eventType == wsclient.WebSocketClient.ON_TRADE:
self.__onTrade(eventData)
elif eventType == wsclient.WebSocketClient.ON_ORDER_BOOK_UPDATE:
self.__orderBookUpdateEvent.emit(eventData)
elif eventType == wsclient.WebSocketClient.ON_CONNECTED:
self.__onConnected()
elif eventType == wsclient.WebSocketClient.ON_DISCONNECTED:
self.__onDisconnected()
else:
ret = False
common.logger.error("Invalid event received to dispatch: %s - %s" % (eventType, eventData))
except Queue.Empty:
pass
return ret
# Bar datetimes should not duplicate. In case trade object datetimes conflict, we just move one slightly forward.
def __getTradeDateTime(self, trade):
ret = trade.getDateTime()
if ret == self.__prevTradeDateTime:
ret += datetime.timedelta(microseconds=1)
self.__prevTradeDateTime = ret
return ret
def __onTrade(self, trade):
# Build a bar for each trade.
barDict = {
common.btc_symbol: TradeBar(self.__getTradeDateTime(trade), trade)
}
self.__barDicts.append(barDict)
def barsHaveAdjClose(self):
return False
def getNextBars(self):
ret = None
if len(self.__barDicts):
ret = bar.Bars(self.__barDicts.pop(0))
return ret
def peekDateTime(self):
# Return None since this is a realtime subject.
return None
# This may raise.
def start(self):
if self.__thread is not None:
raise Exception("Already running")
elif not self.__initializeClient():
self.__stopped = True
raise Exception("Initialization failed")
def dispatch(self):
# Note that we may return True even if we didn't dispatch any Bar
# event.
ret = False
if self.__dispatchImpl(None):
ret = True
if barfeed.BaseBarFeed.dispatch(self):
ret = True
return ret
# This should not raise.
def stop(self):
try:
self.__stopped = True
if self.__thread is not None and self.__thread.is_alive():
common.logger.info("Shutting down websocket client.")
self.__thread.stop()
except Exception, e:
common.logger.error("Error shutting down client: %s" % (str(e)))
# This should not raise.
def join(self):
if self.__thread is not None:
self.__thread.join()
def eof(self):
return self.__stopped
def getOrderBookUpdateEvent(self):
"""
Returns the event that will be emitted when the orderbook gets updated.
        Event handlers should receive one parameter:
1. A :class:`pyalgotrade.bitstamp.wsclient.OrderBookUpdate` instance.
:rtype: :class:`pyalgotrade.observer.Event`.
"""
return self.__orderBookUpdateEvent
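# Minimal usage sketch (illustrative; relies on pyalgotrade's observer.Event.subscribe):
#
#   feed = LiveTradeFeed()
#   feed.getOrderBookUpdateEvent().subscribe(
#       lambda update: common.logger.info("order book updated"))
#   feed.start()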
| 32.187023
| 119
| 0.649591
|
ca0066544931788533070484ec201f973a6a9aff
| 3,416
|
py
|
Python
|
scripts/plotting/hgdp_1kg_tob_wgs_pop_pca/hgdp_1kg_tob_wgs_plot_pca.py
|
populationgenomics/ancestry
|
faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6
|
[
"MIT"
] | null | null | null |
scripts/plotting/hgdp_1kg_tob_wgs_pop_pca/hgdp_1kg_tob_wgs_plot_pca.py
|
populationgenomics/ancestry
|
faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6
|
[
"MIT"
] | 21
|
2021-03-09T06:35:59.000Z
|
2022-02-21T22:56:15.000Z
|
scripts/plotting/hgdp_1kg_tob_wgs_pop_pca/hgdp_1kg_tob_wgs_plot_pca.py
|
populationgenomics/ancestry
|
faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6
|
[
"MIT"
] | null | null | null |
"""Create PCA plots for HGDP/1kG + TOB-WGS samples"""
from bokeh.models import CategoricalColorMapper
from bokeh.palettes import turbo # pylint: disable=no-name-in-module
import pandas as pd
import hail as hl
from hail.plot import show
import click
HGDP1KG_TOBWGS = (
'gs://cpg-tob-wgs-analysis/1kg_hgdp_tobwgs_pca/v1/'
'hgdp1kg_tobwgs_joined_all_samples.mt'
)
SCORES = 'gs://cpg-tob-wgs-analysis/1kg_hgdp_tobwgs_pca/v1/scores.ht/'
EIGENVALUES = 'gs://cpg-tob-wgs-analysis/1kg_hgdp_tobwgs_pca/v1/eigenvalues.csv'
@click.command()
@click.option('--number-of-pcs', 'number_of_pcs', help='Number of PCs', default=19)
def main(number_of_pcs: int): # pylint: disable=too-many-locals
"""Query script entry point."""
hl.init()
mt = hl.read_matrix_table(HGDP1KG_TOBWGS)
scores = hl.read_table(SCORES)
mt = mt.annotate_cols(scores=scores[mt.s].scores)
mt = mt.annotate_cols(TOB_WGS=mt.s.contains('TOB'))
    # Data for the PCA plots must all come from the same table object
columns = mt.cols()
pca_scores = columns.scores
labels = columns.TOB_WGS
# get percent variance explained
eigenvalues = pd.read_csv(EIGENVALUES)
eigenvalues.columns = ['eigenvalue']
variance = eigenvalues['eigenvalue'].divide(float(eigenvalues.sum())) * 100
variance = variance.round(2)
print('Making PCA plots labelled by the study ID')
for i in range(0, number_of_pcs):
pc1 = i
pc2 = i + 1
print(f'PC{pc1 + 1} vs PC{pc2 + 1}')
p = hl.plot.scatter(
pca_scores[pc1],
pca_scores[pc2],
label=labels,
title='TOB-WGS',
xlabel='PC' + str(pc1 + 1) + ' (' + str(variance[pc1]) + '%)',
ylabel='PC' + str(pc2 + 1) + ' (' + str(variance[pc2]) + '%)',
)
show(p)
print('Making PCA plots labelled by the continental population')
labels = columns.hgdp_1kg_metadata.population_inference.pop
pops = list(set(labels.collect()))
hover_fields = dict([('s', columns.s)])
for i in range(0, number_of_pcs):
pc1 = i
pc2 = i + 1
print(f'PC{pc1 + 1} vs PC{pc2 + 1}')
p = hl.plot.scatter(
pca_scores[pc1],
pca_scores[pc2],
label=labels,
title='Continental Population',
xlabel='PC' + str(pc1 + 1) + ' (' + str(variance[pc1]) + '%)',
ylabel='PC' + str(pc2 + 1) + ' (' + str(variance[pc2]) + '%)',
collect_all=True,
colors=CategoricalColorMapper(palette=turbo(len(pops)), factors=pops),
hover_fields=hover_fields,
)
show(p)
print('Making PCA plots labelled by the subpopulation')
labels = columns.hgdp_1kg_metadata.labeled_subpop
pops = list(set(labels.collect()))
for i in range(0, number_of_pcs):
pc1 = i
pc2 = i + 1
print(f'PC{pc1 + 1} vs PC{pc2 + 1}')
p = hl.plot.scatter(
pca_scores[pc1],
pca_scores[pc2],
label=labels,
title='Sub-Population',
xlabel='PC' + str(pc1 + 1) + ' (' + str(variance[pc1]) + '%)',
ylabel='PC' + str(pc2 + 1) + ' (' + str(variance[pc2]) + '%)',
collect_all=True,
colors=CategoricalColorMapper(palette=turbo(len(pops)), factors=pops),
)
show(p)
if __name__ == '__main__':
main() # pylint: disable=no-value-for-parameter
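# Worked example of the percent-variance calculation used above: each eigenvalue
# is divided by the sum of all eigenvalues and scaled to a percentage. The
# eigenvalues below are made up for illustration only.
def _percent_variance_example():
    eigenvalues = pd.Series([4.0, 3.0, 2.0, 1.0], name='eigenvalue')
    variance = (eigenvalues / eigenvalues.sum() * 100).round(2)
    # -> 40.0, 30.0, 20.0, 10.0; these percentages label the PCA axes.
    return variance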
| 33.490196
| 83
| 0.595726
|
76e4f7475f53139ee15060984724ca3c2fd5a763
| 1,231
|
py
|
Python
|
dautil/IO/stdio.py
|
ickc/dautil-py
|
9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd
|
[
"BSD-3-Clause"
] | null | null | null |
dautil/IO/stdio.py
|
ickc/dautil-py
|
9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd
|
[
"BSD-3-Clause"
] | null | null | null |
dautil/IO/stdio.py
|
ickc/dautil-py
|
9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
from functools import wraps
def redirect_stdout_stderr(f):
    '''A decorator that adds optional ``stdout``/``stderr`` keyword arguments.
    If a file path is passed for stdout or stderr, the corresponding stream is
    redirected (at the file-descriptor level) to that file while the wrapped
    function runs, and restored afterwards.
    '''
    @wraps(f)
    def f_decorated(*args, **kwargs):
        stdout = kwargs.pop('stdout', None)
        stderr = kwargs.pop('stderr', None)
        saved_out = saved_err = f_out = f_err = None
        try:
            if stdout:
                f_out = os.open(stdout, os.O_WRONLY | os.O_CREAT, 0o644)
                # keep a duplicate of the original fd so it can be restored
                saved_out = os.dup(sys.stdout.fileno())
                os.dup2(f_out, sys.stdout.fileno())
            if stderr:
                f_err = os.open(stderr, os.O_WRONLY | os.O_CREAT, 0o644)
                saved_err = os.dup(sys.stderr.fileno())
                os.dup2(f_err, sys.stderr.fileno())
            return f(*args, **kwargs)
        finally:
            if stdout:
                sys.stdout.flush()  # flush so buffered output is not lost
                if saved_out is not None:
                    os.dup2(saved_out, sys.stdout.fileno())
                    os.close(saved_out)
                if f_out is not None:
                    os.close(f_out)
            if stderr:
                sys.stderr.flush()  # flush so buffered output is not lost
                if saved_err is not None:
                    os.dup2(saved_err, sys.stderr.fileno())
                    os.close(saved_err)
                if f_err is not None:
                    os.close(f_err)
return f_decorated
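# A minimal usage sketch (the file name below is illustrative): the decorated
# function gains optional stdout/stderr keyword arguments that redirect its
# output for the duration of the call.
@redirect_stdout_stderr
def _example_task(n):
    print('processed %d items' % n)
    return n
# _example_task(3, stdout='/tmp/task.log') would write the printed line to
# /tmp/task.log and still return 3.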
| 31.564103
| 72
| 0.552396
|
0355c81e6d4eac7b5003e92163bc29c5a1aec449
| 15,339
|
py
|
Python
|
tests/contrib/pymysql/test_pymysql.py
|
mykytarudenko/new-project
|
e06a912382239739dd3f93b54d545b9506102372
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/pymysql/test_pymysql.py
|
mykytarudenko/new-project
|
e06a912382239739dd3f93b54d545b9506102372
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/pymysql/test_pymysql.py
|
mykytarudenko/new-project
|
e06a912382239739dd3f93b54d545b9506102372
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import pymysql
from ddtrace import Pin
from ddtrace.compat import PY2
from ddtrace.compat import stringify
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.pymysql.patch import patch
from ddtrace.contrib.pymysql.patch import unpatch
from tests.opentracer.utils import init_tracer
from tests.utils import TracerTestCase
from tests.utils import assert_dict_issuperset
from tests.utils import assert_is_measured
from ...contrib.config import MYSQL_CONFIG
class PyMySQLCore(object):
"""PyMySQL test case reuses the connection across tests"""
conn = None
DB_INFO = {
"out.host": MYSQL_CONFIG.get("host"),
}
if PY2:
DB_INFO.update({"db.user": MYSQL_CONFIG.get("user"), "db.name": MYSQL_CONFIG.get("database")})
else:
DB_INFO.update(
{
"db.user": stringify(bytes(MYSQL_CONFIG.get("user"), encoding="utf-8")),
"db.name": stringify(bytes(MYSQL_CONFIG.get("database"), encoding="utf-8")),
}
)
def setUp(self):
super(PyMySQLCore, self).setUp()
patch()
def tearDown(self):
super(PyMySQLCore, self).tearDown()
if self.conn and not self.conn._closed:
self.conn.close()
unpatch()
def _get_conn_tracer(self):
# implement me
pass
def test_simple_query(self):
conn, tracer = self._get_conn_tracer()
cursor = conn.cursor()
        # PyMySQL returns the rowcount instead of a cursor
rowcount = cursor.execute("SELECT 1")
assert rowcount == 1
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
assert len(spans) == 1
span = spans[0]
assert_is_measured(span)
assert span.service == "pymysql"
assert span.name == "pymysql.query"
assert span.span_type == "sql"
assert span.error == 0
assert span.get_metric("out.port") == MYSQL_CONFIG.get("port")
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
def test_simple_query_fetchall(self):
with self.override_config("dbapi2", dict(trace_fetch_methods=True)):
conn, tracer = self._get_conn_tracer()
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
assert len(spans) == 2
span = spans[0]
assert_is_measured(span)
assert span.service == "pymysql"
assert span.name == "pymysql.query"
assert span.span_type == "sql"
assert span.error == 0
assert span.get_metric("out.port") == MYSQL_CONFIG.get("port")
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
fetch_span = spans[1]
assert fetch_span.name == "pymysql.query.fetchall"
def test_query_with_several_rows(self):
conn, tracer = self._get_conn_tracer()
cursor = conn.cursor()
query = "SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m"
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 3
spans = tracer.pop()
assert len(spans) == 1
self.assertEqual(spans[0].name, "pymysql.query")
def test_query_with_several_rows_fetchall(self):
with self.override_config("dbapi2", dict(trace_fetch_methods=True)):
conn, tracer = self._get_conn_tracer()
cursor = conn.cursor()
query = "SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m"
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 3
spans = tracer.pop()
assert len(spans) == 2
fetch_span = spans[1]
assert fetch_span.name == "pymysql.query.fetchall"
def test_query_many(self):
# tests that the executemany method is correctly wrapped.
conn, tracer = self._get_conn_tracer()
tracer.enabled = False
cursor = conn.cursor()
cursor.execute(
"""
create table if not exists dummy (
dummy_key VARCHAR(32) PRIMARY KEY,
dummy_value TEXT NOT NULL)"""
)
tracer.enabled = True
stmt = "INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)"
data = [("foo", "this is foo"), ("bar", "this is bar")]
# PyMySQL `executemany()` returns the rowcount
rowcount = cursor.executemany(stmt, data)
assert rowcount == 2
query = "SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key"
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 2
assert rows[0][0] == "bar"
assert rows[0][1] == "this is bar"
assert rows[1][0] == "foo"
assert rows[1][1] == "this is foo"
spans = tracer.pop()
assert len(spans) == 2
cursor.execute("drop table if exists dummy")
def test_query_many_fetchall(self):
with self.override_config("dbapi2", dict(trace_fetch_methods=True)):
# tests that the executemany method is correctly wrapped.
conn, tracer = self._get_conn_tracer()
tracer.enabled = False
cursor = conn.cursor()
cursor.execute(
"""
create table if not exists dummy (
dummy_key VARCHAR(32) PRIMARY KEY,
dummy_value TEXT NOT NULL)"""
)
tracer.enabled = True
stmt = "INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)"
data = [("foo", "this is foo"), ("bar", "this is bar")]
cursor.executemany(stmt, data)
query = "SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key"
cursor.execute(query)
rows = cursor.fetchall()
assert len(rows) == 2
assert rows[0][0] == "bar"
assert rows[0][1] == "this is bar"
assert rows[1][0] == "foo"
assert rows[1][1] == "this is foo"
spans = tracer.pop()
assert len(spans) == 3
cursor.execute("drop table if exists dummy")
fetch_span = spans[2]
assert fetch_span.name == "pymysql.query.fetchall"
def test_query_proc(self):
conn, tracer = self._get_conn_tracer()
# create a procedure
tracer.enabled = False
cursor = conn.cursor()
cursor.execute("DROP PROCEDURE IF EXISTS sp_sum")
cursor.execute(
"""
CREATE PROCEDURE sp_sum (IN p1 INTEGER, IN p2 INTEGER, OUT p3 INTEGER)
BEGIN
SET p3 := p1 + p2;
END;"""
)
tracer.enabled = True
proc = "sp_sum"
data = (40, 2, None)
# spans[len(spans) - 2]
cursor.callproc(proc, data)
# spans[len(spans) - 1]
cursor.execute(
"""
SELECT @_sp_sum_0, @_sp_sum_1, @_sp_sum_2
"""
)
output = cursor.fetchone()
assert len(output) == 3
assert output[2] == 42
spans = tracer.pop()
assert spans, spans
        # The number of spans depends on PyMySQL implementation details
        # (typically internal calls to execute), but at least we can expect
        # the second-to-last span to be our stored procedure call.
span = spans[len(spans) - 2]
assert_is_measured(span)
assert span.service == "pymysql"
assert span.name == "pymysql.query"
assert span.span_type == "sql"
assert span.error == 0
assert span.get_metric("out.port") == MYSQL_CONFIG.get("port")
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
def test_simple_query_ot(self):
"""OpenTracing version of test_simple_query."""
conn, tracer = self._get_conn_tracer()
ot_tracer = init_tracer("mysql_svc", tracer)
with ot_tracer.start_active_span("mysql_op"):
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
assert len(spans) == 2
ot_span, dd_span = spans
# confirm parenting
assert ot_span.parent_id is None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.service == "mysql_svc"
assert ot_span.name == "mysql_op"
assert_is_measured(dd_span)
assert dd_span.service == "pymysql"
assert dd_span.name == "pymysql.query"
assert dd_span.span_type == "sql"
assert dd_span.error == 0
assert dd_span.get_metric("out.port") == MYSQL_CONFIG.get("port")
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(dd_span.meta, meta)
def test_simple_query_ot_fetchall(self):
"""OpenTracing version of test_simple_query."""
with self.override_config("dbapi2", dict(trace_fetch_methods=True)):
conn, tracer = self._get_conn_tracer()
ot_tracer = init_tracer("mysql_svc", tracer)
with ot_tracer.start_active_span("mysql_op"):
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
assert len(spans) == 3
ot_span, dd_span, fetch_span = spans
# confirm parenting
assert ot_span.parent_id is None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.service == "mysql_svc"
assert ot_span.name == "mysql_op"
assert_is_measured(dd_span)
assert dd_span.service == "pymysql"
assert dd_span.name == "pymysql.query"
assert dd_span.span_type == "sql"
assert dd_span.error == 0
assert dd_span.get_metric("out.port") == MYSQL_CONFIG.get("port")
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(dd_span.meta, meta)
assert fetch_span.name == "pymysql.query.fetchall"
def test_commit(self):
conn, tracer = self._get_conn_tracer()
conn.commit()
spans = tracer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "pymysql"
assert span.name == "pymysql.connection.commit"
def test_rollback(self):
conn, tracer = self._get_conn_tracer()
conn.rollback()
spans = tracer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "pymysql"
assert span.name == "pymysql.connection.rollback"
def test_analytics_default(self):
conn, tracer = self._get_conn_tracer()
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_with_rate(self):
with self.override_config("dbapi2", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
conn, tracer = self._get_conn_tracer()
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
def test_analytics_without_rate(self):
with self.override_config("dbapi2", dict(analytics_enabled=True)):
conn, tracer = self._get_conn_tracer()
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
self.assertEqual(len(spans), 1)
span = spans[0]
self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
class TestPyMysqlPatch(PyMySQLCore, TracerTestCase):
def _get_conn_tracer(self):
if not self.conn:
self.conn = pymysql.connect(**MYSQL_CONFIG)
assert not self.conn._closed
# Ensure that the default pin is there, with its default value
pin = Pin.get_from(self.conn)
assert pin
# Customize the service
        # we have to apply it to the existing pin, since a brand new Pin would not inherit `app`
pin.clone(tracer=self.tracer).onto(self.conn)
return self.conn, self.tracer
def test_patch_unpatch(self):
unpatch()
# assert we start unpatched
conn = pymysql.connect(**MYSQL_CONFIG)
assert not Pin.get_from(conn)
conn.close()
patch()
try:
conn = pymysql.connect(**MYSQL_CONFIG)
pin = Pin.get_from(conn)
assert pin
pin.clone(tracer=self.tracer).onto(conn)
assert not conn._closed
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = self.pop_spans()
assert len(spans) == 1
span = spans[0]
assert span.service == "pymysql"
assert span.name == "pymysql.query"
assert span.span_type == "sql"
assert span.error == 0
assert span.get_metric("out.port") == MYSQL_CONFIG.get("port")
meta = {}
meta.update(self.DB_INFO)
assert_dict_issuperset(span.meta, meta)
finally:
unpatch()
# assert we finish unpatched
conn = pymysql.connect(**MYSQL_CONFIG)
assert not Pin.get_from(conn)
conn.close()
patch()
def test_user_pin_override(self):
conn, tracer = self._get_conn_tracer()
pin = Pin.get_from(conn)
pin.clone(service="pin-svc", tracer=self.tracer).onto(conn)
cursor = conn.cursor()
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
assert len(spans) == 1
span = spans[0]
assert span.service == "pin-svc"
def test_context_manager(self):
conn, tracer = self._get_conn_tracer()
        # the connection object doesn't support context manager usage, but the cursor does
with conn.cursor() as cursor:
cursor.execute("SELECT 1")
rows = cursor.fetchall()
assert len(rows) == 1
spans = tracer.pop()
assert len(spans) == 1
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_PYMYSQL_SERVICE="mysvc"))
def test_user_specified_service_integration(self):
conn, tracer = self._get_conn_tracer()
conn.rollback()
spans = self.pop_spans()
assert len(spans) == 1
span = spans[0]
assert span.service == "mysvc"
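# A minimal sketch of the pin-override pattern exercised above (the service name
# and connection settings are illustrative): after patch(), every pymysql
# connection carries a default Pin, and cloning it with a custom service re-tags
# the spans emitted for that connection only.
def _example_pin_override():
    patch()
    conn = pymysql.connect(**MYSQL_CONFIG)
    pin = Pin.get_from(conn)
    # Spans from this connection will be reported with service "orders-db".
    pin.clone(service="orders-db").onto(conn)
    return conn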
| 33.12959
| 102
| 0.575005
|
0cc17be851f553e57360f32b7034ca2630d8e085
| 46,933
|
py
|
Python
|
test/parallel/test_mxnet.py
|
DEKHTIARJonathan/horovod
|
333ce607c5ed0c5a38defd234f818aeb27a5394b
|
[
"Apache-2.0"
] | null | null | null |
test/parallel/test_mxnet.py
|
DEKHTIARJonathan/horovod
|
333ce607c5ed0c5a38defd234f818aeb27a5394b
|
[
"Apache-2.0"
] | null | null | null |
test/parallel/test_mxnet.py
|
DEKHTIARJonathan/horovod
|
333ce607c5ed0c5a38defd234f818aeb27a5394b
|
[
"Apache-2.0"
] | 3
|
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import itertools
import unittest
from distutils.version import LooseVersion
import pytest
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import skip_or_fail_gpu_test
try:
import mxnet as mx
from mxnet.base import MXNetError
from mxnet.test_utils import almost_equal, same
import horovod.mxnet as hvd
has_gpu = mx.context.num_gpus() > 0
ccl_supported_types = set(['int32', 'int64', 'float32', 'float64'])
    # MXNet 1.4.x will kill the test MPI process if an error occurs during operation enqueue.
    # Skip those tests for versions earlier than 1.5.0.
_skip_enqueue_errors = LooseVersion(mx.__version__) < LooseVersion('1.5.0')
HAS_MXNET = True
except ImportError:
has_gpu = False
_skip_enqueue_errors = False
HAS_MXNET = False
@pytest.mark.skipif(not HAS_MXNET, reason='MXNet unavailable')
class MXTests(unittest.TestCase):
"""
Tests for ops in horovod.mxnet.
"""
def _current_context(self):
if has_gpu:
return mx.gpu(hvd.local_rank())
else:
return mx.current_context()
def filter_supported_types(self, types):
if 'CCL_ROOT' in os.environ:
types = [t for t in types if t in ccl_supported_types]
return types
def test_gpu_required(self):
if not has_gpu:
skip_or_fail_gpu_test(self, "No GPUs available")
def test_horovod_allreduce(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
# MXNet uses gpu_id as part of the seed, so to get identical seeds
# we must set a context.
mx.random.seed(1234, ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
summed = hvd.allreduce(tensor, average=False, name=str(count))
multiplied = tensor * size
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(summed.asnumpy(), multiplied.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_average(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
averaged = hvd.allreduce(tensor, average=True, name=str(count))
tensor *= size
tensor /= size
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 1
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(averaged.asnumpy(), tensor.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for average: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_inplace(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
multiplied = tensor * size
hvd.allreduce_(tensor, average=False, name=str(count))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(tensor.asnumpy(), multiplied.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for self: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_prescale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float16', 'float32', 'float64'])
int_types = ['int32', 'int64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
np.random.seed(1234)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
factor = np.random.uniform()
scaled = hvd.allreduce(tensor, average=False, name=str(count),
prescale_factor=factor)
factor = mx.nd.array([factor], dtype='float64', ctx=ctx)
if ctx != mx.cpu() and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.astype('float64' if dtype in int_types else dtype)
tensor = tensor.astype('float64' if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
tensor = tensor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
expected = factor * tensor
expected = expected.astype(dtype)
expected *= size
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(expected.asnumpy(), scaled.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for prescaling: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_postscale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float16', 'float32', 'float64'])
int_types = ['int32', 'int64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
np.random.seed(1234)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
factor = np.random.uniform()
scaled = hvd.allreduce(tensor, average=False, name=str(count),
postscale_factor=factor)
factor = mx.nd.array([factor], dtype='float64', ctx=ctx)
if ctx != mx.cpu() and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.astype('float64' if dtype in int_types else dtype)
tensor = tensor.astype('float64' if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
tensor = tensor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
expected = tensor * size
expected *= factor
expected = expected.astype(dtype)
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(expected.asnumpy(), scaled.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for pre/post scaling: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
ctx = self._current_context()
shape = (17 + rank, 3)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw error'
except (MXNetError, RuntimeError):
pass
# Same number of elements, different rank
if rank == 0:
shape = (17, 23 * 57)
else:
shape = (17, 23, 57)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw error'
except (MXNetError, RuntimeError):
pass
def test_horovod_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, 3)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
if rank % 2 == 0:
tensor = tensor.astype('int32')
else:
tensor = tensor.astype('float32')
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw error'
except (MXNetError, RuntimeError):
pass
@unittest.skipUnless(has_gpu, "no gpu detected")
def test_horovod_allreduce_cpu_gpu_error(self):
"""Test that the allreduce raises an error if different ranks try to
perform reduction on CPU and GPU."""
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
shape = (17, 17, 17)
if rank % 2 == 0:
ctx = mx.gpu(hvd.rank())
else:
ctx = mx.cpu(hvd.rank())
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw cpu-gpu error'
except (MXNetError, RuntimeError):
pass
def test_horovod_allreduce_ndarray_lifetime(self):
"""Test that the input NDArray remains valid during async allreduce"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for i, dim in enumerate(dims):
tensor = mx.nd.ones(shape=shapes[dim], ctx=ctx)
# tensor*(i+1) result will be destroyed immediately after this call
# See https://github.com/horovod/horovod/issues/1533
sum = hvd.allreduce(tensor * (i + 1), average=False)
expected = tensor * (i + 1) * size
assert same(sum.asnumpy(), expected.asnumpy())
def test_horovod_grouped_allreduce(self):
"""Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx) for _ in range(5)]
tensors = [tensor.astype(dtype) for tensor in tensors]
multiplied = [tensor * size for tensor in tensors]
summed = hvd.grouped_allreduce(tensors, average=False, name=str(count))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([almost_equal(t1.asnumpy(), t2.asnumpy(), atol=threshold)
for t1, t2 in zip(summed, multiplied)]), \
f'hvd.grouped_allreduce produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_grouped_allreduce_average(self):
"""Test that the grouped allreduce correctly averages 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx) for _ in range(5)]
tensors = [tensor.astype(dtype) for tensor in tensors]
tensors = [tensor * size for tensor in tensors]
tensors = [tensor / size for tensor in tensors]
averaged = hvd.grouped_allreduce(tensors, average=True, name=str(count))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([almost_equal(t1.asnumpy(), t2.asnumpy(), atol=threshold)
for t1, t2 in zip(averaged, tensors)]), \
f'hvd.grouped_allreduce produces incorrect results for average: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_grouped_allreduce_inplace(self):
"""Test that the in-place grouped allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx) for _ in range(5)]
tensors = [tensor.astype(dtype) for tensor in tensors]
multiplied = [tensor * size for tensor in tensors]
hvd.grouped_allreduce_(tensors, average=False, name=str(count))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([almost_equal(t1.asnumpy(), t2.asnumpy(), atol=threshold)
for t1, t2 in zip(tensors, multiplied)]), \
f'hvd.grouped_allreduce_ produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
@unittest.skipUnless(has_gpu, "no gpu detected")
@pytest.mark.skipif(_skip_enqueue_errors,
reason="Skip enqueue errors for MXNet version < 1.5.0")
def test_horovod_grouped_allreduce_cpu_gpu_error(self):
"""Test that the grouped allreduce raises an error if the input tensor
list contains a mix of tensors on CPU and GPU."""
hvd.init()
local_rank = hvd.local_rank()
tensors = [mx.nd.ones(shape=[10], ctx=mx.gpu(local_rank) if i % 2
else mx.cpu(local_rank)) for i in range(5)]
try:
outputs = hvd.grouped_allreduce(tensors)
mx.nd.waitall()
assert False, 'hvd.grouped_allreduce did not throw cpu-gpu error'
except (MXNetError, RuntimeError):
pass
def test_horovod_broadcast(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims,
root_ranks):
tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
tensor = tensor.astype(dtype)
root_tensor = root_tensor.astype(dtype)
broadcast_tensor = hvd.broadcast(tensor, root_rank=root_rank,
name=str(count))
if rank != root_rank:
if same(tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim,
mx.nd.max(tensor == root_tensor))
print("tensor", hvd.rank(), tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(), tensor == root_tensor)
assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast modifies source tensor'
if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim)
print("broadcast_tensor", hvd.rank(), broadcast_tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(),
broadcast_tensor == root_tensor)
assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast produces incorrect broadcasted tensor'
count += 1
def test_horovod_broadcast_inplace(self):
"""Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
root_ranks = list(range(size))
for dtype, dim, root_rank in itertools.product(dtypes, dims,
root_ranks):
tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
tensor = tensor.astype(dtype)
root_tensor = root_tensor.astype(dtype)
# Only do broadcasting using broadcast_tensor
broadcast_tensor = tensor.copy()
hvd.broadcast_(broadcast_tensor, root_rank=root_rank,
name=str(count))
if rank != root_rank:
if same(tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim,
mx.nd.max(tensor == root_tensor))
print("tensor", hvd.rank(), tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(), tensor == root_tensor)
assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast modifies source tensor'
if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
print("broadcast", count, dtype, dim)
print("broadcast_tensor", hvd.rank(), broadcast_tensor)
print("root_tensor", hvd.rank(), root_tensor)
print("comparison", hvd.rank(),
broadcast_tensor == root_tensor)
assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
'hvd.broadcast produces incorrect broadcasted tensor'
count += 1
def test_horovod_broadcast_parameters(self):
"""Test the correctness of broadcast_parameters."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
root_rank = 1
tensor_dict = {}
root_dict = {}
for dtype, dim, in itertools.product(dtypes, dims):
tensor_dict[count] = mx.nd.ones(shapes[dim], ctx=ctx) * rank
root_dict[count] = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
tensor_dict[count] = tensor_dict[count].astype(dtype)
root_dict[count] = root_dict[count].astype(dtype)
count += 1
hvd.broadcast_parameters(tensor_dict, root_rank=root_rank)
for i in range(count):
if not same(tensor_dict[i].asnumpy(), root_dict[i].asnumpy()):
print("broadcast", i, dtypes[i], dims[i])
print("broadcast_tensor", hvd.rank(), tensor_dict[i])
print("root_tensor", hvd.rank(), root_dict[i])
print("comparison", hvd.rank(), tensor_dict[i] == root_dict[i])
assert same(tensor_dict[i].asnumpy(), root_dict[i].asnumpy()), \
'hvd.broadcast_parameters produces incorrect broadcasted tensor'
def test_horovod_broadcast_error(self):
"""Test that the broadcast returns an error if any dimension besides
the first is different among the tensors being broadcasted."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, rank+1)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.broadcast(tensor, 0)
output.wait_to_read()
assert False, 'hvd.broadcast did not throw error'
except (MXNetError, RuntimeError):
pass
def test_horovod_broadcast_type_error(self):
"""Test that the broadcast returns an error if the types being broadcasted
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, 3)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
if rank % 2 == 0:
tensor = tensor.astype('int32')
else:
tensor = tensor.astype('float32')
try:
output = hvd.broadcast(tensor, 0)
output.wait_to_read()
assert False, 'hvd.broadcast did not throw error'
except (MXNetError, RuntimeError):
pass
def test_horovod_broadcast_rank_error(self):
"""Test that the broadcast returns an error if different ranks
specify different root rank."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, 17, 17)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.broadcast(tensor, root_rank=rank)
output.wait_to_read()
assert False, 'hvd.broadcast did not throw rank error'
except (MXNetError, RuntimeError):
pass
def test_horovod_broadcast_deferred_init_parameters(self):
"""Test that the deferred initialized parameters are broadcasted."""
hvd.init()
root_rank = 0
rank = hvd.rank()
# This test does not apply if there is only one worker.
if hvd.size() == 1:
self.skipTest("Only one worker available")
mx.random.seed(rank)
layer = mx.gluon.nn.Conv2D(10, 2)
layer.initialize()
hvd.broadcast_parameters(layer.collect_params(), root_rank=root_rank)
x = mx.nd.ones((5, 4, 10, 10))
layer(x)
tensors = [p.data() for _, p in sorted(layer.collect_params().items())]
root_tensors = []
for tensor in tensors:
root_tensors.append(hvd.broadcast(tensor, root_rank=root_rank))
for tensor, root_tensor in zip(tensors, root_tensors):
assert same(tensor.asnumpy(), root_tensor.asnumpy()), \
'horovod did not broadcast deferred initialized parameter correctly'
def test_horovod_allgather(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
for dtype, dim in itertools.product(dtypes, dims):
tensor = mx.ndarray.ones(shape=[17] * dim, dtype=dtype, ctx=ctx) * rank
gathered = hvd.allgather(tensor)
assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)
for i in range(size):
rank_tensor = gathered[i * 17:(i + 1) * 17]
assert list(rank_tensor.shape) == [17] * dim, \
'hvd.allgather produces incorrect gathered shape'
assert rank_tensor.min() == i, 'hvd.allgather produces incorrect gathered tensor'
assert rank_tensor.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_variable_size(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1, 2, 3]
ctx = self._current_context()
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = mx.ndarray.ones(
shape=[tensor_sizes[rank]] + [17] * (dim - 1), dtype=dtype, ctx=ctx) * rank
gathered = hvd.allgather(tensor)
expected_size = sum(tensor_sizes)
assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = gathered[sum(
tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
assert list(rank_tensor.shape) == rank_size
assert rank_tensor.min() == i
assert rank_tensor.max() == i
def test_horovod_allgather_error(self):
"""Test that the allgather returns an error if any dimension besides
the first is different among the tensors being gathered."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = mx.ndarray.ones(shape=tensor_size, ctx=ctx)
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (MXNetError, RuntimeError):
pass
def test_horovod_allgather_type_error(self):
"""Test that the allgather returns an error if the types being gathered
differ among the processes"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
tensor_size = [17] * 3
if rank % 2 == 0:
tensor = mx.ndarray.ones(shape=tensor_size, dtype="int32", ctx=ctx)
else:
tensor = mx.ndarray.ones(shape=tensor_size, dtype="float32", ctx=ctx)
try:
hvd.allgather(tensor)
assert False, 'hvd.allgather did not throw error'
except (MXNetError, RuntimeError):
pass
def test_broadcast_object(self):
hvd.init()
expected_obj = {
'hello': 123,
0: [1, 2]
}
obj = expected_obj if hvd.rank() == 0 else {}
obj = hvd.broadcast_object(obj, root_rank=0)
self.assertDictEqual(obj, expected_obj)
# To prevent premature shutdown from rank 0 for this test
mx.nd.waitall()
def test_allgather_object(self):
hvd.init()
d = {'metric_val_1': hvd.rank()}
if hvd.rank() == 1:
d['metric_val_2'] = 42
results = hvd.allgather_object(d)
expected = [{'metric_val_1': i} for i in range(hvd.size())]
if hvd.size() > 1:
expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}
self.assertEqual(len(results), hvd.size())
self.assertListEqual(results, expected)
# To prevent premature shutdown from rank 0 for this test
mx.nd.waitall()
def test_horovod_alltoall(self):
"""Test that the alltoall correctly distributes 1D, 2D, and 3D tensors."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1,2,3]
ctx = self._current_context()
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = mx.ndarray.array(vals, dtype=dtype, ctx=ctx)
for _ in range(dim - 1):
tensor = mx.ndarray.expand_dims(tensor, axis=1)
tensor = mx.ndarray.concat(tensor, tensor, dim=1)
splits = mx.ndarray.array([rank + 1] * size, dtype='int32', ctx=ctx)
collected = hvd.alltoall(tensor, splits)
assert collected.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.size == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
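    # Worked example of the splits convention above for size == 3 and dim == 1:
    # rank 0 sends [0, 1, 2] with splits [1, 1, 1], rank 1 sends
    # [0, 0, 1, 1, 2, 2] with splits [2, 2, 2], and rank 2 sends three copies of
    # each value; afterwards rank r holds 1 + 2 + 3 = size * (size + 1) // 2 = 6
    # elements, all equal to r, which is exactly what the assertions check.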
def test_horovod_alltoall_equal_split(self):
"""Test that the alltoall correctly distributes 1D tensors with default splitting."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
dtypes = ['int32', 'int64',
'float32', 'float64']
dims = [1,2,3]
ctx = self._current_context()
for dtype, dim in itertools.product(dtypes, dims):
vals = []
for i in range(size):
vals += [i] * (rank + 1)
tensor = mx.ndarray.array(vals, dtype=dtype, ctx=ctx)
for _ in range(dim - 1):
tensor = mx.ndarray.expand_dims(tensor, axis=1)
tensor = mx.ndarray.concat(tensor, tensor, dim=1)
collected = hvd.alltoall(tensor)
assert collected.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
assert collected.size == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
def test_horovod_alltoall_type_error(self):
"""Test that the alltoall returns an error if the tensor types differ
across the processes."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
ctx = self._current_context()
if rank % 2:
tensor = mx.ndarray.empty([size], dtype='int32', ctx=ctx)
else:
tensor = mx.ndarray.empty([size], dtype='float32', ctx=ctx)
try:
output = hvd.alltoall(tensor)
output.wait_to_read()
assert False, 'hvd.alltoall did not throw error'
except (MXNetError, RuntimeError):
pass
@pytest.mark.skipif(_skip_enqueue_errors,
reason="Skip enqueue errors for MXNet version < 1.5.0")
def test_horovod_alltoall_equal_split_length_error(self):
"""Test that the alltoall with default splitting returns an error if the first dimension
of tensor is not a multiple of the number of workers."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
ctx = self._current_context()
tensor = mx.ndarray.empty([size + 1], ctx=ctx)
try:
hvd.alltoall(tensor)
assert False, 'hvd.alltoall did not throw error'
except (MXNetError, RuntimeError):
pass
@pytest.mark.skipif(_skip_enqueue_errors,
reason="Skip enqueue errors for MXNet version < 1.5.0")
def test_horovod_alltoall_splits_error(self):
"""Test that the alltoall returns an error if the sum of the splits entries exceeds
the first dimension of the input tensor."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
ctx = self._current_context()
tensor = mx.ndarray.empty([size-1], ctx=ctx)
splits = mx.ndarray.ones([size], dtype='int32', ctx=ctx)
try:
hvd.alltoall(tensor, splits)
assert False, 'hvd.alltoall did not throw error'
except (MXNetError, RuntimeError):
pass
@pytest.mark.skipif(_skip_enqueue_errors,
reason="Skip enqueue errors for MXNet version < 1.5.0")
def test_horovod_alltoall_splits_type_error(self):
"""Test that the alltoall returns an error if the splits tensor does not
contain 32-bit integers."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
ctx = self._current_context()
tensor = mx.ndarray.empty([size], ctx=ctx)
splits = mx.ndarray.ones([size], dtype='float32', ctx=ctx)
try:
hvd.alltoall(tensor, splits)
assert False, 'hvd.alltoall did not throw error'
except (MXNetError, ValueError):
pass
def test_two_trainer(self):
"""Test using horovod allreduce in MXNet Gluon trainer."""
from mxnet import gluon
from mxnet.gluon import Block, nn, HybridBlock
hvd.init()
rank = hvd.rank()
ctx = mx.cpu(rank)
net1 = nn.Dense(20, in_units=10)
net2 = nn.Dense(30, in_units=10)
net1.initialize(ctx=ctx)
net2.initialize(ctx=ctx)
params1 = net1.collect_params()
params2 = net2.collect_params()
hvd.broadcast_parameters(params1, prefix="net1")
hvd.broadcast_parameters(params2, prefix="net2")
trainer1 = hvd.DistributedTrainer(params1, 'sgd', {'learning_rate': 0.1}, prefix="net1")
trainer2 = hvd.DistributedTrainer(params2, 'sgd', {'learning_rate': 0.1}, prefix="net2")
for i in range(10):
data = mx.nd.ones((5, 10), ctx=ctx)
with mx.autograd.record():
pred1 = net1(data).sum()
pred2 = net2(data).sum()
mx.autograd.backward([pred1, pred2])
trainer1.step(1.0)
trainer2.step(1.0)
l = pred1.asscalar() + pred2.asscalar()
def test_horovod_alltoall_rank_error(self):
"""Test that the alltoall returns an error if any dimension besides
the first is different among the tensors being processed."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# This test does not apply if NCCL version < 2.7.0
if hvd.nccl_built() and hvd.nccl_built() < 2700:
self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
ctx = self._current_context()
tensor_size = [2 * size] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = mx.ndarray.ones(shape=tensor_size, ctx=ctx)
try:
output = hvd.alltoall(tensor)
output.wait_to_read()
assert False, 'hvd.alltoall did not throw error'
except (MXNetError, RuntimeError):
pass
@unittest.skipUnless(has_gpu, "no gpu detected")
def test_gluon_trainer(self):
"""Test using horovod allreduce in MXNet Gluon trainer."""
from mxnet import gluon
from mxnet.gluon import Block, nn, HybridBlock
hvd.init()
rank = hvd.rank()
np.random.seed(1000 + 10 * rank)
mx.random.seed(1000 + 10 * rank)
ctx = mx.gpu(rank)
def gen_random_dataset(batch_size=64, dim=32, min_len=20, max_len=100,
size=1000):
for _ in range(size):
length = np.random.randint(min_len, max_len + 1)
rand_src = mx.nd.random.normal(0, 1, (length, dim))
rand_dst = mx.nd.random.normal(0, 1, (length, dim))
yield rand_src, rand_dst
class SimpleNet(HybridBlock):
def __init__(self, layer_num=6, **kwargs):
super(SimpleNet, self).__init__(**kwargs)
self._layer_num = layer_num
self.ln_l = nn.HybridSequential()
self.dense_l = nn.HybridSequential()
for i in range(layer_num):
self.dense_l.add(nn.Dense(units=32 + layer_num - 1 - i,
flatten=False))
self.ln_l.add(nn.LayerNorm())
def hybrid_forward(self, F, data):
"""
Parameters
----------
data :
Shape (batch_size, seq_len, fea_dim)
Returns
-------
out :
Shape (batch_size, seq_len, fea_dim)
"""
for i in range(self._layer_num):
data = self.ln_l[i](data)
data = self.dense_l[i](data)
return data
net = SimpleNet()
net.initialize(ctx=ctx)
net.hybridize(static_alloc=True)
params = net.collect_params()
cnt = 0
lr = 1E-4
trainer = gluon.Trainer(params, 'adam', {'learning_rate': lr},
update_on_kvstore=False)
data_gen = gen_random_dataset()
for (src_data, dst_data) in data_gen:
src_data = src_data.as_in_context(ctx).astype(np.float32)
dst_data = dst_data.as_in_context(ctx).astype(np.float32)
with mx.autograd.record():
pred = net(src_data)
loss = mx.nd.abs(pred - dst_data).mean()
loss.backward()
# Begin to update the parameter
trainer.step(1.0)
cnt += 1
l = loss.asscalar()
if cnt >= 10:
for key, param in params.items():
hvd.allreduce_(param.list_data()[0])
cnt = 0
if __name__ == '__main__':
unittest.main()
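# A minimal sketch of the Horovod + Gluon pattern that the trainer tests above
# exercise (the model and optimizer settings are illustrative): broadcast the
# initial parameters from rank 0, then let DistributedTrainer average gradients.
def _example_distributed_training_setup():
    hvd.init()
    net = mx.gluon.nn.Dense(10, in_units=32)
    net.initialize(ctx=mx.cpu(hvd.local_rank()))
    params = net.collect_params()
    hvd.broadcast_parameters(params, root_rank=0)
    trainer = hvd.DistributedTrainer(params, 'sgd', {'learning_rate': 0.1})
    return net, trainer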
| 39.539174
| 123
| 0.555047
|
e17ef06cf9436eb2d52bda859496e4d7b902fe38
| 7,424
|
py
|
Python
|
mmdet/models/detectors/two_stage.py
|
ruihan0495/mmdetection
|
3dcbaf82d04aa1d389bba963f459cf9b3f7c7828
|
[
"Apache-2.0"
] | 3
|
2021-08-13T03:00:52.000Z
|
2021-08-19T07:54:11.000Z
|
mmdet/models/detectors/two_stage.py
|
ruihan0495/mmdetection
|
3dcbaf82d04aa1d389bba963f459cf9b3f7c7828
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/detectors/two_stage.py
|
ruihan0495/mmdetection
|
3dcbaf82d04aa1d389bba963f459cf9b3f7c7828
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
"""Base class for two-stage detectors.
    Two-stage detectors typically consist of a region proposal network and a
    task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(TwoStageDetector, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = build_head(roi_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(TwoStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_roi_head:
self.roi_head.init_weights(pretrained)
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
outs = ()
# backbone
x = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposals)
outs = outs + (roi_outs, )
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
proposals : override rpn proposals with custom proposals. Use when
`with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_losses, proposal_list = self.rpn_head.forward_train(
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=gt_bboxes_ignore,
proposal_cfg=proposal_cfg)
losses.update(rpn_losses)
else:
proposal_list = proposals
roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
return losses
async def async_simple_test(self,
img,
img_meta,
proposals=None,
rescale=False):
"""Async test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = await self.rpn_head.async_simple_test_rpn(
x, img_meta)
else:
proposal_list = proposals
return await self.roi_head.async_simple_test(
x, proposal_list, img_meta, rescale=rescale)
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
return self.roi_head.simple_test(
x, proposal_list, img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
| 35.184834
| 79
| 0.569235
|
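The forward_dummy and forward_train methods above share one pipeline: backbone features, then RPN proposals, then the RoI head. The following is a minimal, hypothetical sketch of that control flow using plain torch stand-ins; ToyBackbone and ToyRPN are illustrative names, not mmdetection modules.

# Hypothetical stand-ins, not mmdetection code: only the control flow of a
# two-stage detector (features -> proposals -> RoI head) is illustrated.
import torch
import torch.nn as nn

class ToyBackbone(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3, padding=1)

    def forward(self, img):
        return self.conv(img)                  # shared feature map

class ToyRPN(nn.Module):
    def forward(self, feats):
        # stand-in proposals in (x1, y1, x2, y2) format
        return torch.rand(100, 4) * feats.shape[-1]

backbone, rpn = ToyBackbone(), ToyRPN()
img = torch.randn(1, 3, 64, 64)
feats = backbone(img)                          # stage 1: shared features
proposals = rpn(feats)                         # stage 1: region proposals
# stage 2 would pool `feats` at `proposals` and run per-RoI classification
print(feats.shape, proposals.shape)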
e9372f05864c52ad3487e95e3f6ad58721586254
| 7,455
|
py
|
Python
|
crawlino_develop/check_plugins.py
|
crawlino/crawlino-develop
|
5f17c987f98eb3c6261302af14430a783f472810
|
[
"BSD-3-Clause"
] | 1
|
2020-10-20T22:41:32.000Z
|
2020-10-20T22:41:32.000Z
|
crawlino_develop/check_plugins.py
|
crawlino/crawlino-develop
|
5f17c987f98eb3c6261302af14430a783f472810
|
[
"BSD-3-Clause"
] | null | null | null |
crawlino_develop/check_plugins.py
|
crawlino/crawlino-develop
|
5f17c987f98eb3c6261302af14430a783f472810
|
[
"BSD-3-Clause"
] | 1
|
2020-10-20T22:41:34.000Z
|
2020-10-20T22:41:34.000Z
|
import os
import re
import tempfile
from typing import List, Tuple
from colored import fore, style, back
from contextlib import redirect_stdout
from distutils.version import StrictVersion
from pytest import main as main_pytest
from crawlino_develop.helpers import get_plugins
from crawlino_develop.model import CRDRunningConfig
DOC_SECTIONS = ("quick start", "requisites", "yaml config", "examples",
"input data", "output data")
REQUIRED_FILES = ("__init__.py", "README.rst", "requirements.txt", "VERSION")
ACCEPTED_COVERAGE = 95
def _get_plugin_name(path) -> str:
return os.sep.join(path.split(os.sep)[-2:])
def _check_required_files(plugin_path: str) -> List[Tuple[str, str]]:
"""This check ensures that a plugin contains the required files"""
plugin_name = _get_plugin_name(plugin_path)
plugin_files = os.listdir(plugin_path)
issues = []
for f in REQUIRED_FILES:
if f not in plugin_files:
issues.append((
f"'{plugin_name}' - missing '{f}' file",
"fail"
))
else:
issues.append((
f"'{plugin_name}' - contain '{f}' file",
"ok"
))
# Check for test file
if not any(x.startswith("test_") and x.endswith(".py")
for x in plugin_files):
issues.append((
f"'{plugin_name}' - missing unit testing file (test_xxxxx.py)",
"fail"
))
else:
issues.append((
f"'{plugin_name}' - contains a unit testing file (test_xxxxx.py)",
"ok"
))
return issues
def _check_doc(plugin_path: str) -> List[Tuple[str, str]]:
"""this plugin checks that the plugin doc contains the minimum sections"""
# load readme
plugin_name = _get_plugin_name(plugin_path)
results = []
total_founds = []
try:
with open(os.path.join(plugin_path, "README.rst"), "r") as f:
readme_file = f.read()
# Build regex
for section in DOC_SECTIONS:
res = re.search(fr'''({section}[\w\s]*)([\s])([\-]+)''',
readme_file.lower())
if res:
results.append((
f"'{plugin_name}' - Section "
f"'{section}' found in README.rst ",
"ok"
))
total_founds.append(section)
for not_found in set(DOC_SECTIONS).difference(total_founds):
results.append(
(f"'{plugin_name}' - Section '{not_found}' not found in "
f"README.rst", "fail")
)
return results
except IOError:
return [(f"'{plugin_name}' - Missing 'README.rst",
"fails")]
def _check_unit_tests(plugin_path: str) -> List[Tuple[str, str]]:
# Get test form package installation dir
# test_paths = os.path.join(os.path.dirname(crawlino_develop.__file__),
# "tests")
plugin_name = _get_plugin_name(plugin_path)
ret = []
try:
with tempfile.NamedTemporaryFile() as f:
# Launch PyTest and store results in a temporal file
with redirect_stdout(open(f.name, 'w')):
pytest_return = main_pytest([
f"--cov={plugin_path}", plugin_path
])
# Load coverage results
with open(f.name, "r") as r:
results = r.read()
# Get total coverage
cov = re.search(r'''(TOTAL.*)( [\d]{1,3})(%)''', results)
if not cov:
ret.append((f"'{plugin_name}' - Can't obtain the coverage",
"fail"))
else:
cov_value = int(cov.group(2))
if cov_value < ACCEPTED_COVERAGE:
ret.append((
f"'{plugin_name}' - Testing coverage is "
f"'{cov_value}%'. Must be greater than "
f"'{ACCEPTED_COVERAGE}%'",
"fail"
))
else:
ret.append((
f"'{plugin_name}' - Testing coverage is "
f"'{cov_value}%'",
"ok"
))
except Exception as e:
ret.append((f"'{plugin_name}' - error running pytest", "fail"))
return ret
#
# Pytest codes:
# https://docs.pytest.org/en/latest/usage.html#possible-exit-codes
#
if pytest_return == 0:
ret.append((f"'{plugin_name}' - unit tests pass", "ok"))
elif pytest_return == 1:
ret.append((f"'{plugin_name}' - unit tests wasn't pass", "fail"))
elif pytest_return == 5:
ret.append((f"'{plugin_name}': can't find any tests to pass", "fail"))
else:
# Pytest error
ret.append((f"'{plugin_name}': error running pytest", "fail"))
return ret
def _check_version_format(plugin_path: str) -> List[Tuple[str, str]]:
"""This function checks the format for the VERSION file content
Examples:
- 1.0 -> OK
- 1.0.0 -> OK
- 1.1.2 -> OK
- version1 -> BAD
- pre-release-10 -> BAD
"""
plugin_name = _get_plugin_name(plugin_path)
try:
with open(os.path.join(plugin_path, "VERSION"), "r") as f:
version = f.readline()
except FileNotFoundError:
return [(f"'{plugin_name}' - missing 'VERSION' file", "fail")]
try:
StrictVersion(version)
return [(f"'{plugin_name}' - Version '{version}'", "ok")]
except ValueError:
return [(f"'{plugin_name}' - Invalid version "
f"value '{version}' in 'VERSION' file ", "fail")]
def pass_plugins_checks(config: CRDRunningConfig) -> List[str]:
"""
Launch all checks for each plugin
return a list of issues
"""
if isinstance(config.path, list):
plugins_paths = config.path
else:
plugins_paths = [config.path]
issues = []
for path in plugins_paths:
for plugin_path in get_plugins(path):
# Check files
issues.extend(_check_required_files(plugin_path))
# Check doc
issues.extend(_check_doc(plugin_path))
# Check version format
issues.extend(_check_version_format(plugin_path))
# Launch unit-test
issues.extend(_check_unit_tests(plugin_path))
# List issues
bad_issues = 0
for description, status in issues:
if status == "ok":
if not config.show_all:
continue
color_start = fore.LIGHT_BLUE
start_symbol = "[OK]"
background = style.RESET
else:
bad_issues += 1
start_symbol = "[FAIL]"
color_start = fore.LIGHT_RED
background = style.RESET + back.RED
print(color_start,
start_symbol,
background,
fore.WHITE,
description,
style.RESET)
if bad_issues == 0:
color_start = fore.LIGHT_BLUE
start_symbol = "[OK]"
background = style.RESET
print(color_start,
start_symbol,
background,
fore.WHITE,
"The plugin is ready!",
style.RESET)
exit(len(issues))
| 29.350394
| 78
| 0.532394
|
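The VERSION check in _check_version_format above relies entirely on distutils.version.StrictVersion. A small standalone sketch (standard library only, on Python versions where distutils is still shipped) reproducing the accepted and rejected examples from its docstring:

# Standalone illustration of the StrictVersion-based check above; the sample
# strings come from the _check_version_format docstring.
from distutils.version import StrictVersion

for candidate in ("1.0", "1.0.0", "1.1.2", "version1", "pre-release-10"):
    try:
        StrictVersion(candidate)
        print(f"{candidate!r} -> ok")
    except ValueError:
        print(f"{candidate!r} -> fail")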
1b2506dc085146978855fd526f39ac87cc9fa119
| 11,736
|
py
|
Python
|
chinilla/rpc/farmer_rpc_api.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
chinilla/rpc/farmer_rpc_api.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
chinilla/rpc/farmer_rpc_api.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
import dataclasses
import operator
from typing import Any, Callable, Dict, List, Optional
from typing_extensions import Protocol
from chinilla.farmer.farmer import Farmer
from chinilla.plot_sync.receiver import Receiver
from chinilla.protocols.harvester_protocol import Plot
from chinilla.types.blockchain_format.sized_bytes import bytes32
from chinilla.util.byte_types import hexstr_to_bytes
from chinilla.util.ints import uint32
from chinilla.util.paginator import Paginator
from chinilla.util.streamable import Streamable, streamable
from chinilla.util.ws_message import WsRpcMessage, create_payload_dict
class PaginatedRequestData(Protocol):
@property
def node_id(self) -> bytes32:
pass
@property
def page(self) -> uint32:
pass
@property
def page_size(self) -> uint32:
pass
@streamable
@dataclasses.dataclass(frozen=True)
class FilterItem(Streamable):
key: str
value: Optional[str]
@streamable
@dataclasses.dataclass(frozen=True)
class PlotInfoRequestData(Streamable):
node_id: bytes32
page: uint32
page_size: uint32
filter: List[FilterItem] = dataclasses.field(default_factory=list)
sort_key: str = "filename"
reverse: bool = False
@streamable
@dataclasses.dataclass(frozen=True)
class PlotPathRequestData(Streamable):
node_id: bytes32
page: uint32
page_size: uint32
filter: List[str] = dataclasses.field(default_factory=list)
reverse: bool = False
def paginated_plot_request(source: List[Any], request: PaginatedRequestData) -> Dict[str, object]:
paginator: Paginator = Paginator(source, request.page_size)
return {
"node_id": request.node_id.hex(),
"page": request.page,
"page_count": paginator.page_count(),
"total_count": len(source),
"plots": paginator.get_page(request.page),
}
def plot_matches_filter(plot: Plot, filter_item: FilterItem) -> bool:
plot_attribute = getattr(plot, filter_item.key)
if filter_item.value is None:
return plot_attribute is None
else:
return filter_item.value in str(plot_attribute)
class FarmerRpcApi:
def __init__(self, farmer: Farmer):
self.service = farmer
self.service_name = "chinilla_farmer"
def get_routes(self) -> Dict[str, Callable]:
return {
"/get_signage_point": self.get_signage_point,
"/get_signage_points": self.get_signage_points,
"/get_reward_targets": self.get_reward_targets,
"/set_reward_targets": self.set_reward_targets,
"/get_pool_state": self.get_pool_state,
"/set_payout_instructions": self.set_payout_instructions,
"/get_harvesters": self.get_harvesters,
"/get_harvesters_summary": self.get_harvesters_summary,
"/get_harvester_plots_valid": self.get_harvester_plots_valid,
"/get_harvester_plots_invalid": self.get_harvester_plots_invalid,
"/get_harvester_plots_keys_missing": self.get_harvester_plots_keys_missing,
"/get_harvester_plots_duplicates": self.get_harvester_plots_duplicates,
"/get_pool_login_link": self.get_pool_login_link,
}
async def _state_changed(self, change: str, change_data: Dict) -> List[WsRpcMessage]:
payloads = []
if change == "new_signage_point":
sp_hash = change_data["sp_hash"]
data = await self.get_signage_point({"sp_hash": sp_hash.hex()})
payloads.append(
create_payload_dict(
"new_signage_point",
data,
self.service_name,
"wallet_ui",
)
)
elif change == "new_farming_info":
payloads.append(
create_payload_dict(
"new_farming_info",
change_data,
self.service_name,
"wallet_ui",
)
)
elif change == "harvester_update":
payloads.append(
create_payload_dict(
"harvester_update",
change_data,
self.service_name,
"wallet_ui",
)
)
elif change == "harvester_removed":
payloads.append(
create_payload_dict(
"harvester_removed",
change_data,
self.service_name,
"wallet_ui",
)
)
elif change == "submitted_partial":
payloads.append(
create_payload_dict(
"submitted_partial",
change_data,
self.service_name,
"metrics",
)
)
elif change == "proof":
payloads.append(
create_payload_dict(
"proof",
change_data,
self.service_name,
"metrics",
)
)
return payloads
async def get_signage_point(self, request: Dict) -> Dict:
sp_hash = hexstr_to_bytes(request["sp_hash"])
for _, sps in self.service.sps.items():
for sp in sps:
if sp.challenge_chain_sp == sp_hash:
pospaces = self.service.proofs_of_space.get(sp.challenge_chain_sp, [])
return {
"signage_point": {
"challenge_hash": sp.challenge_hash,
"challenge_chain_sp": sp.challenge_chain_sp,
"reward_chain_sp": sp.reward_chain_sp,
"difficulty": sp.difficulty,
"sub_slot_iters": sp.sub_slot_iters,
"signage_point_index": sp.signage_point_index,
},
"proofs": pospaces,
}
raise ValueError(f"Signage point {sp_hash.hex()} not found")
async def get_signage_points(self, _: Dict) -> Dict[str, Any]:
result: List[Dict[str, Any]] = []
for sps in self.service.sps.values():
for sp in sps:
pospaces = self.service.proofs_of_space.get(sp.challenge_chain_sp, [])
result.append(
{
"signage_point": {
"challenge_hash": sp.challenge_hash,
"challenge_chain_sp": sp.challenge_chain_sp,
"reward_chain_sp": sp.reward_chain_sp,
"difficulty": sp.difficulty,
"sub_slot_iters": sp.sub_slot_iters,
"signage_point_index": sp.signage_point_index,
},
"proofs": pospaces,
}
)
return {"signage_points": result}
async def get_reward_targets(self, request: Dict) -> Dict:
search_for_private_key = request["search_for_private_key"]
max_ph_to_search = request.get("max_ph_to_search", 500)
return await self.service.get_reward_targets(search_for_private_key, max_ph_to_search)
async def set_reward_targets(self, request: Dict) -> Dict:
farmer_target, pool_target = None, None
if "farmer_target" in request:
farmer_target = request["farmer_target"]
if "pool_target" in request:
pool_target = request["pool_target"]
self.service.set_reward_targets(farmer_target, pool_target)
return {}
def get_pool_contract_puzzle_hash_plot_count(self, pool_contract_puzzle_hash: bytes32) -> int:
plot_count: int = 0
for receiver in self.service.plot_sync_receivers.values():
plot_count += sum(
plot.pool_contract_puzzle_hash == pool_contract_puzzle_hash for plot in receiver.plots().values()
)
return plot_count
async def get_pool_state(self, _: Dict) -> Dict:
pools_list = []
for p2_singleton_puzzle_hash, pool_dict in self.service.pool_state.items():
pool_state = pool_dict.copy()
pool_state["p2_singleton_puzzle_hash"] = p2_singleton_puzzle_hash.hex()
pool_state["plot_count"] = self.get_pool_contract_puzzle_hash_plot_count(p2_singleton_puzzle_hash)
pools_list.append(pool_state)
return {"pool_state": pools_list}
async def set_payout_instructions(self, request: Dict) -> Dict:
launcher_id: bytes32 = bytes32.from_hexstr(request["launcher_id"])
await self.service.set_payout_instructions(launcher_id, request["payout_instructions"])
return {}
async def get_harvesters(self, _: Dict):
return await self.service.get_harvesters(False)
async def get_harvesters_summary(self, _: Dict[str, object]) -> Dict[str, object]:
return await self.service.get_harvesters(True)
async def get_harvester_plots_valid(self, request_dict: Dict[str, object]) -> Dict[str, object]:
        # TODO: Consider having an extra List[PlotInfo] in Receiver to avoid rebuilding the list for each call
request = PlotInfoRequestData.from_json_dict(request_dict)
plot_list = list(self.service.get_receiver(request.node_id).plots().values())
# Apply filter
plot_list = [
plot for plot in plot_list if all(plot_matches_filter(plot, filter_item) for filter_item in request.filter)
]
restricted_sort_keys: List[str] = ["pool_contract_puzzle_hash", "pool_public_key", "plot_public_key"]
# Apply sort_key and reverse if sort_key is not restricted
if request.sort_key in restricted_sort_keys:
raise KeyError(f"Can't sort by optional attributes: {restricted_sort_keys}")
        # Sort by plot_id also by default since it's unique
plot_list = sorted(plot_list, key=operator.attrgetter(request.sort_key, "plot_id"), reverse=request.reverse)
return paginated_plot_request(plot_list, request)
def paginated_plot_path_request(
self, source_func: Callable[[Receiver], List[str]], request_dict: Dict[str, object]
) -> Dict[str, object]:
request: PlotPathRequestData = PlotPathRequestData.from_json_dict(request_dict)
receiver = self.service.get_receiver(request.node_id)
source = source_func(receiver)
# Apply filter
source = [plot for plot in source if all(filter_item in plot for filter_item in request.filter)]
# Apply reverse
source = sorted(source, reverse=request.reverse)
return paginated_plot_request(source, request)
async def get_harvester_plots_invalid(self, request_dict: Dict[str, object]) -> Dict[str, object]:
return self.paginated_plot_path_request(Receiver.invalid, request_dict)
async def get_harvester_plots_keys_missing(self, request_dict: Dict[str, object]) -> Dict[str, object]:
return self.paginated_plot_path_request(Receiver.keys_missing, request_dict)
async def get_harvester_plots_duplicates(self, request_dict: Dict[str, object]) -> Dict[str, object]:
return self.paginated_plot_path_request(Receiver.duplicates, request_dict)
async def get_pool_login_link(self, request: Dict) -> Dict:
launcher_id: bytes32 = bytes32(hexstr_to_bytes(request["launcher_id"]))
login_link: Optional[str] = await self.service.generate_login_link(launcher_id)
if login_link is None:
raise ValueError(f"Failed to generate login link for {launcher_id.hex()}")
return {"login_link": login_link}
| 40.75
| 119
| 0.624063
|
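paginated_plot_request above returns page metadata plus one slice of the source list. A hypothetical stand-in for chinilla's Paginator, only to show the same response shape; whether the real class is 0- or 1-indexed is an assumption here.

# Hypothetical pagination helper, only to illustrate the fields returned by
# paginated_plot_request above; 0-indexed pages are an assumption.
from math import ceil

def get_page(source, page, page_size):
    return {
        "page": page,
        "page_count": max(1, ceil(len(source) / page_size)),
        "total_count": len(source),
        "plots": source[page * page_size:(page + 1) * page_size],
    }

print(get_page(list(range(10)), page=1, page_size=4))
# {'page': 1, 'page_count': 3, 'total_count': 10, 'plots': [4, 5, 6, 7]}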
e77aba6f22cd92742b6bd4f4b07364a731cf9801
| 45
|
py
|
Python
|
tests/__init__.py
|
sralli/fast-similarity
|
c07ed98c12c6da83b77e9ffaef43934d55ee1c8c
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
sralli/fast-similarity
|
c07ed98c12c6da83b77e9ffaef43934d55ee1c8c
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
sralli/fast-similarity
|
c07ed98c12c6da83b77e9ffaef43934d55ee1c8c
|
[
"MIT"
] | null | null | null |
"""Unit test package for fast_similarity."""
| 22.5
| 44
| 0.733333
|
9b0cb76086476a82eb282c7daaa350bd41ee65a9
| 900
|
py
|
Python
|
src/borg/testsuite/nanorst.py
|
Gelma/borg
|
e4247cc0d25f221efdf7447d316dcb022b38b8b7
|
[
"BSD-3-Clause"
] | null | null | null |
src/borg/testsuite/nanorst.py
|
Gelma/borg
|
e4247cc0d25f221efdf7447d316dcb022b38b8b7
|
[
"BSD-3-Clause"
] | null | null | null |
src/borg/testsuite/nanorst.py
|
Gelma/borg
|
e4247cc0d25f221efdf7447d316dcb022b38b8b7
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from ..nanorst import rst_to_text
def test_inline():
assert rst_to_text('*foo* and ``bar``.') == 'foo and bar.'
def test_inline_spread():
assert rst_to_text('*foo and bar, thusly\nfoobar*.') == 'foo and bar, thusly\nfoobar.'
def test_comment_inline():
assert rst_to_text('Foo and Bar\n.. foo\nbar') == 'Foo and Bar\n.. foo\nbar'
def test_comment():
assert rst_to_text('Foo and Bar\n\n.. foo\nbar') == 'Foo and Bar\n\nbar'
def test_directive_note():
assert rst_to_text('.. note::\n Note this and that') == 'Note:\n Note this and that'
def test_ref():
references = {
'foo': 'baz'
}
assert rst_to_text('See :ref:`fo\no`.', references=references) == 'See baz.'
def test_undefined_ref():
with pytest.raises(ValueError) as exc_info:
rst_to_text('See :ref:`foo`.')
assert 'Undefined reference' in str(exc_info.value)
| 23.684211
| 92
| 0.651111
|
a6d2239b448a9521e13770d77bb35e871bd4fa7a
| 903
|
py
|
Python
|
setup.py
|
yczeng/mit-course-catalog-cli
|
9b9165723f52940456be016391d7a0dea92113ff
|
[
"MIT"
] | 6
|
2017-07-18T04:28:51.000Z
|
2022-03-17T04:47:24.000Z
|
setup.py
|
yczeng/mit-course-catalog-cli
|
9b9165723f52940456be016391d7a0dea92113ff
|
[
"MIT"
] | null | null | null |
setup.py
|
yczeng/mit-course-catalog-cli
|
9b9165723f52940456be016391d7a0dea92113ff
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name="mit-course-catalog-cli",
description="Lets you access the MIT course catalog from your command line!",
version='0.1',
url="https://github.com/yczeng/mit-course-catalog-cli",
download_url="https://github.com/yczeng/mit-course-catalog-cli",
author="Catherine Zeng",
author_email="yczeng@mit.edu",
license="MIT",
py_modules=['main'],
install_requires=[
'Click',
'urllib',
'pyquery',
],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'],
entry_points='''
[console_scripts]
mit-cc=main:cli
''',
)
| 30.1
| 81
| 0.603544
|
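The entry_points block above wires the mit-cc command to main:cli. Since main.py is not shown here, the following Click skeleton is only a hypothetical sketch of what that cli object could look like; command and argument names are illustrative.

# Hypothetical main.py sketch; command and option names are illustrative,
# not taken from the real mit-course-catalog-cli project.
import click

@click.group()
def cli():
    """Browse the MIT course catalog from the command line."""

@cli.command()
@click.argument("course_number")
def show(course_number):
    """Print basic information for COURSE_NUMBER."""
    click.echo(f"Looking up course {course_number}...")

if __name__ == "__main__":
    cli()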
be9b766a14dffe68ff9927c2207293902c975126
| 15,511
|
py
|
Python
|
Detection/MtcnnDetector_org.py
|
jimeffry/MTCNN-TF
|
4d41c5fd2dc13008d39b868aa2e921a7ff731e10
|
[
"MIT"
] | 8
|
2018-08-15T11:07:03.000Z
|
2019-12-05T10:05:41.000Z
|
Detection/MtcnnDetector_org.py
|
jimeffry/MTCNN-TF
|
4d41c5fd2dc13008d39b868aa2e921a7ff731e10
|
[
"MIT"
] | 2
|
2018-12-04T07:16:02.000Z
|
2019-11-04T09:42:03.000Z
|
Detection/MtcnnDetector_org.py
|
jimeffry/MTCNN-TF
|
4d41c5fd2dc13008d39b868aa2e921a7ff731e10
|
[
"MIT"
] | 6
|
2018-08-07T01:09:12.000Z
|
2021-08-13T07:19:47.000Z
|
import cv2
import time
import numpy as np
import sys
sys.path.append("../")
from train_models.MTCNN_config import config
from nms import py_nms
class MtcnnDetector(object):
def __init__(self,
detectors,
min_face_size=25,
stride=2,
threshold=[0.6, 0.7, 0.7],
scale_factor=0.79):
self.pnet_detector = detectors[0]
self.rnet_detector = detectors[1]
self.onet_detector = detectors[2]
self.min_face_size = min_face_size
self.stride = stride
self.thresh = threshold
self.scale_factor = scale_factor
self.slide_window = False
def convert_to_square(self, bbox):
"""
convert bbox to square
Parameters:
----------
bbox: numpy array , shape n x 5
input bbox
Returns:
-------
square bbox
"""
square_bbox = bbox.copy()
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
max_side = np.maximum(h, w)
square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5
square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5
square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
return square_bbox
def calibrate_box(self, bbox, reg):
"""
calibrate bboxes
Parameters:
----------
bbox: numpy array, shape n x 5
input bboxes
reg: numpy array, shape n x 4
bboxes adjustment
Returns:
-------
bboxes after refinement
"""
bbox_c = bbox.copy()
w = bbox[:, 2] - bbox[:, 0] + 1
w = np.expand_dims(w, 1)
h = bbox[:, 3] - bbox[:, 1] + 1
h = np.expand_dims(h, 1)
reg_m = np.hstack([w, h, w, h])
aug = reg_m * reg
bbox_c[:, 0:4] = bbox_c[:, 0:4] + aug
return bbox_c
def generate_bbox(self, cls_map, reg, scale, threshold):
"""
generate bbox from feature cls_map
Parameters:
----------
cls_map: numpy array , n x m
detect score for each position
reg: numpy array , n x m x 4
bbox
scale: float number
scale of this detection
threshold: float number
detect threshold
Returns:
-------
bbox array
"""
stride = 2
#stride = 4
cellsize = 12
#cellsize = 25
t_index = np.where(cls_map > threshold)
# find nothing
if t_index[0].size == 0:
return np.array([])
#offset
dx1, dy1, dx2, dy2 = [reg[t_index[0], t_index[1], i] for i in range(4)]
reg = np.array([dx1, dy1, dx2, dy2])
score = cls_map[t_index[0], t_index[1]]
boundingbox = np.vstack([np.round((stride * t_index[1]) / scale),
np.round((stride * t_index[0]) / scale),
np.round((stride * t_index[1] + cellsize) / scale),
np.round((stride * t_index[0] + cellsize) / scale),
score,
reg])
return boundingbox.T
#pre-process images
def processed_image(self, img, scale):
height, width, channels = img.shape
new_height = int(height * scale) # resized new height
new_width = int(width * scale) # resized new width
new_dim = (new_width, new_height)
img_resized = cv2.resize(img, new_dim, interpolation=cv2.INTER_LINEAR) # resized image
img_resized = (img_resized - 127.5) / 128
return img_resized
def pad(self, bboxes, w, h):
"""
        pad the bboxes, also restricting their size
Parameters:
----------
bboxes: numpy array, n x 5
input bboxes
w: float number
width of the input image
h: float number
height of the input image
Returns :
------
dy, dx : numpy array, n x 1
start point of the bbox in target image
edy, edx : numpy array, n x 1
end point of the bbox in target image
y, x : numpy array, n x 1
start point of the bbox in original image
        ey, ex : numpy array, n x 1
end point of the bbox in original image
tmph, tmpw: numpy array, n x 1
height and width of the bbox
"""
tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1
num_box = bboxes.shape[0]
dx, dy = np.zeros((num_box,)), np.zeros((num_box,))
edx, edy = tmpw.copy() - 1, tmph.copy() - 1
x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
tmp_index = np.where(ex > w - 1)
edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
ex[tmp_index] = w - 1
tmp_index = np.where(ey > h - 1)
edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
ey[tmp_index] = h - 1
tmp_index = np.where(x < 0)
dx[tmp_index] = 0 - x[tmp_index]
x[tmp_index] = 0
tmp_index = np.where(y < 0)
dy[tmp_index] = 0 - y[tmp_index]
y[tmp_index] = 0
return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
return_list = [item.astype(np.int32) for item in return_list]
return return_list
def detect_pnet(self, im):
"""Get face candidates through pnet
Parameters:
----------
im: numpy array
input image array
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
net_size = 12
current_scale = float(net_size) / self.min_face_size # find initial scale
# print("current_scale", net_size, self.min_face_size, current_scale)
im_resized = self.processed_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
# fcn
all_boxes = list()
while min(current_height, current_width) > net_size:
#return the result predicted by pnet
#cls_cls_map : H*w*2
#reg: H*w*4
cls_cls_map, reg = self.pnet_detector.predict(im_resized)
#boxes: num*9(x1,y1,x2,y2,score,x1_offset,y1_offset,x2_offset,y2_offset)
boxes = self.generate_bbox(cls_cls_map[:, :,1], reg, current_scale, self.thresh[0])
current_scale *= self.scale_factor
im_resized = self.processed_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
if boxes.size == 0:
continue
keep = py_nms(boxes[:, :5], 0.5, 'Union')
boxes = boxes[keep]
all_boxes.append(boxes)
if len(all_boxes) == 0:
return None, None, None
all_boxes = np.vstack(all_boxes)
# merge the detection from first stage
keep = py_nms(all_boxes[:, 0:5], 0.7, 'Union')
all_boxes = all_boxes[keep]
boxes = all_boxes[:, :5]
bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
# refine the boxes
boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
all_boxes[:, 1] + all_boxes[:, 6] * bbh,
all_boxes[:, 2] + all_boxes[:, 7] * bbw,
all_boxes[:, 3] + all_boxes[:, 8] * bbh,
all_boxes[:, 4]])
boxes_c = boxes_c.T
return boxes, boxes_c, None
def detect_rnet(self, im, dets):
"""Get face candidates using rnet
Parameters:
----------
im: numpy array
input image array
dets: numpy array
detection results of pnet
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
dets = self.convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
num_boxes = dets.shape[0]
cropped_ims = np.zeros((num_boxes, 24, 24, 3), dtype=np.float32)
for i in range(num_boxes):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (24, 24))-127.5) / 128
#cls_scores : num_data*2
#reg: num_data*4
#landmark: num_data*10
cls_scores, reg, _ = self.rnet_detector.predict(cropped_ims)
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.thresh[1])[0]
if len(keep_inds) > 0:
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
#landmark = landmark[keep_inds]
else:
return None, None, None
keep = py_nms(boxes, 0.6)
boxes = boxes[keep]
boxes_c = self.calibrate_box(boxes, reg[keep])
return boxes, boxes_c,None
def detect_onet(self, im, dets):
"""Get face candidates using onet
Parameters:
----------
im: numpy array
input image array
dets: numpy array
detection results of rnet
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
dets = self.convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
num_boxes = dets.shape[0]
cropped_ims = np.zeros((num_boxes, 48, 48, 3), dtype=np.float32)
for i in range(num_boxes):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (48, 48))-127.5) / 128
cls_scores, reg,landmark = self.onet_detector.predict(cropped_ims)
#prob belongs to face
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.thresh[2])[0]
if len(keep_inds) > 0:
#pickout filtered box
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
landmark = landmark[keep_inds]
else:
return None, None, None
#width
w = boxes[:,2] - boxes[:,0] + 1
#height
h = boxes[:,3] - boxes[:,1] + 1
landmark[:,0::2] = (np.tile(w,(5,1)) * landmark[:,0::2].T + np.tile(boxes[:,0],(5,1)) - 1).T
landmark[:,1::2] = (np.tile(h,(5,1)) * landmark[:,1::2].T + np.tile(boxes[:,1],(5,1)) - 1).T
boxes_c = self.calibrate_box(boxes, reg)
boxes = boxes[py_nms(boxes, 0.6, "Minimum")]
keep = py_nms(boxes_c, 0.6, "Minimum")
boxes_c = boxes_c[keep]
landmark = landmark[keep]
return boxes, boxes_c,landmark
#use for video
def detect(self, img):
"""Detect face over image
"""
boxes = None
t = time.time()
# pnet
t1 = 0
if self.pnet_detector:
boxes, boxes_c,_ = self.detect_pnet(img)
if boxes_c is None:
return np.array([]),np.array([])
t1 = time.time() - t
t = time.time()
print("Pnet out ",boxes_c.shape)
# rnet
t2 = 0
if self.rnet_detector:
boxes, boxes_c,_ = self.detect_rnet(img, boxes_c)
if boxes_c is None:
return np.array([]),np.array([])
t2 = time.time() - t
t = time.time()
# onet
t3 = 0
if self.onet_detector:
boxes, boxes_c,landmark = self.detect_onet(img, boxes_c)
if boxes_c is None:
return np.array([]),np.array([])
t3 = time.time() - t
t = time.time()
print(
"time cost " + '{:.3f}'.format(t1 + t2 + t3) + ' pnet {:.3f} rnet {:.3f} onet {:.3f}'.format(t1, t2,
t3))
return boxes_c,landmark
def detect_face(self, test_data):
all_boxes = []#save each image's bboxes
landmarks = []
batch_idx = 0
sum_time = 0
#test_data is iter_
for databatch in test_data:
#databatch(image returned)
if batch_idx % 100 == 0:
print("%d images done" % batch_idx)
im = databatch
# pnet
t1 = 0
if self.pnet_detector:
t = time.time()
#ignore landmark
boxes, boxes_c, landmark = self.detect_pnet(im)
t1 = time.time() - t
sum_time += t1
if boxes_c is None:
print("boxes_c is None...")
all_boxes.append(np.array([]))
#pay attention
landmarks.append(np.array([]))
batch_idx += 1
continue
# rnet
t2 = 0
if self.rnet_detector:
t = time.time()
#ignore landmark
boxes, boxes_c, landmark = self.detect_rnet(im, boxes_c)
t2 = time.time() - t
sum_time += t2
if boxes_c is None:
all_boxes.append(np.array([]))
landmarks.append(np.array([]))
batch_idx += 1
continue
# onet
t3 = 0
if self.onet_detector:
t = time.time()
boxes, boxes_c, landmark = self.detect_onet(im, boxes_c)
t3 = time.time() - t
sum_time += t3
if boxes_c is None:
all_boxes.append(np.array([]))
landmarks.append(np.array([]))
batch_idx += 1
continue
print(
"time cost " + '{:.3f}'.format(sum_time) + ' pnet {:.3f} rnet {:.3f} onet {:.3f}'.format(t1, t2,t3))
all_boxes.append(boxes_c)
landmarks.append(landmark)
batch_idx += 1
#num_of_data*9,num_of_data*10
return all_boxes,landmarks
| 34.700224
| 123
| 0.477403
|
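generate_bbox above maps each above-threshold P-Net feature-map cell back to a window in the original image via the stride, cell size, and current scale. A worked numeric example of that arithmetic; the constants 2 and 12 are the ones hard-coded in the method, while the cell indices and scale are made up for illustration.

# Worked example of the coordinate mapping in generate_bbox above.
import numpy as np

stride, cellsize, scale = 2, 12, 0.5               # scale 0.5: image was halved
rows, cols = np.array([3]), np.array([7])          # cell indices above threshold
x1 = np.round(stride * cols / scale)               # -> [28.]
y1 = np.round(stride * rows / scale)               # -> [12.]
x2 = np.round((stride * cols + cellsize) / scale)  # -> [52.]
y2 = np.round((stride * rows + cellsize) / scale)  # -> [36.]
# every cell becomes a cellsize/scale = 24 px square box in the original image
print(x1, y1, x2, y2)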
e0d109f0bfe2ef80fd8a6641806b650e82f7a6e3
| 2,290
|
py
|
Python
|
dregcli/tests/tests_integration/tests_delete/test_delete_from_date_single_tag.py
|
jssuzanne/dregcli
|
328e8aacf4e46f538e2b62c8c3cceba002feb367
|
[
"MIT"
] | null | null | null |
dregcli/tests/tests_integration/tests_delete/test_delete_from_date_single_tag.py
|
jssuzanne/dregcli
|
328e8aacf4e46f538e2b62c8c3cceba002feb367
|
[
"MIT"
] | 1
|
2019-04-12T13:46:52.000Z
|
2019-04-15T15:26:47.000Z
|
dregcli/tests/tests_integration/tests_delete/test_delete_from_date_single_tag.py
|
jssuzanne/dregcli
|
328e8aacf4e46f538e2b62c8c3cceba002feb367
|
[
"MIT"
] | 1
|
2019-04-12T13:44:53.000Z
|
2019-04-12T13:44:53.000Z
|
import json
import os
import sys
import pytest
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir + os.sep + os.pardir
)
)
import tools
from fixtures import (
fixture_registry_url,
fixture_client,
fixture_repository,
fixture_delete_tags,
)
from dregcli.console.delete import DeleteCommandHandler
class TestDeleteFromDateSingleTag:
@pytest.mark.usefixtures(
'fixture_registry_url',
'fixture_client',
'fixture_repository',
'fixture_delete_tags',
)
def test_from_date_single_tag(
self,
fixture_registry_url,
fixture_client,
fixture_repository,
fixture_delete_tags,
capsys
):
# check data set adhoc state
repo = fixture_client.repositories()[0]
repo_tags = repo.tags()
assert sorted(repo_tags) == sorted(fixture_delete_tags)
# tags by date desc (and their name should match fixtures)
tags_by_date_desc = repo.get_tags_by_date()
expected_tags_by_desc_date = [
tag_data['tag'] for tag_data in tags_by_date_desc
]
assert sorted(expected_tags_by_desc_date) == \
sorted(fixture_delete_tags)
# delete from index desc order
# delete from 'alpha'/master-*-1383 in desc order
# see docstring of fixture_delete_tags
from_date = '2019-01-30 22:20:20'
handler = DeleteCommandHandler()
deleted = handler.run(
fixture_registry_url,
fixture_repository,
False,
from_date=from_date,
single_tag='^master-'
)
# 'commit tags' to be removed and left
# (no other release tags like 'staging' on them)
commit_tag_only_tags_deleted = [
'master-2ze98e000wx39d60a7390925d0czr3qs03j90aaa-1382',
]
# check commit_tags_only tags deleted
assert sorted(deleted) == sorted(commit_tag_only_tags_deleted)
# check repo should have over tags than commit_tags_only left now
should_left_tags = [
t for t in fixture_delete_tags
if t not in commit_tag_only_tags_deleted
]
assert sorted(repo.tags()) == sorted(should_left_tags)
| 28.987342
| 73
| 0.640175
|
b88a1b738e8a8a935c77548552c6804c3c74a93f
| 20,974
|
py
|
Python
|
host.py
|
aplneto/redes_projeto
|
450ef8ac61e46bc38ff34142d07eda3d726ce326
|
[
"MIT"
] | 1
|
2019-04-04T13:10:01.000Z
|
2019-04-04T13:10:01.000Z
|
host.py
|
aplneto/redes_projeto
|
450ef8ac61e46bc38ff34142d07eda3d726ce326
|
[
"MIT"
] | null | null | null |
host.py
|
aplneto/redes_projeto
|
450ef8ac61e46bc38ff34142d07eda3d726ce326
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Módulo do Servidor de arquivos TCP
Esse módulo contém a implementação do objeto Host que controla o lado da
conexão do servidor.
Todo:
Concluir a documentação
Example:
>> servidor = Host()
>> servidor.start()
"""
from console import Console
import base64
import pathlib
import os
import threading
import ntpath
import datetime
# Dicionário que armazenará os usuários cadastrados
USR_DICT = dict()
# Dicionário de ajuda do terminal
TERMINAL_HELP = {"conexões": "mostra quantas conexões estão ativas no momento",
"finalizar": "fecha o servidor para conexões futuras e "+
"sai do menu",
"iniciar": "abre o servidor para novas conexões"}
# Dicionário de ajuda pré-login
HELP_DICT = {"sair" : "efetuar logoff e encerrar a execução do programa",
"login <usr> <psw>": "efetuar login usando o valor de <usr>"
+ " e <psw> como senha",
"signup <usr> <psw>": "efetua cadastro como <usr>" +
" usando <psw> como senha"}
# Dicionário de comandos principais
MENU_DICT = {'post <file>': 'faz o upload de um arquivo para o servidor',
'get <file>': 'faz o download de um arquivo do servidor',
'share <file> <usr>': 'compartilha um arquivo com um usuário',
'show': 'lista todos os arquivos disponíveis',
'delete <file>':'exlui um arquivo do banco de dados do usuário'}
CLIENT_COUNTER = 0
CLIENT_DICT = dict()
# Funções Auxlilares
def makethread(func):
"""Função que transforma uma função qualquer numa Thread
Args:
func (function): função a ser transformada em Thread
Returns:
(function) wrapper da função a ser transformada em thread
"""
def _thread(*args, **kwargs):
"""Decorador interno da função
Args:
*args (tuple): tupla de argumentos da função
**kwargs (dict): dicionários de palavras-chave da função
"""
pcs = threading.Thread(target = func, args = args, kwargs = kwargs)
pcs.start()
return _thread
# Classe Principal do Servidor
class Host(Console, threading.Thread):
"""Classe do servidor que receberá os comandos e arquivos dos clientes
Servidor TCP de armazenamento de arquivos. O servidor escuta na mesma porta
por novas conexões, estabelecendo uma conexão de mão-dupla com o cliente.
Attributes:
HOST (str): valor padrão do servidor host para conexões locais
'localhost'
PORT (int): valor de porta padrão para o servidor 4400
"""
HOST = "localhost"
PORT = 4400
def __init__(self, host_ip = HOST, port = PORT, root = "./root", **kwargs):
"""Método construdor do Host
O Host é configurado através de um ip, um valor de porta e um diretório
onde os arquivos dos usuários serão armazenados.
Args:
host_ip (str): IP do Host, configurado por padrão para coneões
locais '127.0.0.1'
port (int): valor de porta para conexão com o servidor, por padrão
4400
root (str): caminho para o diretório raiz onde os arquivos dos
usuários serão salvos, sendo './root' por padrão
Kwargs:
key_file (str): endereço do arquivo contendo a chave privada do
servidor. Por padrão ".pvtkey.txt"
file_usr (str): endeço do arquivo de texto contendo os usuários já
cadastrados no servidor. Por padrão ".usr.txt"
"""
Console.__init__(self, key_file = kwargs.get('key_file',
'.pvtkey.txt'))
threading.Thread.__init__(self)
self.host_name = (host_ip, port)
self.sock.bind(self.host_name)
self.root = pathlib.Path(root)
if not os.path.exists(root):
self.root.mkdir()
try:
usr_dict = Host.load_users(kwargs.get('file_usr',
'.usr.txt'))
except FileNotFoundError:
usr_dict = dict()
finally:
USR_DICT.update(usr_dict)
self.__kwargs = kwargs
self.__run = False
def run(self, backlog = 0, timeout = 0.5):
"""Método de execução principal do servidor.
Esse método coloca o servidor no modo de escuta, aceitando conexões de
acordo com o backlog e timeout fornecidos como parâmetro.
Args:
backlog (int): tamanho da fila de conexões não-aceitas.
timeout (float): intervaloda busca por novas conexões do socket do
servidor
"""
global CLIENT_COUNTER
self.sock.settimeout(timeout)
self.sock.listen(backlog)
self.__run = True
print("Aguardando conexões...")
while self.__run:
try:
sock, client = self.sock.accept()
except:
pass
else:
print("Conexão estabelecida com: " + ', '.join(
str(x) for x in client))
CLIENT_COUNTER += 1
tmp = ClientHandler(sock, client, self.publickey, self.privatekey,
self.root)
tmp.start()
@staticmethod
def Menu(host):
"""Método de controle de Servidores
Funciona como um console para o servidor, onde o usuário digita os
comandos e o servidor executa.
"""
global CLIENT_COUNTER
running = False
print("\nDigite 'help' ou 'ajuda' se precisar de ajuda.\n")
while True:
comando = input("\nadmin: ")
if comando == "iniciar":
if running:
print("Servidor já em execução!")
else:
host.start()
elif comando == "conexões":
print(CLIENT_COUNTER)
elif comando == "finalizar":
print("Finalizando servidor.")
host.stop()
running = False
break
elif comando == "clientes":
for h in CLIENT_DICT:
print(h.usr)
elif comando == "ajuda" or comando == "help":
for cmd in TERMINAL_HELP:
print(cmd.__repr__() + ': ' + TERMINAL_HELP[cmd])
def stop(self, **kwargs):
"""Método usado para finalizar o servidor com segurança
Finaliza o socket principal e inicia o processo de finalização dos
terminais abertos.
Kwargs:
file_usr (str): endereço do arquivo de texto onde os usuários serão
salvos, 'usr.config' por padrão.
file_config (str): endereço onde as configurações do host serão
salvas, 'host.config' por padrão.
"""
self.__run = False
self.sock.close()
self.export_settings(kwargs.get('file_config', '.host.txt'))
Host.save_users(USR_DICT, kwargs.get('file_usr', '.usr.txt'))
key_file = open(self.__kwargs.get('key_file', '.pvtkey.txt'), 'wb')
key_file.write(self.privatekey.exportKey())
key_file.close()
def export_settings(self, filename):
"""Função para exportar as configurações do servidor para um arquivo
As configurações são criptografadas com Base64 para evitar que usuários
inexperientes percam seus dados.
Args:
filename (str): endereço do arquivo onde as configurações serão
salvas
"""
host_ip = "host_ip@{}".format(self.host_name[0])
port = "port@{}".format(self.host_name[1])
root = "root@{}".format(self.root)
with open(filename, 'w') as file:
file.write(base64.a85encode(host_ip.encode()).decode()+'\n')
file.write(base64.a85encode(port.encode()).decode()+'\n')
file.write(base64.a85encode(root.encode()).decode()+'\n')
for key in self.__kwargs:
line = key + '@' + str(self.__kwargs[key])
file.write(base64.a85encode(line.encode()).decode()+'\n')
@staticmethod
def load_host(filename):
"""Função que carrega um host de um arquivo
Essa função permite criar um novo objeto do tipo Host a partir de um
arquivo que contenha as configurações salvas anteriormente.
Args:
filename (str): endereço do arquivo de configurações
Returns:
(Host) objeto do tipo Host com as configurações salvas no arquivo.
"""
configurations = dict()
with open(filename, 'r') as file:
code = file.readline()
while code:
line = base64.a85decode(code.encode()).decode()
settings = line.split('@')
configurations[settings[0]] = settings[1]
code = file.readline()
configurations['port'] = int(configurations['port'])
return Host(**configurations)
@staticmethod
def save_users(dict_, filename):
"""Função para exportar os usuários de um servidor para um arquivo
A função varre o dicionário de usuários que tem o seguinte formato:
chaves: números inteiros que representam o hash das senhas de seus
respectivos usuários
valores: strings contendo os nomes de usuário
Por fim, a função criptografa as strings usando Base64.
Args:
filename (str): endereço do arquivo onde os usuários serão salvos
"""
with open(filename, 'w') as file:
for h in dict_:
str_ = h+'@'+dict_[h]
code = base64.a85encode(str_.encode())
file.write(code.decode()+'\n')
@staticmethod
def load_users(fileusers):
"""Retorna um dicionário contendo como chaves o hash das senhas dos
usuários e como valores os logins de cada um dos usuários.
Args:
fileusers (str): endereço do arquivo de usuários
Returns:
(dict) dicionário contendo como chaves hash de senhas e valores os
nomes de usuário
"""
dict_usr = dict()
with open(fileusers, 'r') as file:
for line in file:
info = base64.a85decode(line.encode())
info = info.decode().split('@')
dict_usr[info[0]] = info[1]
return dict_usr
def __repr__(self):
return ', '.join(["{0}({1}, {2}, {3})".format(self.__class__.__name__,
self.host_name[0].__repr__(), self.host_name[1].__repr__(),
self.root.name.__repr__(), )]+[', '.join('{}: {}'.format
(x,
self.__kwargs[x].__repr__()) for x in self.__kwargs)])
# Classe auxiliar do Servidor
class ClientHandler(Console, threading.Thread):
def __init__(self, socket, client, publickey, privatekey, root):
"""Método construtor do ajudante
Esse método realiza a troca de chaves com o cliente.
Args:
socket (socke.socket): socket pelo qual a comunicação acontecerá
publickey (bytes): inicializador da chave pública (fornecido pelo
Host)
root (pathlib.Path):
"""
Console.__init__(self, sock = socket)
threading.Thread.__init__(self)
self.client = client
self.privatekey = privatekey
self.sock.send(publickey)
self.publickey = self.receive_key()
self.root = self.directory = root
self.usr_bd = dict()
self.running = True
self.usr = 'guest'
def run(self):
"""Processo principal da Thread do Handler
"""
global CLIENT_COUNTER
self.send("TCPy Server\nFaça login ou cadastre-se para continuar.")
while True:
msg = self.receive()
cmd = msg.split(' ')
if cmd[0] == "sair":
break
try:
self.__getattribute__(cmd[0])(*cmd[1:])
except KeyError as k:
raise k
except TypeError:
self.send("Parâmetros incorretos!\nUse o comando 'ajuda'" +
" para mais informações!")
except AttributeError:
self.send("Comando inválido!")
self.sock.close()
CLIENT_COUNTER -= 1
if self.usr != 'guest':
del CLIENT_DICT[self.usr]
self.running = False
print("Conexão com", self.client, "encerrada")
self.generate_bdfile(str(self.directory.joinpath(self.usr+'.bd')),
self.usr_bd)
def share (self, filename, usr):
"""Método de compartilhamento de arquivos com outros usuários
Args:
filename (str): nome do arquivo
usr (str): nome do usuário
"""
if not filename in self.usr_bd:
self.send("Arquivo inexistente")
elif not usr in USR_DICT:
self.send("Usuário não encontrado")
else:
if usr in CLIENT_DICT:
CLIENT_DICT[usr].usr_bd[filename] = self.usr_bd[filename]
else:
file = self.root.joinpath(usr).joinpath(usr+'.bd').open('a')
text_line = filename+' '+' '.join(self.usr_bd[filename])+'\n'
file.write(text_line)
file.close()
self.send(filename+" compartilhado com "+usr)
def ajuda(self):
"""Método de envio de ajuda do servidor.
"""
if self.usr == 'guest':
for key in HELP_DICT:
msg = key.__repr__() + ': ' + HELP_DICT[key]
self.send(msg)
ack = self.receive()
self.send('0')
else:
for key in MENU_DICT:
msg = key.__repr__() + ': ' + MENU_DICT[key]
self.send(msg)
ack = self.receive()
self.send('0')
def show(self):
"""Método de exibição dos arquivos disponíveis
"""
info = "{0}\nProprietário: {1}, Última atualização: {2}\n"
for file in self.usr_bd:
print(file, self.usr_bd[file])
self.send(info.format(file, self.usr_bd[file][0],
self.usr_bd[file][1]))
ack = self.receive()
self.send('EOF')
def login(self, usr, psw):
"""Método de Login
Método que controla a rotina de login no servidor.
Args:
usr (str): nome de usuário para tentativa de acesso
psw (str): senha do usuário
"""
if usr in CLIENT_DICT:
self.send("Sessão em andamento!")
if self.usr == 'guest':
if usr in USR_DICT:
if USR_DICT[usr] == psw:
self.usr = usr
self.send('1')
self.directory = self.root.joinpath(usr)
self.usr_bd.update(
self.recover_bdfile(
str(self.directory.joinpath(usr+'.bd'))))
print(self.usr + ' efetuou login de ' + str(self.client))
CLIENT_DICT[self.usr] = self
else:
self.send("Senha incorreta!")
else:
self.send("Nome de usuário desconhecido!")
else:
self.send("Comando inválido!")
def signup(self, usr, psw):
"""Método de Cadastro
Método que controla a rotina de cadastro no servidor, criando uma nova
pasta para o usuário dentro da pasta root do servidor, assim como um
relatório de banco de dados "files.bd" no interior da pasta.
Args:
usr (str): nome de usuário a ser cadastrado deve ser único
psw (str): senha de acesso do usuário
"""
if self.usr == 'guest':
if usr in USR_DICT:
self.send("Usuário já cadastrado")
else:
self.send("1")
USR_DICT[usr] = psw
_dir = self.directory.joinpath(usr)
try:
_dir.mkdir()
except FileExistsError:
pass
finally:
bd = open(str(_dir.joinpath(usr+'.bd')), 'w')
bd.close()
ack = self.receive()
self.login(usr, psw)
else:
self.send("Comando inválido!")
def post(self, file_address):
"""Método que controla o upload de um arquivo
Esse método controla o upload de um arquivo para o diretório do usuário
sem se preocupar com qual a versão do arquivo.
Args:
file_address (str): endereço do arquivo na máquina do cliente
"""
self.send("ack")
filename = ntpath.basename(file_address)
for b in self.receive_file(str(self.directory.joinpath(filename))):
pass
print(str(b) + ' bytes recebidos de '+ str(self.client))
self.usr_bd[filename] = (self.usr, str(datetime.datetime.now()))
def get(self, file):
"""Método usado para baixar o arquivo do servidor
Args:
file (str): nome do arquivo no banco de dados do usuário
"""
filename = str(self.root.joinpath(self.usr_bd[file][0]).joinpath(file))
for b in self.send_file(filename):
pass
print(str(b) + ' bytes enviados para '+ str(self.client))
def delete(self, file):
"""
Args:
file (str): nome do arquivo a ser excluido
"""
if file in self.usr_bd:
del self.usr_bd[file]
filepath = self.directory.joinpath(file)
os.remove(str(filepath))
self.send(file +" excluído")
else:
self.send("Arquivo não encontrado")
@staticmethod
def update_bdfile(bdfilename, file):
_bd = ClientHandler.recover_bdfile(bdfilename)
try:
del _bd[file]
except KeyError:
pass
ClientHandler.generate_bdfile(bdfilename, _bd)
@staticmethod
def recover_bdfile(bdfilename):
"""Método para recuperar o dicionário de arquivos de um usuário
Os dicionários de arquivos possuem como chave o nome do arquivo e como
valor uma tupla contendo o auto e a data da última atualização do
arquivo.
Args:
bdfilename (str): nome do arquivo .bd do usuário
Returns:
(dict) dicionário no formato:
dict[(nome do arquivo)] = (proprietário, última modificação)
"""
bd_dict = dict()
file = open(bdfilename, 'r')
for line in file:
info = line.split(' ')
bd_dict[info[0]] = tuple(info[1:])
file.close()
return bd_dict
@staticmethod
def generate_bdfile(bdfilename, bd_dict):
"""Método que cria um arquivo .bd a partir de um dicionário de arquivos
Args:
bdfilename (str): nome do arquivo .bd para o qual o dicionário será
salvo.
bd_dict (dict): dicionário a ser salvo.
"""
file = open(bdfilename, 'w')
text_line = '{0} {1}'
for key in bd_dict:
value = ' '.join(bd_dict[key])
file.write(text_line.format(key, value))
file.close()
def __repr__(self):
"""Método repr usado apenas para observação.
"""
return "Client: "+self.usr +", running: " + str(self.running)
if __name__ == "__main__":
servidor = Host()
servidor.start()
| 36.037801
| 83
| 0.524125
|
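Host.save_users and Host.load_users above persist each user as one Ascii85-encoded usr@value line. A minimal round-trip of that encoding; the credentials below are placeholders, not values from the project.

# Round-trip of the user-record encoding used above; values are placeholders.
import base64

record = "alice@s3cret"
line = base64.a85encode(record.encode()).decode()      # what save_users writes
usr, value = base64.a85decode(line.encode()).decode().split("@")
print(line, "->", usr, value)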
690657ffffc4706cb6bfd94092b857c060705319
| 4,866
|
py
|
Python
|
lib/python/treadmill/vring.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | 2
|
2017-10-31T18:48:20.000Z
|
2018-03-04T20:35:20.000Z
|
lib/python/treadmill/vring.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/vring.py
|
bretttegart/treadmill
|
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
|
[
"Apache-2.0"
] | null | null | null |
"""Manage a port-redirect ring between Treadmill containers.
Each vring manages a chain of iptables output rules, which enables applications
that expect to find their peers on a "well-defined" constant port to be
deployed inside the container.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import socket
from treadmill import firewall
from treadmill import iptables
from treadmill import sysinfo
_LOGGER = logging.getLogger(__name__)
def run(routing, endpoints, discovery, rulemgr, ip_owner, rules_owner):
"""Manage ring rules based on discovery info.
:param routing:
The map between logical endpoint name and internal container port that
is used for this endpoint.
:param endpoints:
The set of endpoints to monitor.
:param discovery:
The treadmill.discovery object/iterator. Loop over discovery never
ends, and it yields results in a form:
appname:endpoint hostname:port
appname:endpoint
        Absence of hostname:port indicates that the given endpoint no longer
exists.
:param ``RuleMgr`` rulemgr:
Firewall rule manager instance.
:param ``str`` rules_owner:
Unique name of the container owning all the rules.
:param ``str`` ip_owner:
IP of the container owning of the VRing.
"""
local_host = sysinfo.hostname()
local_ip = socket.gethostbyname(local_host)
_LOGGER.info('Starting vring: %r %r %r %r %r',
local_host, ip_owner, rules_owner, routing, endpoints)
# Add reflective rules back to the container
for endpoint in endpoints:
dnat_rule = firewall.DNATRule(
proto=routing[endpoint]['proto'],
src_ip=ip_owner,
dst_ip=local_ip,
dst_port=routing[endpoint]['port'],
new_ip=ip_owner,
new_port=routing[endpoint]['port']
)
rulemgr.create_rule(chain=iptables.VRING_DNAT,
rule=dnat_rule,
owner=rules_owner)
vring_state = {}
for (app, hostport) in discovery.iteritems():
# app is in the form appname:endpoint. We care only about endpoint
# name.
_name, proto, endpoint = app.split(':')
# Ignore if endpoint is not in routing (only interested in endpoints
# that are in routing table).
if endpoint not in endpoints:
continue
private_port = int(routing[endpoint]['port'])
if hostport:
host, public_port = hostport.split(':')
if host == local_host:
continue
ipaddr = socket.gethostbyname(host)
public_port = int(public_port)
vring_route = (proto, ipaddr, public_port)
_LOGGER.info('add vring route: %r', vring_route)
vring_state[app] = vring_route
dnat_rule = firewall.DNATRule(
proto=proto,
src_ip=ip_owner,
dst_ip=ipaddr,
dst_port=private_port,
new_ip=ipaddr,
new_port=public_port
)
snat_rule = firewall.SNATRule(
proto=proto,
src_ip=ipaddr,
src_port=public_port,
dst_ip=ip_owner,
new_ip=ipaddr,
new_port=private_port
)
rulemgr.create_rule(chain=iptables.VRING_DNAT,
rule=dnat_rule,
owner=rules_owner)
rulemgr.create_rule(chain=iptables.VRING_SNAT,
rule=snat_rule,
owner=rules_owner)
else:
vring_route = vring_state.pop(app, None)
if not vring_route:
continue
_LOGGER.info('del vring route: %r', vring_route)
proto, ipaddr, public_port = vring_route
dnat_rule = firewall.DNATRule(
proto=proto,
src_ip=ip_owner,
dst_ip=ipaddr,
dst_port=private_port,
new_ip=ipaddr,
new_port=public_port
)
snat_rule = firewall.SNATRule(
proto=proto,
src_ip=ipaddr,
src_port=public_port,
dst_ip=ip_owner,
new_ip=ipaddr,
new_port=private_port,
)
rulemgr.unlink_rule(chain=iptables.VRING_DNAT,
rule=dnat_rule,
owner=rules_owner)
rulemgr.unlink_rule(chain=iptables.VRING_SNAT,
rule=snat_rule,
owner=rules_owner)
| 34.757143
| 78
| 0.574188
|
45f59183835c41357ba9b1279b5cfcba936a69d7
| 3,786
|
py
|
Python
|
src/scheduled-query/azext_scheduled_query/vendored_sdks/azure_mgmt_scheduled_query/_monitor_management_client.py
|
wwendyc/azure-cli-extensions
|
6b4099676bb5d43fdb57bc69f9c0281cca510a0a
|
[
"MIT"
] | null | null | null |
src/scheduled-query/azext_scheduled_query/vendored_sdks/azure_mgmt_scheduled_query/_monitor_management_client.py
|
wwendyc/azure-cli-extensions
|
6b4099676bb5d43fdb57bc69f9c0281cca510a0a
|
[
"MIT"
] | 1
|
2020-12-07T18:21:04.000Z
|
2020-12-07T18:21:04.000Z
|
src/scheduled-query/azext_scheduled_query/vendored_sdks/azure_mgmt_scheduled_query/_monitor_management_client.py
|
wwendyc/azure-cli-extensions
|
6b4099676bb5d43fdb57bc69f9c0281cca510a0a
|
[
"MIT"
] | 5
|
2020-09-08T22:46:48.000Z
|
2020-11-08T14:54:35.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import MonitorManagementClientConfiguration
from .operations import ScheduledQueryRulesOperations
from . import models
class MonitorManagementClient(object):
"""Monitor Management Client.
:ivar scheduled_query_rules: ScheduledQueryRulesOperations operations
:vartype scheduled_query_rules: $(python-base-namespace).v2021_02_01_preview.operations.ScheduledQueryRulesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = MonitorManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.scheduled_query_rules = ScheduledQueryRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> MonitorManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| 42.539326
| 133
| 0.684099
|
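Based only on the constructor shown above, a hedged usage sketch; it assumes azure-identity is installed, that the vendored package is importable under the path used by the extension, and the subscription ID is a placeholder.

# Hedged usage sketch; import path and credential choice are assumptions.
from azure.identity import DefaultAzureCredential
from azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query import (
    MonitorManagementClient,
)

client = MonitorManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)
ops = client.scheduled_query_rules  # ScheduledQueryRulesOperations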