Dataset schema, one row per source file (⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

hexsha: 2f7b4648705b0febcbb5851c8b2f61531c6ee3c6 | size: 5,397 | ext: py | lang: Python
max_stars_repo: peng-zhihui/mindspore @ 4e0bc761b228cece8b24a280f15b0915959071dc | path: tests/ut/python/parallel/test_unsortedsegmentsum.py | licenses: ["Apache-2.0"] | stars: 55 (2020-12-17T10:26:06.000Z to 2022-03-28T07:18:26.000Z)
max_issues_repo: 77zmf/mindspore @ 4e0bc761b228cece8b24a280f15b0915959071dc | path: tests/ut/python/parallel/test_unsortedsegmentsum.py | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: 77zmf/mindspore @ 4e0bc761b228cece8b24a280f15b0915959071dc | path: tests/ut/python/parallel/test_unsortedsegmentsum.py | licenses: ["Apache-2.0"] | forks: 14 (2021-01-29T02:39:47.000Z to 2022-03-23T05:00:26.000Z)
content:
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss
context.set_context(mode=context.GRAPH_MODE)
grad_all = C.GradOperation(get_all=True)
class Net(nn.Cell):
def __init__(self, strategy1, strategy2, num_segments):
super(Net, self).__init__()
self.merge_op = P.UnsortedSegmentSum().shard((strategy1, strategy2))
self.num_segments = num_segments
def construct(self, vectors, segment_ids):
predict = self.merge_op(vectors, segment_ids, self.num_segments)
return predict
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y):
return grad_all(self.network)(x, y)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.network = network
self.loss = VirtualLoss()
def construct(self, x, y):
predict = self.network(x, y)
return self.loss(predict)
def compile_graph(x, y, segments, strategy1, strategy2, auto=False):
if auto:
context.set_auto_parallel_context(parallel_mode="auto_parallel")
else:
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
net = GradWrap(NetWithLoss(Net(strategy1, strategy2, segments)))
net.set_auto_parallel()
net.set_train()
_executor.compile(net, x, y)
def test_unsortedsegmentsum_model_parallel_slice_1d():
context.set_auto_parallel_context(device_num=8, global_rank=0)
x = Tensor(np.ones(8), ms.float32)
y = Tensor(np.ones(8), ms.int32)
num_segments = 16
strategy1 = (8,)
strategy2 = (8,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_no_slice_1d():
context.set_auto_parallel_context(device_num=8, global_rank=0)
x = Tensor(np.ones(8), ms.float32)
y = Tensor(np.ones(8), ms.int32)
num_segments = 16
strategy1 = (1,)
strategy2 = (1,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_index_slice_2d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8)), ms.float32)
y = Tensor(np.arange(4), ms.int32)
num_segments = 4
strategy1 = (4, 1)
strategy2 = (4,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_index_slice_3d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 4, 8)), ms.float32)
y = Tensor(np.ones((4, 4)), ms.int32)
num_segments = 16
strategy1 = (2, 2, 1)
strategy2 = (2, 2)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_vector_slice_2d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8)), ms.float32)
y = Tensor(np.ones(4), ms.int32)
num_segments = 4
strategy1 = (1, 4)
strategy2 = (1,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_vector_slice_3d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8, 8)), ms.float32)
y = Tensor(np.ones(4), ms.int32)
num_segments = 4
strategy1 = (1, 2, 2)
strategy2 = (1,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_index_vector_slice_2d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8)), ms.float32)
y = Tensor(np.ones(4), ms.int32)
num_segments = 4
strategy1 = (2, 2)
strategy2 = (2,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_index_vector_slice_3d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 4, 8)), ms.float32)
y = Tensor(np.ones((4, 4)), ms.int32)
num_segments = 16
strategy1 = (2, 1, 2)
strategy2 = (2, 1)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_unsortedsegmentsum_model_parallel_repeat_calculate():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 4, 8)), ms.float32)
y = Tensor(np.ones((4, 4)), ms.int32)
num_segments = 16
strategy1 = (1, 1, 1)
strategy2 = (1, 1)
compile_graph(x, y, num_segments, strategy1, strategy2)

avg_line_length: 33.110429 | max_line_length: 78 | alphanum_fraction: 0.698907

hexsha: d8a438d04de63bf4bcfe5d345085d6e46b19415d | size: 2,350 | ext: py | lang: Python
max_stars_repo: emaballarin/torchsde @ 83373b30c9bd447ec32a8c286c42a4cf5e9753a6 | path: diagnostics/stratonovich_scalar.py | licenses: ["Apache-2.0"] | stars: 984 (2020-07-06T23:15:17.000Z to 2022-03-31T10:09:49.000Z)
max_issues_repo: GabrielNobis/torchsde @ 53038a3efcd77f6c9f3cfd0310700a59be5d5d2d | path: diagnostics/stratonovich_scalar.py | licenses: ["Apache-2.0"] | issues: 95 (2020-07-11T10:53:02.000Z to 2022-03-30T21:33:56.000Z)
max_forks_repo: GabrielNobis/torchsde @ 53038a3efcd77f6c9f3cfd0310700a59be5d5d2d | path: diagnostics/stratonovich_scalar.py | licenses: ["Apache-2.0"] | forks: 117 (2020-07-07T20:05:05.000Z to 2022-03-20T21:30:23.000Z)
content:
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from tests.problems import NeuralScalar
from torchsde import BrownianInterval
from torchsde.settings import LEVY_AREA_APPROXIMATIONS, SDE_TYPES
from . import inspection
from . import utils
def main():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.set_default_dtype(torch.float64)
utils.manual_seed()
small_batch_size, large_batch_size, d = 16, 16384, 3
t0, t1, steps, dt = 0., 2., 10, 1e-1
ts = torch.linspace(t0, t1, steps=steps, device=device)
dts = tuple(2 ** -i for i in range(1, 7)) # For checking strong order.
sde = NeuralScalar(d=d, sde_type=SDE_TYPES.stratonovich).to(device)
methods = ('euler_heun', 'heun', 'midpoint', 'reversible_heun', 'milstein', 'milstein', 'log_ode')
options = (None, None, None, None, None, dict(grad_free=True), None)
labels = ('euler-heun', 'heun', 'midpoint', 'reversible_heun', 'milstein', 'grad-free milstein', 'log_ode')
img_dir = os.path.join(os.path.dirname(__file__), 'plots', 'stratonovich_scalar')
y0 = torch.full((small_batch_size, d), fill_value=0.1, device=device)
bm = BrownianInterval(
t0=t0, t1=t1, size=(small_batch_size, 1), dtype=y0.dtype, device=device,
levy_area_approximation=LEVY_AREA_APPROXIMATIONS.foster
)
inspection.inspect_samples(y0, ts, dt, sde, bm, img_dir, methods, options, labels)
y0 = torch.full((large_batch_size, d), fill_value=0.1, device=device)
bm = BrownianInterval(
t0=t0, t1=t1, size=(large_batch_size, 1), dtype=y0.dtype, device=device,
levy_area_approximation=LEVY_AREA_APPROXIMATIONS.foster
)
inspection.inspect_orders(y0, t0, t1, dts, sde, bm, img_dir, methods, options, labels)
if __name__ == '__main__':
main()

avg_line_length: 40.517241 | max_line_length: 111 | alphanum_fraction: 0.714468

hexsha: 0d0df5db6714986753ce616be85c1ab9301565a3 | size: 661 | ext: py | lang: Python
max_stars_repo: ayang818/pyweb-template @ d4b8c97b9e99166a6b6d856929ef670771b90fd3 | path: common/role.py | licenses: ["MIT"] | stars: null
max_issues_repo: ayang818/pyweb-template @ d4b8c97b9e99166a6b6d856929ef670771b90fd3 | path: common/role.py | licenses: ["MIT"] | issues: null
max_forks_repo: ayang818/pyweb-template @ d4b8c97b9e99166a6b6d856929ef670771b90fd3 | path: common/role.py | licenses: ["MIT"] | forks: null
content:
class Role(object):
    vistor = 1  # visitor (guest), no login state
    user = 2  # regular user
    manager = 3  # administrator
class RoleBuilder(object):
def __init__(self):
self.role = []
def append_vistor(self):
self.role.append(Role.vistor)
return self
def append_user(self):
self.role.append(Role.user)
return self
def append_manager(self):
self.role.append(Role.manager)
return self
def build(self):
return self.role
@classmethod
def all(cls):
return [Role.vistor, Role.user, Role.manager]
if __name__ == '__main__':
print(Role.manager)
print(Role.user)
print(Role.vistor)

avg_line_length: 18.361111 | max_line_length: 53 | alphanum_fraction: 0.596067

hexsha: 7401f013b908e687c6f424e8627bfd490cff1749 | size: 3,156 | ext: py | lang: Python
max_stars_repo: osoco/better-ways-of-thinking-about-software @ 83e70d23c873509e22362a09a10d3510e10f6992 | path: Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credentials/tests/test_utils.py | licenses: ["MIT"] | stars: 3 (2021-12-15T04:58:18.000Z to 2022-02-06T12:15:37.000Z)
max_issues_repo: osoco/better-ways-of-thinking-about-software @ 83e70d23c873509e22362a09a10d3510e10f6992 | path: Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credentials/tests/test_utils.py | licenses: ["MIT"] | issues: null
max_forks_repo: osoco/better-ways-of-thinking-about-software @ 83e70d23c873509e22362a09a10d3510e10f6992 | path: Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credentials/tests/test_utils.py | licenses: ["MIT"] | forks: 1 (2019-01-02T14:38:50.000Z to 2019-01-02T14:38:50.000Z)
content:
"""Tests covering Credentials utilities."""
import uuid
from unittest import mock
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.credentials.tests import factories
from openedx.core.djangoapps.credentials.tests.mixins import CredentialsApiConfigMixin
from openedx.core.djangoapps.credentials.utils import get_credentials
from openedx.core.djangoapps.oauth_dispatch.tests.factories import ApplicationFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
UTILS_MODULE = 'openedx.core.djangoapps.credentials.utils'
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCredentials(CredentialsApiConfigMixin, CacheIsolationTestCase):
""" Tests for credentials utility functions. """
ENABLED_CACHES = ['default']
def setUp(self):
super().setUp()
ApplicationFactory(name=CredentialsApiConfig.OAUTH2_CLIENT_NAME)
self.credentials_config = self.create_credentials_config(cache_ttl=1)
self.user = UserFactory()
def test_get_many(self, mock_get_edx_api_data):
expected = factories.UserCredential.create_batch(3)
mock_get_edx_api_data.return_value = expected
actual = get_credentials(self.user)
mock_get_edx_api_data.assert_called_once()
call = mock_get_edx_api_data.mock_calls[0]
__, __, kwargs = call
querystring = {
'username': self.user.username,
'status': 'awarded',
'only_visible': 'True',
}
cache_key = f'{self.credentials_config.CACHE_KEY}.{self.user.username}'
assert kwargs['querystring'] == querystring
assert kwargs['cache_key'] == cache_key
assert actual == expected
def test_get_one(self, mock_get_edx_api_data):
expected = factories.UserCredential()
mock_get_edx_api_data.return_value = expected
program_uuid = str(uuid.uuid4())
actual = get_credentials(self.user, program_uuid=program_uuid)
mock_get_edx_api_data.assert_called_once()
call = mock_get_edx_api_data.mock_calls[0]
__, __, kwargs = call
querystring = {
'username': self.user.username,
'status': 'awarded',
'only_visible': 'True',
'program_uuid': program_uuid,
}
cache_key = f'{self.credentials_config.CACHE_KEY}.{self.user.username}.{program_uuid}'
assert kwargs['querystring'] == querystring
assert kwargs['cache_key'] == cache_key
assert actual == expected
def test_type_filter(self, mock_get_edx_api_data):
get_credentials(self.user, credential_type='program')
mock_get_edx_api_data.assert_called_once()
call = mock_get_edx_api_data.mock_calls[0]
__, __, kwargs = call
querystring = {
'username': self.user.username,
'status': 'awarded',
'only_visible': 'True',
'type': 'program',
}
assert kwargs['querystring'] == querystring

avg_line_length: 34.304348 | max_line_length: 94 | alphanum_fraction: 0.692966

hexsha: e197512712815183282287183f2ae7fee192bfab | size: 3,452 | ext: py | lang: Python
max_stars_repo: haydard/wyrm @ ff3f675ea71a45f1dd91ecbc5944229ebb3342ec | path: test/test_calculate_cca.py | licenses: ["MIT"] | stars: null
max_issues_repo: haydard/wyrm @ ff3f675ea71a45f1dd91ecbc5944229ebb3342ec | path: test/test_calculate_cca.py | licenses: ["MIT"] | issues: null
max_forks_repo: haydard/wyrm @ ff3f675ea71a45f1dd91ecbc5944229ebb3342ec | path: test/test_calculate_cca.py | licenses: ["MIT"] | forks: null
content:
from __future__ import division
import unittest
import numpy as np
from numpy.random import randn
np.random.seed(42)
from wyrm.types import Data
from wyrm.processing import append, swapaxes, calculate_cca, apply_spatial_filter
class TestCalculateCCA(unittest.TestCase):
SAMPLES = 1000
CHANNELS_X = 10
CHANNELS_Y = 5
NOISE_LEVEL = 0.1
def setUp(self):
# X is a random mixture matrix of random variables
Sx = randn(self.SAMPLES, self.CHANNELS_X)
Ax = randn(self.CHANNELS_X, self.CHANNELS_X)
X = np.dot(Sx, Ax)
# Y is a random mixture matrix of random variables except the
# first component
Sy = randn(self.SAMPLES, self.CHANNELS_Y)
Sy[:, 0] = Sx[:, 0] + self.NOISE_LEVEL * randn(self.SAMPLES)
Ay = randn(self.CHANNELS_Y, self.CHANNELS_Y)
Y = np.dot(Sy, Ay)
# generate Data object
axes_x = [np.arange(X.shape[0]), np.arange(X.shape[1])]
axes_y = [np.arange(Y.shape[0]), np.arange(Y.shape[1])]
self.dat_x = Data(X, axes=axes_x, names=['time', 'channel'], units=['ms', '#'])
self.dat_y = Data(Y, axes=axes_y, names=['time', 'channel'], units=['ms', '#'])
def test_rho(self):
"""Test if the canonical correlation coefficient almost equals 1."""
rho, w_x, w_y = calculate_cca(self.dat_x, self.dat_y)
self.assertAlmostEqual(rho, 1.0, delta=0.01)
def test_diff_between_canonical_variables(self):
"""Test if the scaled canonical variables are almost same."""
rho, w_x, w_y = calculate_cca(self.dat_x, self.dat_y)
cv_x = apply_spatial_filter(self.dat_x, w_x)
cv_y = apply_spatial_filter(self.dat_y, w_y)
def scale(x):
tmp = x.data - x.data.mean()
return tmp / tmp[np.argmax(np.abs(tmp))]
diff = scale(cv_x) - scale(cv_y)
diff = np.sum(np.abs(diff)) / self.SAMPLES
self.assertTrue(diff < 0.1)
def test_raise_error_with_non_continuous_data(self):
"""Raise error if ``dat_x`` is not continuous Data object."""
dat = Data(randn(2, self.SAMPLES, self.CHANNELS_X),
axes=[[0, 1], self.dat_x.axes[0], self.dat_x.axes[1]],
names=['class', 'time', 'channel'],
units=['#', 'ms', '#'])
with self.assertRaises(AssertionError):
calculate_cca(dat, self.dat_x)
def test_raise_error_with_different_length_data(self):
"""Raise error if the length of ``dat_x`` and ``dat_y`` is different."""
dat = append(self.dat_x, self.dat_x)
with self.assertRaises(AssertionError):
calculate_cca(dat, self.dat_y)
def test_calculate_cca_swapaxes(self):
"""caluclate_cca must work with nonstandard timeaxis."""
res1 = calculate_cca(swapaxes(self.dat_x, 0, 1), swapaxes(self.dat_y, 0, 1), timeaxis=1)
res2 = calculate_cca(self.dat_x, self.dat_y)
np.testing.assert_array_equal(res1[0], res2[0])
np.testing.assert_array_equal(res1[1], res2[1])
np.testing.assert_array_equal(res1[2], res2[2])
def test_calculate_cca_copy(self):
"""caluclate_cca must not modify argument."""
cpy_x = self.dat_x.copy()
cpy_y = self.dat_y.copy()
calculate_cca(self.dat_x, self.dat_y)
self.assertEqual(self.dat_x, cpy_x)
self.assertEqual(self.dat_y, cpy_y)
if __name__ == '__main__':
unittest.main()

avg_line_length: 38.355556 | max_line_length: 96 | alphanum_fraction: 0.62949

hexsha: 733413b7147a166cda4a59fa9cf774d5b5df9012 | size: 1,215 | ext: py | lang: Python
max_stars_repo: chopanpma/workalendar @ 619687b9d788d0e8294d2831687d58a3444854a1 | path: workalendar/europe/bulgaria.py | licenses: ["MIT"] | stars: null
max_issues_repo: chopanpma/workalendar @ 619687b9d788d0e8294d2831687d58a3444854a1 | path: workalendar/europe/bulgaria.py | licenses: ["MIT"] | issues: null
max_forks_repo: chopanpma/workalendar @ 619687b9d788d0e8294d2831687d58a3444854a1 | path: workalendar/europe/bulgaria.py | licenses: ["MIT"] | forks: null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.registry import iso_register
@iso_register('BG')
class Bulgaria(WesternCalendar, ChristianMixin):
'Bulgaria'
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(3, 3, "Liberation Day"), # Ден на Освобождението на Б
(5, 1, "International Workers' Day"), # Ден на труда и на междунар
(5, 6, "Saint George's Day"), # Гергьовден, ден на храброс
(5, 24, "Saints Cyril & Methodius Day"), # Ден на българската просвет
(9, 6, "Unification Day"), # Ден на Съединението
(9, 22, "Independence Day"), # Ден на независимостта на Б
        # wikipedia says Non-attendance day for schools, otherwise a working day
# (11, 1, "National Awakening Day"), # Ден на народните будители
)
include_easter_sunday = True
include_easter_monday = True
include_christmas_eve = True # Бъдни вечер
include_christmas = True # Рождество Христово
include_boxing_day = True
# wikipedia says The Bulgarians have two days of Christmas,
# both called Christmas Day
boxing_day_label = "Christmas"

avg_line_length: 37.96875 | max_line_length: 79 | alphanum_fraction: 0.687243

hexsha: a0d4dde1c37c5314ab203f58c234aedfa6afffbf | size: 240 | ext: py | lang: Python
max_stars_repo: tflynn/appconf_aws_ansible @ 221a22f4867256a0c63db2f809d7d5938b75abcb | path: library/github_repo.py | licenses: ["MIT"] | stars: null
max_issues_repo: tflynn/appconf_aws_ansible @ 221a22f4867256a0c63db2f809d7d5938b75abcb | path: library/github_repo.py | licenses: ["MIT"] | issues: null
max_forks_repo: tflynn/appconf_aws_ansible @ 221a22f4867256a0c63db2f809d7d5938b75abcb | path: library/github_repo.py | licenses: ["MIT"] | forks: null
content:
#!/usr/bin/env python3
from ansible.module_utils.basic import *
def main():
module = AnsibleModule(argument_spec={})
response = {"hello": "world"}
module.exit_json(changed=False, meta=response)
if __name__ == '__main__':
main()

avg_line_length: 17.142857 | max_line_length: 47 | alphanum_fraction: 0.7

hexsha: c17c5187610590bfe22c5a7a654d4c221addb17d | size: 5,085 | ext: py | lang: Python
max_stars_repo: ongres/babelfish_install_tests @ 75ffbc01a9c696d51ab719b4b82d0a44e18269f5 | path: dockerfile-templater.py | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: ongres/babelfish_install_tests @ 75ffbc01a9c696d51ab719b4b82d0a44e18269f5 | path: dockerfile-templater.py | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: ongres/babelfish_install_tests @ 75ffbc01a9c696d51ab719b4b82d0a44e18269f5 | path: dockerfile-templater.py | licenses: ["Apache-2.0"] | forks: null
content:
import sys
import glob
import re
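# The target distro is read from the first CLI argument; get_distro_image() below expects a
# "name.version" value (e.g. a hypothetical "ubuntu.20.04" would map to the base image "ubuntu:20.04").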
distro=sys.argv[1]
def get_step_name(step_path):
path_parts = step_path.split("/")
return path_parts[-1]
def load_steps(distro):
common_steps = glob.glob("steps/**")
steps = {}
for step_path in common_steps:
step_name = get_step_name(step_path)
steps[step_name] = step_path
custom_steps = glob.glob("distros/{distro}/steps/*".format(distro=distro))
for step_path in custom_steps:
step_name = get_step_name(step_path)
steps[step_name] = step_path
return steps
def is_line_a_template_markup(line):
match = re.search("^{{ (.+) }}", line)
if match:
return match.group(1)
else:
return None
def load_dockerfile_template():
with open("Dockerfile.template") as initial_template_file:
return initial_template_file.readlines()
def load_step(step_path):
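    # Translates one shell "step" script into Dockerfile directives: "sudo apt-get install" and other
    # sudo lines become RUN, "sudo su" becomes USER, "cd" is tracked and emitted as WORKDIR, and the
    # final line of the start-database step becomes CMD.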
step_content = []
step_name = get_step_name(step_path)
with open(step_path) as step_file:
step_content = step_file.readlines()
starts_with=re.search("^#!", step_content[0])
if starts_with:
step_content.pop(0)
last_line_index = len(step_content) - 1
is_previous_command_multiline = False
parsed_step = []
for content_tuple in enumerate(step_content):
line_index = content_tuple[0]
content_line = content_tuple[1]
dockerfile_lile = content_line
if is_previous_command_multiline: # is multilining
dockerfile_lile = content_line
elif len(content_line.strip()) == 0: # is empty
dockerfile_lile = content_line
elif re.search("^sudo apt-get install -y ", content_line): # is installing ubuntu packages
dockerfile_lile = "RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends" + dockerfile_lile[23:]
elif re.search("^sudo su ", content_line): # is chaning user
dockerfile_lile = "USER " + dockerfile_lile[7:]
elif re.search("^sudo", content_line): # is running with sudo
dockerfile_lile = "RUN " + dockerfile_lile[5:]
elif re.search("^cd", content_line): # is moving between paths
cd_path = dockerfile_lile[3:]
if re.search("^/", cd_path): # is an absolute path
update_current_workdir(cd_path)
elif re.search("\\.\\.", cd_path): # is going up one level
workdir_parts = current_workdir.split("/")
relative_parts = cd_path.split("/")
for relative_path in relative_parts:
if re.search("^\\.\\.", relative_path):
workdir_parts = workdir_parts[:-1]
else:
workdir_parts.append(relative_path)
update_current_workdir("/".join(workdir_parts))
else: # is relative path
update_current_workdir(current_workdir + "/" + cd_path)
dockerfile_lile = "WORKDIR " + current_workdir
elif step_name == "start-database" and line_index == last_line_index:
line_parts = content_line.split(" ")
dockerfile_lile = 'CMD ["' + '", "'.join(line_parts) + '" ]'
else:
dockerfile_lile = "RUN " + content_line
parsed_step.append(dockerfile_lile)
if re.search("\\\\\n$", content_line): # matches if there is line is ending with \
is_previous_command_multiline = True
else:
is_previous_command_multiline = False
return parsed_step
def is_workdir_line(line):
match = re.search("^WORKDIR (.+)", line)
if match:
return match.group(1)
else:
return None
def update_current_workdir(workir):
global current_workdir
current_workdir = workir
def parse_dockerfile_template(steps):
parsed_dockerfile = []
dockerfile_template = load_dockerfile_template()
for dockerfile_line in dockerfile_template:
workir = is_workdir_line(dockerfile_line)
if workir:
update_current_workdir(workir)
step = is_line_a_template_markup(dockerfile_line)
if step:
parsed_dockerfile = parsed_dockerfile + load_step(steps[step])
else:
parsed_dockerfile.append(dockerfile_line)
return parsed_dockerfile
def write_dockerfile(distro, dockerfile_output):
dockerfile_file_format = "distros/{distro}/Dockerfile"
with open(dockerfile_file_format.format(distro = distro), 'w') as output_file:
output_file.writelines(dockerfile_output)
def get_distro_image(distro):
distro_parts = distro.split(".")
distro_name = distro_parts[0]
distro_version = ".".join(distro_parts[1:])
return distro_name + ":" + distro_version
def get_from_line(distro):
distro_image = get_distro_image(distro)
return "FROM {image}".format(image = distro_image)
current_workdir="/"
steps = load_steps(distro)
dockerfile = parse_dockerfile_template(steps)
dockerfile = [ get_from_line(distro) ] + dockerfile
write_dockerfile(distro, dockerfile)

avg_line_length: 33.675497 | max_line_length: 132 | alphanum_fraction: 0.649558

hexsha: ff08a62bff865fad4406e4f93a6d25a52c8a6747 | size: 14,184 | ext: py | lang: Python
max_stars_repo: ixje/neo-mamba @ 8b8a7bf2e600f89b91caff253f25c1c8afee6c0a | path: neo3/storage/implementations/memory.py | licenses: ["MIT"] | stars: null
max_issues_repo: ixje/neo-mamba @ 8b8a7bf2e600f89b91caff253f25c1c8afee6c0a | path: neo3/storage/implementations/memory.py | licenses: ["MIT"] | issues: null
max_forks_repo: ixje/neo-mamba @ 8b8a7bf2e600f89b91caff253f25c1c8afee6c0a | path: neo3/storage/implementations/memory.py | licenses: ["MIT"] | forks: 1 (2021-05-12T08:23:33.000Z to 2021-05-12T08:23:33.000Z)
content:
from __future__ import annotations
from typing import Iterator, Tuple, Dict, List
from neo3 import storage
from neo3.core import types
from neo3.network import payloads
from contextlib import suppress
from copy import deepcopy
class MemoryDB(storage.IDBImplementation):
BLOCK = 'blocks'
BLOCK_HEIGHT_MAP = 'blocksmap'
BLOCK_BEST_HEIGHT = 'blockheight'
CONTRACT = 'contracts'
STORAGE = 'storages'
TX = 'transactions'
def __init__(self, options: dict = None):
self.db: Dict[str, dict] = {
self.BLOCK: {},
self.BLOCK_HEIGHT_MAP: {},
self.CONTRACT: {},
self.STORAGE: {},
self.TX: {}
}
self._best_block_height = -1
def get_snapshotview(self) -> MemorySnapshot:
return MemorySnapshot(self)
def _internal_bestblockheight_get(self):
if self._best_block_height == -1:
raise KeyError
return self._best_block_height
def _internal_bestblockheight_put(self, height: int, batch=None):
if batch:
batch.put(self.BLOCK_BEST_HEIGHT, height, height)
else:
self._best_block_height = height
def _internal_bestblockheight_update(self, height: int, batch=None):
self._internal_bestblockheight_put(height, batch)
def _internal_block_put(self, block: payloads.Block, batch: WriteBatch = None) -> None:
if batch:
batch.put(self.BLOCK, block.hash(), block)
else:
self.db[self.BLOCK][block.hash()] = block
self.db[self.BLOCK_HEIGHT_MAP][block.index] = block.hash()
stored_value = -1
with suppress(KeyError):
stored_value = self._internal_bestblockheight_get()
if block.index > stored_value:
self._best_block_height = block.index
def _internal_block_update(self, block: payloads.Block, batch: WriteBatch = None) -> None:
self._internal_block_put(block, batch)
def _internal_block_delete(self, hash: types.UInt256, batch: WriteBatch = None) -> None:
if batch:
batch.delete(self.BLOCK, hash)
else:
with suppress(KeyError):
block = self.db[self.BLOCK].pop(hash)
self.db[self.BLOCK_HEIGHT_MAP].pop(block.index)
def _internal_block_get(self, hash: types.UInt256) -> payloads.Block:
value = self.db[self.BLOCK].get(hash, None)
if value is None:
raise KeyError
return deepcopy(value)
def _internal_block_get_by_height(self, height: int) -> payloads.Block:
block_hash = self.db[self.BLOCK_HEIGHT_MAP].get(height, None)
if block_hash is None:
raise KeyError
return self._internal_block_get(block_hash)
def _internal_block_all(self) -> Iterator[payloads.Block]:
for block in self.db[self.BLOCK].values():
yield deepcopy(block)
def _internal_contract_put(self, contract: storage.ContractState, batch: WriteBatch = None) -> None:
if batch:
batch.put(self.CONTRACT, contract.script_hash(), contract)
else:
self.db[self.CONTRACT][contract.script_hash()] = contract
def _internal_contract_update(self, contract: storage.ContractState, batch: WriteBatch = None) -> None:
self._internal_contract_put(contract, batch)
def _internal_contract_delete(self, script_hash: types.UInt160, batch: WriteBatch = None) -> None:
if batch:
batch.delete(self.CONTRACT, script_hash)
else:
with suppress(KeyError):
self.db[self.CONTRACT].pop(script_hash)
def _internal_contract_get(self, script_hash: types.UInt160) -> storage.ContractState:
value = self.db[self.CONTRACT].get(script_hash, None)
if value is None:
raise KeyError
return deepcopy(value)
def _internal_contract_all(self) -> Iterator[storage.ContractState]:
for contract in self.db[self.CONTRACT].values():
yield deepcopy(contract)
def _internal_storage_put(self, key: storage.StorageKey,
value: storage.StorageItem,
batch: WriteBatch = None) -> None:
if batch:
batch.put(self.STORAGE, key, value)
else:
self.db[self.STORAGE][key] = value
def _internal_storage_update(self, key: storage.StorageKey,
value: storage.StorageItem,
batch: WriteBatch = None) -> None:
self._internal_storage_put(key, value, batch)
def _internal_storage_delete(self, key: storage.StorageKey, batch: WriteBatch = None) -> None:
if batch:
batch.delete(self.STORAGE, key)
else:
with suppress(KeyError):
self.db[self.STORAGE].pop(key)
def _internal_storage_get(self, key: storage.StorageKey) -> storage.StorageItem:
value = self.db[self.STORAGE].get(key, None)
if value is None:
raise KeyError
return deepcopy(value)
def _internal_storage_all(self, contract_script_hash: types.UInt160 = None) -> Iterator[Tuple[storage.StorageKey,
storage.StorageItem]]:
for k, v in self.db[self.STORAGE].items():
if contract_script_hash:
if contract_script_hash == k.contract:
yield deepcopy(k), deepcopy(v)
else:
yield deepcopy(k), deepcopy(v)
def _internal_storage_find(self, contract_script_hash: types.UInt160,
key_prefix: bytes) -> Iterator[Tuple[storage.StorageKey, storage.StorageItem]]:
script_hash_len = 20
for k, v in self.db[self.STORAGE].items():
# k is of type StorageKey, which starts with a 20-byte script hash.
# We skip this and search only in the `key` attribute
if k.to_array()[script_hash_len:].startswith(key_prefix):
yield deepcopy(k), deepcopy(v)
def _internal_transaction_put(self, transaction: payloads.Transaction, batch: WriteBatch = None) -> None:
if batch:
batch.put(self.TX, transaction.hash(), transaction)
else:
self.db[self.TX][transaction.hash()] = transaction
def _internal_transaction_update(self, transaction: payloads.Transaction, batch: WriteBatch = None) -> None:
self._internal_transaction_put(transaction, batch)
def _internal_transaction_delete(self, hash: types.UInt256, batch: WriteBatch = None) -> None:
if batch:
batch.delete(self.TX, hash)
else:
with suppress(KeyError):
self.db[self.TX].pop(hash)
def _internal_transaction_get(self, hash: types.UInt256) -> payloads.Transaction:
value = self.db[self.TX].get(hash, None)
if value is None:
raise KeyError
return deepcopy(value)
def _internal_transaction_all(self) -> Iterator[payloads.Transaction]:
for tx in self.db[self.TX].values():
yield deepcopy(tx)
def write_batch(self, batch) -> None:
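        # Each batch entry is a (table, action, (key, value)) tuple as emitted by WriteBatch.put/.delete.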
for table, action, pair in batch:
if action == 'delete':
item = self.db[table].pop(pair[0])
if table == self.BLOCK:
self.db[self.BLOCK_HEIGHT_MAP].pop(item.index)
elif action in ['update', 'add']:
if table == self.BLOCK_BEST_HEIGHT:
self._best_block_height = pair[0]
continue
self.db[table][pair[0]] = pair[1]
if table == self.BLOCK:
# pair = (UInt256, Block)
self.db[self.BLOCK_HEIGHT_MAP][pair[1].index] = pair[0]
class WriteBatch:
def __init__(self):
self.statements = []
def __iter__(self):
for s in self.statements:
yield s
def put(self, table, key, value) -> None:
self.statements.append((table, 'add', (key, value)))
def delete(self, table, key) -> None:
self.statements.append((table, 'delete', (key, None)))
class MemorySnapshot(storage.Snapshot):
def __init__(self, db: MemoryDB):
super(MemorySnapshot, self).__init__()
self._db = db
self._batch = WriteBatch()
self._block_cache = MemoryDBCachedBlockAccess(db, self._batch)
self._contract_cache = MemoryDBCachedContractAccess(db, self._batch)
self._storage_cache = MemoryDBCachedStorageAccess(db, self._batch)
self._tx_cache = MemoryDBCachedTXAccess(db, self._batch)
self._block_height_cache = MemoryBestBlockHeightAttribute(db, self._batch)
def commit(self) -> None:
super(MemorySnapshot, self).commit()
self._db.write_batch(self._batch)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# nothing to do
pass
class MemoryBestBlockHeightAttribute(storage.AttributeCache):
def __init__(self, db, batch):
super(MemoryBestBlockHeightAttribute, self).__init__()
self._db = db
self._batch = batch
def _get_internal(self):
return self._db._internal_bestblockheight_get()
def _update_internal(self, value):
self._db._internal_bestblockheight_update(value, self._batch)
class MemoryDBCachedBlockAccess(storage.CachedBlockAccess):
def __init__(self, db, batch):
super(MemoryDBCachedBlockAccess, self).__init__(db)
self._batch = batch
def commit(self) -> None:
keys_to_delete: List[types.UInt256] = []
for trackable in self._dictionary.values(): # trackable.item: payloads.Block
if trackable.state == storage.TrackState.ADDED:
self._db._internal_block_put(trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.CHANGED:
self._db._internal_block_update(trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.DELETED:
self._db._internal_block_delete(trackable.item.hash(), self._batch)
keys_to_delete.append(trackable.key)
for key in keys_to_delete:
with suppress(KeyError):
self._dictionary.pop(key)
self._changeset.clear()
def create_snapshot(self):
return storage.CloneBlockCache(self._db, self)
class MemoryDBCachedContractAccess(storage.CachedContractAccess):
def __init__(self, db, batch):
super(MemoryDBCachedContractAccess, self).__init__(db)
self._batch = batch
def commit(self) -> None:
keys_to_delete: List[types.UInt160] = []
for trackable in self.get_changeset(): # trackable.item: storage.ContractState
if trackable.state == storage.TrackState.ADDED:
self._db._internal_contract_put(trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.CHANGED:
self._db._internal_contract_update(trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.DELETED:
self._db._internal_contract_delete(trackable.item.script_hash(), self._batch)
keys_to_delete.append(trackable.key)
for key in keys_to_delete:
with suppress(KeyError):
self._dictionary.pop(key)
self._changeset.clear()
def create_snapshot(self):
return storage.CloneContractCache(self._db, self)
class MemoryDBCachedStorageAccess(storage.CachedStorageAccess):
def __init__(self, db, batch):
super(MemoryDBCachedStorageAccess, self).__init__(db)
self._batch = batch
def commit(self) -> None:
keys_to_delete: List[storage.StorageKey] = []
for trackable in self.get_changeset():
if trackable.state == storage.TrackState.ADDED:
self._db._internal_storage_put(trackable.key, trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.CHANGED:
self._db._internal_storage_update(trackable.key, trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.DELETED:
self._db._internal_storage_delete(trackable.key, self._batch)
keys_to_delete.append(trackable.key)
for key in keys_to_delete:
with suppress(KeyError):
self._dictionary.pop(key)
self._changeset.clear()
def create_snapshot(self):
return storage.CloneStorageCache(self._db, self)
class MemoryDBCachedTXAccess(storage.CachedTXAccess):
def __init__(self, db, batch):
super(MemoryDBCachedTXAccess, self).__init__(db)
self._batch = batch
def commit(self) -> None:
keys_to_delete: List[types.UInt256] = []
for trackable in self._dictionary.values(): # trackable.item: payloads.Transaction
if trackable.state == storage.TrackState.ADDED:
self._db._internal_transaction_put(trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.CHANGED:
self._db._internal_transaction_update(trackable.item, self._batch)
trackable.state = storage.TrackState.NONE
elif trackable.state == storage.TrackState.DELETED:
self._db._internal_transaction_delete(trackable.item.hash(), self._batch)
keys_to_delete.append(trackable.key)
for key in keys_to_delete:
with suppress(KeyError):
self._dictionary.pop(key)
self._changeset.clear()
def create_snapshot(self):
return storage.CloneTXCache(self._db, self)

avg_line_length: 39.842697 | max_line_length: 120 | alphanum_fraction: 0.632967

hexsha: e557f3dca205ece6ccc7c7e39747dd8f669fdf9c | size: 2,328 | ext: py | lang: Python
max_stars_repo: ITUROBLAB/-itu_odv_ws-idincern- @ 2d1757f69dae1d666b19006f455f87fcea779968 | path: ROS workspace/itu_odv_navigation/src/case studies/trianglepath.py | licenses: ["MIT"] | stars: 1 (2022-02-09T12:39:17.000Z to 2022-02-09T12:39:17.000Z)
max_issues_repo: idincern/itu_odv @ 7c78714bbe4c9a35e14178ab1f1fbc92203a8244 | path: ROS workspace/itu_odv_navigation/src/case studies/trianglepath.py | licenses: ["MIT"] | issues: null
max_forks_repo: idincern/itu_odv @ 7c78714bbe4c9a35e14178ab1f1fbc92203a8244 | path: ROS workspace/itu_odv_navigation/src/case studies/trianglepath.py | licenses: ["MIT"] | forks: null
content:
#!/usr/bin/env python
# license removed for brevity
import rospy
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from std_msgs.msg import Int32
from std_msgs.msg import Float32
rospy.init_node("move_base_goal_sender")
def movebase_client(msg):
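    # Sends one of several hard-coded triangle-path goals to move_base and blocks until a result arrives.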
client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
client.wait_for_server()
goal = MoveBaseGoal()
print msg
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.header.seq+=1
if(msg == 0.0 ) :
goal.target_pose.pose.position.x = 0.0
goal.target_pose.pose.position.y = 0.0
goal.target_pose.pose.orientation.z = 0.0
goal.target_pose.pose.orientation.w = 1.0
print goal
elif (msg == 1.0 ):
goal.target_pose.pose.position.x = 2.0
goal.target_pose.pose.position.y = 0
goal.target_pose.pose.orientation.z = 0.8660254037836398
goal.target_pose.pose.orientation.w = 0.5000000000013837
print goal
elif (msg == 2.0 ):
goal.target_pose.pose.position.x = 1.0
goal.target_pose.pose.position.y = 1.73205080757
goal.target_pose.pose.orientation.z = 0.8660254037836398
goal.target_pose.pose.orientation.w = -0.5000000000013837
print goal
else:
#goal=(0.0, 2, 0.0, 0.0, 0.0, 1.0, 0.0)
goal.target_pose.pose.position.x = 0.0
goal.target_pose.pose.position.y = 0.0
goal.target_pose.pose.orientation.z = 0.0
goal.target_pose.pose.orientation.w = 1.0
print goal
client.send_goal(goal)
wait = client.wait_for_result()
if not wait:
rospy.logerr("Action server not available!")
rospy.signal_shutdown("Action server not available!")
else:
return client.get_result()
def shutdown_msg():
print "Node has shut down!"
rospy.on_shutdown(shutdown_msg)
#autopath
msg=0
while not rospy.is_shutdown():
global flag
if abs(msg)<=3:
movebase_client(msg)
msg+=1
rospy.sleep(0)
#manualpath
#while not rospy.is_shutdown():
# msg=raw_input("Select point . . . 0(base)/1/2/3/4/5")
# msg=int(msg)
# movebase_client(msg)
# rospy.sleep(0)

avg_line_length: 26.157303 | max_line_length: 69 | alphanum_fraction: 0.641323

hexsha: 4976a9fe01293d9bfaeb89791b2f2e7f5b4a6c8b | size: 5,729 | ext: py | lang: Python
max_stars_repo: alexnemes/yardstick_enc @ dc2d0eb663c7648271b04026b90046a27fe0b5fc | path: tests/unit/benchmark/contexts/test_kubernetes.py | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: alexnemes/yardstick_enc @ dc2d0eb663c7648271b04026b90046a27fe0b5fc | path: tests/unit/benchmark/contexts/test_kubernetes.py | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: alexnemes/yardstick_enc @ dc2d0eb663c7648271b04026b90046a27fe0b5fc | path: tests/unit/benchmark/contexts/test_kubernetes.py | licenses: ["Apache-2.0"] | forks: null
content:
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.contexts.kubernetes
from __future__ import absolute_import
import unittest
import mock
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.kubernetes import KubernetesContext
context_cfg = {
'type': 'Kubernetes',
'name': 'k8s',
'servers': {
'host': {
'image': 'openretriever/yardstick',
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
},
'target': {
'image': 'openretriever/yardstick',
'command': '/bin/bash',
'args': ['-c', 'chmod 700 ~/.ssh; chmod 600 ~/.ssh/*; \
service ssh restart;while true ; do sleep 10000; done']
}
}
}
prefix = 'yardstick.benchmark.contexts.kubernetes'
class KubernetesTestCase(unittest.TestCase):
def tearDown(self):
# clear kubernetes contexts from global list so we don't break other tests
Context.list = []
@mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
@mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
@mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
def test_undeploy(self,
mock_delete_pods,
mock_delete_rcs,
mock_delete_ssh):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
k8s_context.undeploy()
self.assertTrue(mock_delete_ssh.called)
self.assertTrue(mock_delete_rcs.called)
self.assertTrue(mock_delete_pods.called)
@mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
@mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
@mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
@mock.patch('{}.KubernetesContext._set_ssh_key'.format(prefix))
def test_deploy(self,
mock_set_ssh_key,
mock_create_rcs,
mock_get_rc_pods,
mock_wait_until_running):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
with mock.patch("yardstick.benchmark.contexts.kubernetes.time"):
k8s_context.deploy()
self.assertTrue(mock_set_ssh_key.called)
self.assertTrue(mock_create_rcs.called)
self.assertTrue(mock_get_rc_pods.called)
self.assertTrue(mock_wait_until_running.called)
@mock.patch('{}.paramiko'.format(prefix), **{"resource_filename.return_value": ""})
@mock.patch('{}.pkg_resources'.format(prefix), **{"resource_filename.return_value": ""})
@mock.patch('{}.utils'.format(prefix))
@mock.patch('{}.open'.format(prefix), create=True)
@mock.patch('{}.k8s_utils.delete_config_map'.format(prefix))
@mock.patch('{}.k8s_utils.create_config_map'.format(prefix))
def test_ssh_key(self, mock_create, mock_delete, mock_open, mock_utils, mock_resources,
mock_paramiko):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
k8s_context._set_ssh_key()
k8s_context._delete_ssh_key()
self.assertTrue(mock_create.called)
self.assertTrue(mock_delete.called)
@mock.patch('{}.k8s_utils.read_pod_status'.format(prefix))
def test_wait_until_running(self, mock_read_pod_status):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
k8s_context.template.pods = ['server']
mock_read_pod_status.return_value = 'Running'
k8s_context._wait_until_running()
@mock.patch('{}.k8s_utils.get_pod_list'.format(prefix))
def test_get_server(self, mock_get_pod_list):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
mock_get_pod_list.return_value.items = []
server = k8s_context._get_server('server')
self.assertIsNone(server)
@mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
def test_create_rcs(self, mock_create_rc):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
k8s_context._create_rcs()
self.assertTrue(mock_create_rc.called)
@mock.patch('{}.k8s_utils.create_replication_controller'.format(prefix))
def test_create_rc(self, mock_create_replication_controller):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
k8s_context._create_rc({})
self.assertTrue(mock_create_replication_controller.called)
@mock.patch('{}.KubernetesContext._delete_rc'.format(prefix))
def test_delete_rcs(self, mock_delete_rc):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
k8s_context._delete_rcs()
self.assertTrue(mock_delete_rc.called)
@mock.patch('{}.k8s_utils.delete_replication_controller'.format(prefix))
def test_delete_rc(self, mock_delete_replication_controller):
k8s_context = KubernetesContext()
k8s_context.init(context_cfg)
k8s_context._delete_rc({})
self.assertTrue(mock_delete_replication_controller.called)
def main():
unittest.main()
if __name__ == '__main__':
main()

avg_line_length: 37.444444 | max_line_length: 92 | alphanum_fraction: 0.660674

hexsha: 811030bdb71038fb3248623243f926849cc8d84e | size: 17,176 | ext: py | lang: Python
max_stars_repo: Nexenta/nova @ ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3 | path: nova/policies/servers.py | licenses: ["Apache-2.0"] | stars: 1 (2020-08-14T02:20:59.000Z to 2020-08-14T02:20:59.000Z)
max_issues_repo: Nexenta/nova @ ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3 | path: nova/policies/servers.py | licenses: ["Apache-2.0"] | issues: 2 (2021-03-31T20:04:16.000Z to 2021-12-13T20:45:03.000Z)
max_forks_repo: Nexenta/nova @ ccecb507ff4bdcdd23d90e7b5b02a22c5a46ecc3 | path: nova/policies/servers.py | licenses: ["Apache-2.0"] | forks: 1 (2020-07-24T02:31:45.000Z to 2020-07-24T02:31:45.000Z)
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
RULE_AOO = base.RULE_ADMIN_OR_OWNER
SERVERS = 'os_compute_api:servers:%s'
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor'
REQUESTED_DESTINATION = 'compute:servers:create:requested_destination'
CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'
rules = [
policy.DocumentedRuleDefault(
name=SERVERS % 'index',
check_str=base.PROJECT_READER_OR_SYSTEM_READER,
description="List all servers",
operations=[
{
'method': 'GET',
'path': '/servers'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'detail',
check_str=base.PROJECT_READER_OR_SYSTEM_READER,
description="List all servers with detailed information",
operations=[
{
'method': 'GET',
'path': '/servers/detail'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'index:get_all_tenants',
check_str=base.SYSTEM_READER,
description="List all servers for all projects",
operations=[
{
'method': 'GET',
'path': '/servers'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
name=SERVERS % 'detail:get_all_tenants',
check_str=base.SYSTEM_READER,
description="List all servers with detailed information for "
" all projects",
operations=[
{
'method': 'GET',
'path': '/servers/detail'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
name=SERVERS % 'allow_all_filters',
check_str=base.SYSTEM_READER,
description="Allow all filters when listing servers",
operations=[
{
'method': 'GET',
'path': '/servers'
},
{
'method': 'GET',
'path': '/servers/detail'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show',
check_str=base.PROJECT_READER_OR_SYSTEM_READER,
description="Show a server",
operations=[
{
'method': 'GET',
'path': '/servers/{server_id}'
}
],
scope_types=['system', 'project']),
# the details in host_status are pretty sensitive, only admins
# should do that by default.
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status',
check_str=base.SYSTEM_ADMIN,
description="""
Show a server with additional host status information.
This means host_status will be shown irrespective of status value. If showing
only host_status UNKNOWN is desired, use the
``os_compute_api:servers:show:host_status:unknown-only`` policy rule.
Microversion 2.75 added the ``host_status`` attribute in the
``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``
API responses which are also controlled by this policy rule, like the
``GET /servers*`` APIs.
""",
operations=[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
},
{
'method': 'PUT',
'path': '/servers/{server_id}'
},
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status:unknown-only',
check_str=base.SYSTEM_ADMIN,
description="""
Show a server with additional host status information, only if host status is
UNKNOWN.
This policy rule will only be enforced when the
``os_compute_api:servers:show:host_status`` policy rule does not pass for the
request. An example policy configuration could be where the
``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and
the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to
allow everyone.
""",
operations=[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
},
{
'method': 'PUT',
'path': '/servers/{server_id}'
},
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
],
scope_types=['system', 'project'],),
policy.DocumentedRuleDefault(
name=SERVERS % 'create',
check_str=base.PROJECT_MEMBER,
description="Create a server",
operations=[
{
'method': 'POST',
'path': '/servers'
}
],
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:forced_host',
# TODO(gmann): We need to make it SYSTEM_ADMIN.
# PROJECT_ADMIN is added for now because create server
# policy is project scoped and there is no way to
# pass the project_id in request body for system scoped
# roles so that create server for other project with force host.
# To achieve that, we need to update the create server API to
# accept the project_id for whom the server needs to be created
# and then change the scope of this policy to system-only
# Because that is API change it needs to be done with new
# microversion.
check_str=base.PROJECT_ADMIN,
description="""
Create a server on the specified host and/or node.
In this case, the server is forced to launch on the specified
host and/or node by bypassing the scheduler filters unlike the
``compute:servers:create:requested_destination`` rule.
""",
operations=[
{
'method': 'POST',
'path': '/servers'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=REQUESTED_DESTINATION,
check_str=base.RULE_ADMIN_API,
description="""
Create a server on the requested compute service host and/or
hypervisor_hostname.
In this case, the requested host and/or hypervisor_hostname is
validated by the scheduler filters unlike the
``os_compute_api:servers:create:forced_host`` rule.
""",
operations=[
{
'method': 'POST',
'path': '/servers'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_volume',
check_str=base.PROJECT_MEMBER,
description="Create a server with the requested volume attached to it",
operations=[
{
'method': 'POST',
'path': '/servers'
}
],
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_network',
check_str=base.PROJECT_MEMBER,
description="Create a server with the requested network attached "
" to it",
operations=[
{
'method': 'POST',
'path': '/servers'
}
],
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:trusted_certs',
check_str=base.PROJECT_MEMBER,
description="Create a server with trusted image certificate IDs",
operations=[
{
'method': 'POST',
'path': '/servers'
}
],
scope_types=['project']),
policy.DocumentedRuleDefault(
name=ZERO_DISK_FLAVOR,
# TODO(gmann): We need to make it SYSTEM_ADMIN.
# PROJECT_ADMIN is added for now because create server
# policy is project scoped and there is no way to
# pass the project_id in request body for system scoped
# roles so that create server for other project with zero disk flavor.
# To achieve that, we need to update the create server API to
# accept the project_id for whom the server needs to be created
# and then change the scope of this policy to system-only
# Because that is API change it needs to be done with new
# microversion.
check_str=base.PROJECT_ADMIN,
description="""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
For a flavor with disk=0, the root disk will be set to exactly the size of the
image used to deploy the instance. However, in this case the filter_scheduler
cannot select the compute host based on the virtual image size. Therefore, 0
should only be used for volume booted instances or for testing purposes.
WARNING: It is a potential security exposure to enable this policy rule
if users can upload their own images since repeated attempts to
create a disk=0 flavor instance with a large image can exhaust
the local disk of the compute (or shared storage cluster). See bug
https://bugs.launchpad.net/nova/+bug/1739646 for details.
""",
operations=[
{
'method': 'POST',
'path': '/servers'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=NETWORK_ATTACH_EXTERNAL,
# TODO(gmann): We need to make it SYSTEM_ADMIN.
# PROJECT_ADMIN is added for now because create server
# policy is project scoped and there is no way to
# pass the project_id in request body for system scoped
# roles so that create server for other project or attach the
# external network. To achieve that, we need to update the
# create server API to accept the project_id for whom the
# server needs to be created and then change the scope of this
# policy to system-only Because that is API change it needs to
# be done with new microversion.
check_str=base.PROJECT_ADMIN,
description="Attach an unshared external network to a server",
operations=[
# Create a server with a requested network or port.
{
'method': 'POST',
'path': '/servers'
},
# Attach a network or port to an existing server.
{
'method': 'POST',
'path': '/servers/{server_id}/os-interface'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'delete',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Delete a server",
operations=[
{
'method': 'DELETE',
'path': '/servers/{server_id}'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'update',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Update a server",
operations=[
{
'method': 'PUT',
'path': '/servers/{server_id}'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'confirm_resize',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Confirm a server resize",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (confirmResize)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'revert_resize',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Revert a server resize",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (revertResize)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'reboot',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Reboot a server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (reboot)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'resize',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Resize a server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=CROSS_CELL_RESIZE,
check_str=base.RULE_NOBODY,
description="Resize a server across cells. By default, this is "
"disabled for all users and recommended to be tested in a "
"deployment for admin users before opening it up to non-admin users. "
"Resizing within a cell is the default preferred behavior even if "
"this is enabled. ",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Rebuild a server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild:trusted_certs',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Rebuild a server with trusted image certificate IDs",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Create an image from a server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image:allow_volume_backed',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Create an image from a volume backed server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'start',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Start a server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-start)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'stop',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Stop a server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-stop)'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'trigger_crash_dump',
check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
description="Trigger crash dump in a server",
operations=[
{
'method': 'POST',
'path': '/servers/{server_id}/action (trigger_crash_dump)'
}
],
scope_types=['system', 'project']),
]
def list_rules():
return rules
| 35.634855
| 79
| 0.575571
|
ad41cf9fea153e5f6ecd245cfb6e4d8738501b96
| 824
|
py
|
Python
|
data/example_old/configuration/access_policies/switch_policy/vpc_domains/mkconf.py
|
nihole/DAFT
|
89b393ab5c075c21d781c023d4facddfdf04ccff
|
[
"Apache-2.0"
] | 1
|
2019-05-29T12:33:35.000Z
|
2019-05-29T12:33:35.000Z
|
data/example_old/configuration/access_policies/switch_policy/vpc_domains/mkconf.py
|
nihole/DAFT
|
89b393ab5c075c21d781c023d4facddfdf04ccff
|
[
"Apache-2.0"
] | null | null | null |
data/example_old/configuration/access_policies/switch_policy/vpc_domains/mkconf.py
|
nihole/DAFT
|
89b393ab5c075c21d781c023d4facddfdf04ccff
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
ACY_SCRIPT_PATH = '../../../../../../scripts/'
template_path = ACY_SCRIPT_PATH + 'configuration/access_policies/switch_policy/vpc_domains/template.j2'
render_path = ACY_SCRIPT_PATH + 'render.py'
######### Main Body ######################
#### Should be the same for all ACI objects #########
import os
import sys
######### get file's names from the command line ####################
if len(sys.argv) == 2:
yaml_file = sys.argv[1]
else:
print (" ######################################################\n")
print (" Syntax is:\n")
print (" python3 mkconf.py configuration_yaml_file.yml\n")
print (" ######################################################\n")
quit()
cmd = 'python %s %s %s' % (render_path, template_path, yaml_file)
returned_value = os.system(cmd)
| 25.75
| 103
| 0.523058
|
ff4d3992d2641c4dd3cd59d8a4ff8334416cf2a4
| 707
|
py
|
Python
|
app.py
|
karwootang-gft/tb-houston-service
|
cd22d334bdac7ecea93f6640b9825286d10d1f26
|
[
"Apache-2.0"
] | 1
|
2020-03-06T17:34:45.000Z
|
2020-03-06T17:34:45.000Z
|
app.py
|
karwootang-gft/tb-houston-service
|
cd22d334bdac7ecea93f6640b9825286d10d1f26
|
[
"Apache-2.0"
] | 67
|
2020-05-29T08:23:26.000Z
|
2021-07-28T06:59:45.000Z
|
app.py
|
karwootang-gft/tb-houston-service
|
cd22d334bdac7ecea93f6640b9825286d10d1f26
|
[
"Apache-2.0"
] | null | null | null |
import os
import config
import logging
#print("DEBUG: {}".format(os.environ['DEBUG']))
#print("SQLALCHEMY_DATABASE_URI: {}".format(os.environ['SQLALCHEMY_DATABASE_URI']))
# Get the application instance
connex_app = config.connex_app
# connect logging between gunicorn and Flask
#gunicorn_logger = logging.getLogger("gunicorn.error")
gunicorn_logger = logging.getLogger("gunicorn.info")
connex_app.app.logger.handlers = gunicorn_logger.handlers
connex_app.app.logger.setLevel(gunicorn_logger.level)
# Read the swagger.yml file to configure the endpoints
connex_app.add_api('houston_service.yml', strict_validation=True)
if __name__ == "__main__":
connex_app.run(port=3000, debug=os.environ['DEBUG'])
| 32.136364
| 83
| 0.790665
|
5f19473435997dc5e52ec2a405c341deac730590
| 11,688
|
py
|
Python
|
gdal-1.10.0/swig/python/osgeo/gdal_array.py
|
TUW-GEO/OGRSpatialRef3D
|
eb54378eabb885dd1e13616b2eb6b2bde99d90e2
|
[
"MIT"
] | 6
|
2017-05-12T08:18:27.000Z
|
2022-01-17T17:16:11.000Z
|
gdal-1.10.0/swig/python/osgeo/gdal_array.py
|
TUW-GEO/OGRSpatialRef3D
|
eb54378eabb885dd1e13616b2eb6b2bde99d90e2
|
[
"MIT"
] | 1
|
2019-03-07T15:25:14.000Z
|
2019-03-07T15:25:14.000Z
|
gdal-1.10.0/swig/python/osgeo/gdal_array.py
|
TUW-GEO/OGRSpatialRef3D
|
eb54378eabb885dd1e13616b2eb6b2bde99d90e2
|
[
"MIT"
] | 1
|
2019-03-05T05:18:51.000Z
|
2019-03-05T05:18:51.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_gdal_array', [dirname(__file__)])
except ImportError:
import _gdal_array
return _gdal_array
if fp is not None:
try:
_mod = imp.load_module('_gdal_array', fp, pathname, description)
finally:
fp.close()
return _mod
_gdal_array = swig_import_helper()
del swig_import_helper
else:
import _gdal_array
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
def GetArrayFilename(*args):
"""GetArrayFilename(PyArrayObject * psArray) -> retStringAndCPLFree *"""
return _gdal_array.GetArrayFilename(*args)
def BandRasterIONumPy(*args, **kwargs):
"""
BandRasterIONumPy(Band band, int bWrite, int xoff, int yoff, int xsize, int ysize, PyArrayObject * psArray,
int buf_type) -> CPLErr
"""
return _gdal_array.BandRasterIONumPy(*args, **kwargs)
import numpy
import _gdal_array
import gdalconst
import gdal
gdal.AllRegister()
codes = { gdalconst.GDT_Byte : numpy.uint8,
gdalconst.GDT_UInt16 : numpy.uint16,
gdalconst.GDT_Int16 : numpy.int16,
gdalconst.GDT_UInt32 : numpy.uint32,
gdalconst.GDT_Int32 : numpy.int32,
gdalconst.GDT_Float32 : numpy.float32,
gdalconst.GDT_Float64 : numpy.float64,
gdalconst.GDT_CInt16 : numpy.complex64,
gdalconst.GDT_CInt32 : numpy.complex64,
gdalconst.GDT_CFloat32 : numpy.complex64,
gdalconst.GDT_CFloat64 : numpy.complex128
}
def OpenArray( array, prototype_ds = None ):
ds = gdal.Open( GetArrayFilename(array) )
if ds is not None and prototype_ds is not None:
if type(prototype_ds).__name__ == 'str':
prototype_ds = gdal.Open( prototype_ds )
if prototype_ds is not None:
CopyDatasetInfo( prototype_ds, ds )
return ds
def flip_code(code):
if isinstance(code, type):
# since several things map to complex64 we must carefully select
# the opposite that is an exact match (ticket 1518)
if code == numpy.int8:
return gdalconst.GDT_Byte
if code == numpy.complex64:
return gdalconst.GDT_CFloat32
for key, value in codes.items():
if value == code:
return key
return None
else:
try:
return codes[code]
except KeyError:
return None
def NumericTypeCodeToGDALTypeCode(numeric_type):
if not isinstance(numeric_type, type):
raise TypeError("Input must be a type")
return flip_code(numeric_type)
def GDALTypeCodeToNumericTypeCode(gdal_code):
return flip_code(gdal_code)
def LoadFile( filename, xoff=0, yoff=0, xsize=None, ysize=None ):
ds = gdal.Open( filename )
if ds is None:
raise ValueError("Can't open "+filename+"\n\n"+gdal.GetLastErrorMsg())
return DatasetReadAsArray( ds, xoff, yoff, xsize, ysize )
def SaveArray( src_array, filename, format = "GTiff", prototype = None ):
driver = gdal.GetDriverByName( format )
if driver is None:
raise ValueError("Can't find driver "+format)
return driver.CreateCopy( filename, OpenArray(src_array,prototype) )
def DatasetReadAsArray( ds, xoff=0, yoff=0, xsize=None, ysize=None, buf_obj=None ):
if xsize is None:
xsize = ds.RasterXSize
if ysize is None:
ysize = ds.RasterYSize
if ds.RasterCount == 1:
return BandReadAsArray( ds.GetRasterBand(1), xoff, yoff, xsize, ysize, buf_obj = buf_obj)
datatype = ds.GetRasterBand(1).DataType
for band_index in range(2,ds.RasterCount+1):
if datatype != ds.GetRasterBand(band_index).DataType:
datatype = gdalconst.GDT_Float32
typecode = GDALTypeCodeToNumericTypeCode( datatype )
    if typecode is None:
datatype = gdalconst.GDT_Float32
typecode = numpy.float32
if buf_obj is not None:
for band_index in range(1,ds.RasterCount+1):
BandReadAsArray( ds.GetRasterBand(band_index),
xoff, yoff, xsize, ysize, buf_obj = buf_obj[band_index-1])
return buf_obj
array_list = []
for band_index in range(1,ds.RasterCount+1):
band_array = BandReadAsArray( ds.GetRasterBand(band_index),
xoff, yoff, xsize, ysize)
array_list.append( numpy.reshape( band_array, [1,ysize,xsize] ) )
return numpy.concatenate( array_list )
def BandReadAsArray( band, xoff = 0, yoff = 0, win_xsize = None, win_ysize = None,
buf_xsize=None, buf_ysize=None, buf_obj=None ):
"""Pure python implementation of reading a chunk of a GDAL file
into a numpy array. Used by the gdal.Band.ReadAsArray method."""
if win_xsize is None:
win_xsize = band.XSize
if win_ysize is None:
win_ysize = band.YSize
if buf_obj is None:
if buf_xsize is None:
buf_xsize = win_xsize
if buf_ysize is None:
buf_ysize = win_ysize
else:
if len(buf_obj.shape) == 2:
shape_buf_xsize = buf_obj.shape[1]
shape_buf_ysize = buf_obj.shape[0]
else:
shape_buf_xsize = buf_obj.shape[2]
shape_buf_ysize = buf_obj.shape[1]
if buf_xsize is not None and buf_xsize != shape_buf_xsize:
            raise ValueError('Specified buf_xsize not consistent with array shape')
if buf_ysize is not None and buf_ysize != shape_buf_ysize:
            raise ValueError('Specified buf_ysize not consistent with array shape')
buf_xsize = shape_buf_xsize
buf_ysize = shape_buf_ysize
if buf_obj is None:
datatype = band.DataType
typecode = GDALTypeCodeToNumericTypeCode( datatype )
        if typecode is None:
datatype = gdalconst.GDT_Float32
typecode = numpy.float32
else:
datatype = NumericTypeCodeToGDALTypeCode( typecode )
if datatype == gdalconst.GDT_Byte and band.GetMetadataItem('PIXELTYPE', 'IMAGE_STRUCTURE') == 'SIGNEDBYTE':
typecode = numpy.int8
ar = numpy.empty([buf_ysize,buf_xsize], dtype = typecode)
if BandRasterIONumPy( band, 0, xoff, yoff, win_xsize, win_ysize,
ar, datatype ) != 0:
return None
return ar
else:
datatype = NumericTypeCodeToGDALTypeCode( buf_obj.dtype.type )
if not datatype:
raise ValueError("array does not have corresponding GDAL data type")
if BandRasterIONumPy( band, 0, xoff, yoff, win_xsize, win_ysize,
buf_obj, datatype ) != 0:
return None
return buf_obj
def BandWriteArray( band, array, xoff=0, yoff=0 ):
"""Pure python implementation of writing a chunk of a GDAL file
from a numpy array. Used by the gdal.Band.WriteArray method."""
if array is None or len(array.shape) != 2:
raise ValueError("expected array of dim 2")
xsize = array.shape[1]
ysize = array.shape[0]
if xsize + xoff > band.XSize or ysize + yoff > band.YSize:
raise ValueError("array larger than output file, or offset off edge")
datatype = NumericTypeCodeToGDALTypeCode( array.dtype.type )
# if we receive some odd type, like int64, try casting to a very
# generic type we do support (#2285)
if not datatype:
gdal.Debug( 'gdal_array', 'force array to float64' )
array = array.astype( numpy.float64 )
datatype = NumericTypeCodeToGDALTypeCode( array.dtype.type )
if not datatype:
raise ValueError("array does not have corresponding GDAL data type")
return BandRasterIONumPy( band, 1, xoff, yoff, xsize, ysize,
array, datatype )
def CopyDatasetInfo( src, dst, xoff=0, yoff=0 ):
"""
Copy georeferencing information and metadata from one dataset to another.
src: input dataset
dst: output dataset - It can be a ROI -
xoff, yoff: dst's offset with respect to src in pixel/line.
Notes: Destination dataset must have update access. Certain formats
do not support creation of geotransforms and/or gcps.
"""
dst.SetMetadata( src.GetMetadata() )
#Check for geo transform
gt = src.GetGeoTransform()
if gt != (0,1,0,0,0,1):
dst.SetProjection( src.GetProjectionRef() )
if (xoff == 0) and (yoff == 0):
dst.SetGeoTransform( gt )
else:
ngt = [gt[0],gt[1],gt[2],gt[3],gt[4],gt[5]]
            ngt[0] = gt[0] + xoff*gt[1] + yoff*gt[2]
            ngt[3] = gt[3] + xoff*gt[4] + yoff*gt[5]
dst.SetGeoTransform( ( ngt[0], ngt[1], ngt[2], ngt[3], ngt[4], ngt[5] ) )
#Check for GCPs
elif src.GetGCPCount() > 0:
if (xoff == 0) and (yoff == 0):
dst.SetGCPs( src.GetGCPs(), src.GetGCPProjection() )
else:
gcps = src.GetGCPs()
#Shift gcps
new_gcps = []
for gcp in gcps:
ngcp = gdal.GCP()
ngcp.GCPX = gcp.GCPX
ngcp.GCPY = gcp.GCPY
ngcp.GCPZ = gcp.GCPZ
ngcp.GCPPixel = gcp.GCPPixel - xoff
ngcp.GCPLine = gcp.GCPLine - yoff
ngcp.Info = gcp.Info
ngcp.Id = gcp.Id
new_gcps.append(ngcp)
try:
dst.SetGCPs( new_gcps , src.GetGCPProjection() )
except:
print ("Failed to set GCPs")
return
return
| 33.976744
| 115
| 0.613107
|
62deef4afd760b61d3fd2e062f26c8a66ba7010d
| 2,466
|
py
|
Python
|
python/paddle_fl/mpc/tests/unittests/test_op_scale.py
|
barrierye/PaddleFL
|
eff6ef28491fa2011686ca3daa4f680e5ef83deb
|
[
"Apache-2.0"
] | 379
|
2019-09-27T14:26:42.000Z
|
2022-03-29T14:28:12.000Z
|
python/paddle_fl/mpc/tests/unittests/test_op_scale.py
|
Sprate/PaddleFL
|
583691acd5db0a7ca331cc9a72415017b18669b8
|
[
"Apache-2.0"
] | 132
|
2019-10-16T03:22:03.000Z
|
2022-03-23T08:54:29.000Z
|
python/paddle_fl/mpc/tests/unittests/test_op_scale.py
|
Sprate/PaddleFL
|
583691acd5db0a7ca331cc9a72415017b18669b8
|
[
"Apache-2.0"
] | 106
|
2019-09-27T12:47:18.000Z
|
2022-03-29T09:07:25.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module tests the scale op.
"""
import unittest
from multiprocessing import Manager
import numpy as np
import test_op_base
from op_test import OpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle_fl.mpc.data_utils.data_utils import get_datautils
aby3 = get_datautils('aby3')
class TestScaleOp(OpTest):
def setUp(self):
self.op_type = "mpc_scale"
self.dtype = np.int64
self.init_dtype_type()
input_p = np.random.random((10, 10))
self.inputs = {'X': self.lazy_share(input_p).astype(self.dtype)}
self.attrs = {'scale': -2.3}
self.outputs = {
'Out': self.lazy_share(input_p * self.attrs['scale'])
}
def init_dtype_type(self):
pass
def test_check_output(self):
place = core.CPUPlace()
self.check_output_with_place(place, atol=1e-3,)
def test_check_grad(self):
place = core.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', max_relative_error=0.05)
class TestScaleOpScaleVariable(OpTest):
def setUp(self):
self.op_type = "mpc_scale"
self.dtype = np.int64
self.init_dtype_type()
self.scale = -2.3
input_p = np.random.random((10, 10))
self.inputs = {
'X': self.lazy_share(input_p),
'ScaleTensor': np.array([self.scale]).astype('float')
}
self.attrs = {}
self.outputs = {'Out': self.lazy_share(input_p * self.scale)}
def init_dtype_type(self):
pass
def test_check_output(self):
place = core.CPUPlace()
self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self):
place = core.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', max_relative_error=0.05)
if __name__ == "__main__":
unittest.main()
| 29.710843
| 80
| 0.662206
|
9951e69a16244759f78389857e2911c0fba531c2
| 9,301
|
py
|
Python
|
tests/index/test_api_index_dataset.py
|
agdc-research-trial/gdf
|
82ed29c263eaf65f5c1fbb4e9207c99e9700b85c
|
[
"Apache-2.0"
] | 1
|
2015-06-01T01:31:44.000Z
|
2015-06-01T01:31:44.000Z
|
tests/index/test_api_index_dataset.py
|
agdc-research-trial/gdf
|
82ed29c263eaf65f5c1fbb4e9207c99e9700b85c
|
[
"Apache-2.0"
] | null | null | null |
tests/index/test_api_index_dataset.py
|
agdc-research-trial/gdf
|
82ed29c263eaf65f5c1fbb4e9207c99e9700b85c
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import datetime
from collections import namedtuple
from contextlib import contextmanager
from copy import deepcopy
from uuid import UUID
from datacube.index.postgres._datasets import DatasetResource
from datacube.index.exceptions import DuplicateRecordError
from datacube.model import DatasetType, MetadataType, Dataset
_nbar_uuid = UUID('f2f12372-8366-11e5-817e-1040f381a756')
_ortho_uuid = UUID('5cf41d98-eda9-11e4-8a8e-1040f381a756')
_telemetry_uuid = UUID('4ec8fe97-e8b9-11e4-87ff-1040f381a756')
# An NBAR with source datasets. Many fields have been removed to keep it semi-focused on our ingest test.
_EXAMPLE_NBAR = {
'id': str(_nbar_uuid),
'product_type': 'nbar_brdf',
'checksum_path': 'package.sha1',
'ga_label': 'LS8_OLITIRS_NBAR_P54_GALPGS01-002_112_079_20140126',
'ga_level': 'P54',
'size_bytes': 4550,
'platform': {'code': 'LANDSAT_8'},
'creation_dt': datetime.datetime(2014, 1, 26, 2, 5, 23, 126373).isoformat(),
'instrument': {'name': 'OLI_TIRS'},
'format': {'name': 'GeoTIFF'},
'extent': {
'center_dt': datetime.datetime(2014, 1, 26, 2, 5, 23, 126373).isoformat(),
'coord': {
'ul': {'lat': -26.37259, 'lon': 116.58914},
'lr': {'lat': -28.48062, 'lon': 118.96145},
'ur': {'lat': -26.36025, 'lon': 118.92432},
'll': {'lat': -28.49412, 'lon': 116.58121}
}
},
'lineage': {
'machine': {},
'source_datasets': {
'ortho': {
'product_level': 'L1T',
'product_type': 'ortho',
'id': str(_ortho_uuid),
'usgs': {
'scene_id': 'LC81120792014026ASA00'
},
'extent': {
'center_dt': datetime.datetime(2014, 1, 26, 2, 5, 23, 126373).isoformat(),
'coord': {
'ul': {'lat': -26.37259, 'lon': 116.58914},
'lr': {'lat': -28.48062, 'lon': 118.96145},
'ur': {'lat': -26.36025, 'lon': 118.92432},
'll': {'lat': -28.49412, 'lon': 116.58121}
}
},
'size_bytes': 1854924494,
'platform': {
'code': 'LANDSAT_8'},
'creation_dt': datetime.datetime(2015, 4, 7, 0, 58, 8).isoformat(),
'instrument': {'name': 'OLI_TIRS'},
'checksum_path': 'package.sha1',
'ga_label': 'LS8_OLITIRS_OTH_P51_GALPGS01-002_112_079_20140126',
'grid_spatial': {
'projection': {
'spatial_reference': 'EPSG:28350',
'resampling_option': 'CUBIC_CONVOLUTION',
'geo_ref_points': {
'ul': {'y': 7082987.5, 'x': 459012.5},
'lr': {'y': 6847987.5, 'x': 692012.5},
'ur': {'y': 7082987.5, 'x': 692012.5},
'll': {'y': 6847987.5, 'x': 459012.5}
},
'orientation': 'NORTH_UP',
}
},
'acquisition': {
'groundstation': {
'code': 'ASA',
'eods_domain_code': '002',
'label': 'Alice Springs'
}
},
'format': {'name': 'GEOTIFF'},
'lineage': {
'algorithm': {
'name': 'LPGS',
'parameters': {},
'version': '2.4.0'
},
'machine': {},
'source_datasets': {
'satellite_telemetry_data': {
'product_type': 'satellite_telemetry_data',
'checksum_path': 'package.sha1',
'id': str(_telemetry_uuid),
'ga_label': 'LS8_OLITIRS_STD-MD_P00_LC81160740742015089ASA00_'
'116_074_20150330T022553Z20150330T022657',
'ga_level': 'P00',
'size_bytes': 637660782,
'platform': {
'code': 'LANDSAT_8'},
'creation_dt': datetime.datetime(2015, 4, 22, 6, 32, 4).isoformat(),
'instrument': {'name': 'OLI_TIRS'},
'format': {
'name': 'MD'},
'lineage': {
'source_datasets': {}
}
}
}
}
}
}
}
}
_EXAMPLE_METADATA_TYPE = MetadataType(
{
'name': 'eo',
'dataset': dict(
id=['id'],
label=['ga_label'],
creation_time=['creation_dt'],
measurements=['image', 'bands'],
sources=['lineage', 'source_datasets']
)
},
dataset_search_fields={}
)
_EXAMPLE_DATASET_TYPE = DatasetType(
_EXAMPLE_METADATA_TYPE,
{
'name': 'eo',
'description': "",
'metadata_type': 'eo',
'metadata': {}
}
)
def _build_dataset(doc):
sources = {name: _build_dataset(src) for name, src in doc['lineage']['source_datasets'].items()}
return Dataset(_EXAMPLE_DATASET_TYPE, doc, uris=['file://test.zzz'], sources=sources)
_EXAMPLE_NBAR_DATASET = _build_dataset(_EXAMPLE_NBAR)
DatasetRecord = namedtuple('DatasetRecord', ['id', 'metadata', 'dataset_type_ref', 'uris',
'added', 'added_by', 'archived'])
class MockIndex(object):
def __init__(self, db):
self._db = db
class MockDb(object):
def __init__(self):
self.dataset = {}
self.dataset_source = set()
@contextmanager
def begin(self):
yield self
@contextmanager
def connect(self):
yield self
def get_dataset(self, id):
return self.dataset.get(id, None)
def get_locations(self, dataset):
return ['file:xxx']
def datasets_intersection(self, ids):
return [k for k in ids if k in self.dataset]
def insert_dataset_location(self, *args, **kwargs):
return
def insert_dataset(self, metadata_doc, dataset_id, dataset_type_id):
# Will we pretend this one was already ingested?
if dataset_id in self.dataset:
raise DuplicateRecordError('already ingested')
self.dataset[dataset_id] = DatasetRecord(dataset_id, deepcopy(metadata_doc), dataset_type_id,
None, None, None, None)
return True
def insert_dataset_source(self, classifier, dataset_id, source_dataset_id):
self.dataset_source.add((classifier, dataset_id, source_dataset_id))
class MockTypesResource(object):
def __init__(self, type_):
self.type = type_
def get(self, *args, **kwargs):
return self.type
def get_by_name(self, *args, **kwargs):
return self.type
def test_index_dataset():
mock_db = MockDb()
mock_types = MockTypesResource(_EXAMPLE_DATASET_TYPE)
datasets = DatasetResource(mock_db, mock_types)
dataset = datasets.add(_EXAMPLE_NBAR_DATASET)
ids = {d.id for d in mock_db.dataset.values()}
assert ids == {_nbar_uuid, _ortho_uuid, _telemetry_uuid}
# Three datasets (ours and the two embedded source datasets)
assert len(mock_db.dataset) == 3
# Our three datasets should be linked together
# Nbar -> Ortho -> Telemetry
assert len(mock_db.dataset_source) == 2
assert mock_db.dataset_source == {
('ortho', _nbar_uuid, _ortho_uuid),
('satellite_telemetry_data', _ortho_uuid, _telemetry_uuid)
}
# Nothing ingested, because we reported the first as already ingested.
dataset = datasets.add(_EXAMPLE_NBAR_DATASET)
assert len(mock_db.dataset) == 3
assert len(mock_db.dataset_source) == 2
def test_index_already_ingested_source_dataset():
mock_db = MockDb()
mock_types = MockTypesResource(_EXAMPLE_DATASET_TYPE)
datasets = DatasetResource(mock_db, mock_types)
dataset = datasets.add(_EXAMPLE_NBAR_DATASET.sources['ortho'])
assert len(mock_db.dataset) == 2
assert len(mock_db.dataset_source) == 1
dataset = datasets.add(_EXAMPLE_NBAR_DATASET)
assert len(mock_db.dataset) == 3
assert len(mock_db.dataset_source) == 2
def test_index_two_levels_already_ingested():
mock_db = MockDb()
mock_types = MockTypesResource(_EXAMPLE_DATASET_TYPE)
datasets = DatasetResource(mock_db, mock_types)
dataset = datasets.add(_EXAMPLE_NBAR_DATASET.sources['ortho'].sources['satellite_telemetry_data'])
assert len(mock_db.dataset) == 1
assert len(mock_db.dataset_source) == 0
dataset = datasets.add(_EXAMPLE_NBAR_DATASET)
assert len(mock_db.dataset) == 3
assert len(mock_db.dataset_source) == 2
| 35.231061
| 105
| 0.541555
|
82652488eb048098255e0ade33700f58ab86e333
| 3,650
|
py
|
Python
|
samples/python/plane_ar.py
|
gitwithmch/opencv
|
a8844de7b5ffad08b4463536d21aadd6c0d1f3b3
|
[
"BSD-3-Clause"
] | 1
|
2021-03-05T18:41:35.000Z
|
2021-03-05T18:41:35.000Z
|
samples/python/plane_ar.py
|
gitwithmch/opencv
|
a8844de7b5ffad08b4463536d21aadd6c0d1f3b3
|
[
"BSD-3-Clause"
] | null | null | null |
samples/python/plane_ar.py
|
gitwithmch/opencv
|
a8844de7b5ffad08b4463536d21aadd6c0d1f3b3
|
[
"BSD-3-Clause"
] | 1
|
2020-03-24T16:05:07.000Z
|
2020-03-24T16:05:07.000Z
|
#!/usr/bin/env python
'''
Planar augmented reality
==================
This sample shows an example of augmented reality overlay over a planar object
tracked by PlaneTracker from plane_tracker.py. The solvePnP function is used to
estimate the tracked object's location in 3D space.
video: http://www.youtube.com/watch?v=pzVbhxx6aog
Usage
-----
plane_ar.py [<video source>]
Keys:
SPACE - pause video
c - clear targets
Select a textured planar object to track by drawing a box with a mouse.
Use the 'focal' slider to adjust the camera focal length for proper video augmentation.
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
import common
from plane_tracker import PlaneTracker
from video import presets
# Simple model of a house - cube with a triangular prism "roof"
ar_verts = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1],
[0, 0.5, 2], [1, 0.5, 2]])
ar_edges = [(0, 1), (1, 2), (2, 3), (3, 0),
(4, 5), (5, 6), (6, 7), (7, 4),
(0, 4), (1, 5), (2, 6), (3, 7),
(4, 8), (5, 8), (6, 9), (7, 9), (8, 9)]
class App:
def __init__(self, src):
self.cap = video.create_capture(src, presets['book'])
self.frame = None
self.paused = False
self.tracker = PlaneTracker()
cv.namedWindow('plane')
cv.createTrackbar('focal', 'plane', 25, 50, common.nothing)
self.rect_sel = common.RectSelector('plane', self.on_rect)
def on_rect(self, rect):
self.tracker.add_target(self.frame, rect)
def run(self):
while True:
playing = not self.paused and not self.rect_sel.dragging
if playing or self.frame is None:
ret, frame = self.cap.read()
if not ret:
break
self.frame = frame.copy()
vis = self.frame.copy()
if playing:
tracked = self.tracker.track(self.frame)
for tr in tracked:
cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
for (x, y) in np.int32(tr.p1):
cv.circle(vis, (x, y), 2, (255, 255, 255))
self.draw_overlay(vis, tr)
self.rect_sel.draw(vis)
cv.imshow('plane', vis)
ch = cv.waitKey(1)
if ch == ord(' '):
self.paused = not self.paused
if ch == ord('c'):
self.tracker.clear()
if ch == 27:
break
def draw_overlay(self, vis, tracked):
x0, y0, x1, y1 = tracked.target.rect
quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]])
fx = 0.5 + cv.getTrackbarPos('focal', 'plane') / 50.0
h, w = vis.shape[:2]
K = np.float64([[fx*w, 0, 0.5*(w-1)],
[0, fx*w, 0.5*(h-1)],
[0.0,0.0, 1.0]])
dist_coef = np.zeros(4)
_ret, rvec, tvec = cv.solvePnP(quad_3d, tracked.quad, K, dist_coef)
verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
verts = cv.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
for i, j in ar_edges:
(x0, y0), (x1, y1) = verts[i], verts[j]
cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2)
if __name__ == '__main__':
print(__doc__)
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
App(video_src).run()
| 32.589286
| 84
| 0.52411
|
ea2670d6f495df1cd901f85951caa3c3aa900ae2
| 466
|
py
|
Python
|
receipts/rest_receipts/urls.py
|
rolisz/receipt_budget
|
74f73e7f8bb8b0b4fa89bfebf4c3c2c930511308
|
[
"BSD-3-Clause"
] | 15
|
2016-03-02T18:16:46.000Z
|
2022-03-05T10:55:58.000Z
|
receipts/rest_receipts/urls.py
|
rolisz/receipt_budget
|
74f73e7f8bb8b0b4fa89bfebf4c3c2c930511308
|
[
"BSD-3-Clause"
] | 1
|
2017-04-10T23:46:43.000Z
|
2017-04-10T23:46:43.000Z
|
receipts/rest_receipts/urls.py
|
rolisz/receipt_budget
|
74f73e7f8bb8b0b4fa89bfebf4c3c2c930511308
|
[
"BSD-3-Clause"
] | 11
|
2016-03-02T18:16:12.000Z
|
2020-07-19T11:57:27.000Z
|
from django.conf.urls import patterns, include, url
from rest_receipts import views
__author__ = 'Roland'
urlpatterns = patterns(
'',
url(r'^expense/add/', views.add),
url(r'^expense/index/', views.index),
url(r'^expense/edit/(\d+)', views.edit),
url(r'^expense/delete/(\d+)', views.delete),
url(r'^expenses/(.+?).json', views.expense_list_json, name="day_json"),
url(r'^expenses/(.+?).json', views.expense_list_json, name="all_json"),
)
| 35.846154
| 75
| 0.658798
|
4a5ee5270e415242cd035a7e73c872b3bcd7c5c3
| 759
|
py
|
Python
|
var/spack/repos/builtin/packages/r-goftest/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-goftest/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-01-08T08:41:11.000Z
|
2022-03-14T19:28:07.000Z
|
var/spack/repos/builtin/packages/r-goftest/package.py
|
foeroyingur/spack
|
5300cbbb2e569190015c72d0970d25425ea38647
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGoftest(RPackage):
"""Cramer-Von Mises and Anderson-Darling tests of goodness-of-fit for
continuous univariate distributions, using efficient algorithms.
"""
homepage = "https://cloud.r-project.org/package=goftest"
url = "https://cloud.r-project.org/src/contrib/goftest_1.2-2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/goftest"
version('1.2-2', sha256='e497992666b002b6c6bed73bf05047ad7aa69eb58898da0ad8f1f5b2219e7647')
depends_on('r@3.3:', type=('build', 'run'))
| 36.142857
| 95
| 0.72859
|
811e733ff73268f0e369d3c59b0f993e4f5ec5c8
| 26,404
|
py
|
Python
|
syft/frameworks/torch/hook_args.py
|
amitkarn3/PythnSyft
|
8eaa637e1ca54c963281e847556cb14b4a76b46b
|
[
"Apache-1.1"
] | null | null | null |
syft/frameworks/torch/hook_args.py
|
amitkarn3/PythnSyft
|
8eaa637e1ca54c963281e847556cb14b4a76b46b
|
[
"Apache-1.1"
] | null | null | null |
syft/frameworks/torch/hook_args.py
|
amitkarn3/PythnSyft
|
8eaa637e1ca54c963281e847556cb14b4a76b46b
|
[
"Apache-1.1"
] | null | null | null |
import torch
import syft as sy
from syft.exceptions import RemoteTensorFoundError
from syft.exceptions import PureTorchTensorFoundError
from syft.exceptions import ResponseSignatureError
from syft.frameworks.torch.tensors.interpreters import AbstractTensor
from syft.frameworks.torch.tensors.interpreters import PointerTensor
from syft.frameworks.torch.tensors.interpreters import TorchTensor
from syft.frameworks.torch.tensors.interpreters import FixedPrecisionTensor
from syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters import MultiPointerTensor
from syft.frameworks.torch.tensors.decorators import LoggingTensor
from typing import Callable
from typing import Union
from typing import Tuple
from typing import List
hook_method_args_functions = {}
hook_method_response_functions = {}
get_tensor_type_functions = {}
one = lambda _args: 1
# dict to specify the action depending on the type found
type_rule = {
list: lambda _args: [build_rule(a) for a in _args],
tuple: lambda _args: tuple([build_rule(a) for a in _args]),
    dict: one, # FIXME This is for AdditiveSharingTensor.child; it can be confusing, and AST.child
    # should perhaps be of type ShareDict (extending dict) or something like this
LoggingTensor: one,
FixedPrecisionTensor: one,
AdditiveSharingTensor: one,
MultiPointerTensor: one,
PointerTensor: one,
torch.Tensor: one,
torch.nn.Parameter: one,
}
# Dict to return the proper lambda function for the right torch or syft tensor type
forward_func = {
PointerTensor: lambda p: (_ for _ in ()).throw(RemoteTensorFoundError(p)),
torch.Tensor: lambda i: i.child
if hasattr(i, "child")
else (_ for _ in ()).throw(PureTorchTensorFoundError),
torch.nn.Parameter: lambda i: i.child
if hasattr(i, "child")
else (_ for _ in ()).throw(PureTorchTensorFoundError),
LoggingTensor: lambda i: i.child,
FixedPrecisionTensor: lambda i: i.child,
AdditiveSharingTensor: lambda i: i.child,
MultiPointerTensor: lambda i: i.child,
"my_syft_tensor_type": lambda i: i.child,
}
# Dict to return the proper lambda function for the right torch or syft tensor type
backward_func = {
TorchTensor: lambda i: i.wrap(),
torch.Tensor: lambda i: i.wrap(),
torch.nn.Parameter: lambda i: torch.nn.Parameter(data=i),
PointerTensor: lambda i: i,
LoggingTensor: lambda i: LoggingTensor().on(i, wrap=False),
FixedPrecisionTensor: lambda i, **kwargs: FixedPrecisionTensor(**kwargs).on(i, wrap=False),
AdditiveSharingTensor: lambda i, **kwargs: AdditiveSharingTensor(**kwargs).on(i, wrap=False),
MultiPointerTensor: lambda i, **kwargs: MultiPointerTensor(**kwargs).on(i, wrap=False),
"my_syft_tensor_type": lambda i, **kwargs: "my_syft_tensor_type(**kwargs).on(i, wrap=False)",
}
# methods that we really don't want to hook, for example because they have an arbitrary
# number of tensors in their args or response signature
exclude_methods = {"__getitem__", "view"}
exclude_functions = {"torch.unbind", "unbind"}
def hook_method_args(attr, method_self, args, kwargs):
"""Method arguments are sometimes simple types (such as strings or ints) but
sometimes they are custom Syft tensors such as wrappers (torch.Tensor) or LoggingTensor
or some other tensor type. Complex types (which have a .child attribute) need to
have arguments converted from the arg to arg.child so that the types match as the
method is being called down the chain. To make this efficient, we cache which args
need to be replaced with their children in a dictionary called
hook_method_args_functions. However, sometimes a method (an attr) has multiple
different argument signatures, such that sometimes arguments have .child objects
and other times they don't (such as x.div(), which can accept either a tensor or a
float as an argument). This invalidates the cache, so we need to have a try/except
which refreshes the cache if the signature triggers an error.
Args:
attr (str): the name of the method being called
method_self: the tensor on which the method is being called
args (list): the arguments being passed to the method
kwargs (dict): the keyword arguments being passed to the function
            (these are not hooked, i.e. not replaced with their .child attr)
"""
# Specify an id to distinguish methods from different classes
# As they won't be used with the same arg types
attr_id = type(method_self).__name__ + "." + attr
try:
assert attr not in exclude_methods
# Load the utility function to transform the args
hook_args = hook_method_args_functions[attr_id]
# Try running it
new_self, new_args = hook_args((method_self, args))
except (IndexError, KeyError, AssertionError): # Update the function in case of an error
args_hook_function, _ = build_hook_args_function((method_self, args))
# Store this utility function in the registry
hook_method_args_functions[attr_id] = args_hook_function
# Run it
new_self, new_args = args_hook_function((method_self, args))
return new_self, new_args, kwargs
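# Illustrative example (hypothetical tensors): for a call like x.add(y) where x and y
# are LoggingTensor instances, attr_id is "LoggingTensor.add" and the cached hook maps
# (x, (y,)) to (x.child, (y.child,)), so the method can be re-dispatched down the
# child chain; kwargs are passed through untouched.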
def hook_function_args(attr, args, kwargs, return_args_type=False):
"""See hook_method_args for details
Args:
attr (str): the name of the function being called
args (list): the arguments being passed to the function
kwargs (dict): the keyword arguments being passed to the function
            (these are not hooked, i.e. not replaced with their .child attr)
return_args_type (bool): return the type of the tensors in the
original arguments
Returns:
- the arguments where all tensors are replaced with their child
- the type of this new child
(- the type of the tensors in the arguments)
"""
try:
# Load the utility function to transform the args
# TODO rename registry or use another one than for methods
hook_args = hook_method_args_functions[attr]
get_tensor_type_function = get_tensor_type_functions[attr]
# Try running it
new_args = hook_args(args)
except (IndexError, KeyError, AssertionError): # Update the function in case of an error
args_hook_function, get_tensor_type_function = build_hook_args_function(
args, return_tuple=True
)
# Store the utility functions in registries
hook_method_args_functions[attr] = args_hook_function
get_tensor_type_functions[attr] = get_tensor_type_function
# Run it
new_args = args_hook_function(args)
new_type = get_tensor_type_function(new_args)
if return_args_type:
args_type = get_tensor_type_function(args)
return new_args, kwargs, new_type, args_type
else:
return new_args, kwargs, new_type
def build_hook_args_function(args, return_tuple=False):
"""
    Build the function f that hooks the arguments:
f(args) = new_args
"""
# Inspect the call to find tensor arguments and return a rule whose
# structure is the same as the args object, with 1 where there was
# (torch or syft) tensors and 0 when not (ex: number, str, ...)
rule = build_rule(args)
# Build a function with this rule to efficiently replace syft tensors
# (but not pointer) with their child in the args objects
args_hook_function = build_args_hook(args, rule, return_tuple)
    # Build a function with this rule to efficiently get the child type of the
    # tensor found in the args
get_tensor_type_function = build_get_tensor_type(rule)
return args_hook_function, get_tensor_type_function
def hook_response(attr, response, wrap_type, wrap_args={}, new_self=None):
"""
When executing a command, arguments are inspected and all tensors are replaced
with their child attribute until a pointer or a torch tensor is found (for
example an argument could be a torch wrapper with a child being a LoggingTensor, with
a child being a torch tensor). When the result of the command is calculated,
we need to rebuild this chain in the reverse order (in our example put back
a LoggingTensor on top of the result and then a torch wrapper).
To make this efficient, we cache which elements of the response (which can be more
complicated with nested tuples for example) need to be wrapped in a dictionary called
hook_method_response_functions. However, sometimes a method (an attr) has multiple
different response signatures. This invalidates the cache, so we need to have a
try/except which refreshes the cache if the signature triggers an error.
Args:
attr (str): the name of the method being called
        response (list or dict): the response returned by the method call
wrap_type (type): the type of wrapper we'd like to have
wrap_args (dict): options to give to the wrapper (for example the
precision for the precision tensor)
        new_self: used for the case, just below, of in-place ops
"""
    # in-place methods should just return new_self
if "__i" == attr[0:3]:
return new_self
    # TODO: Why do we need to cast it to a tuple? This is a (small) time waste
response_is_tuple = isinstance(response, tuple)
if wrap_type == torch.nn.Parameter:
wrap_type = torch.Tensor
# Add an artificial tuple
if not response_is_tuple:
response = (response, 1)
hash_wrap_args = hash(frozenset(wrap_args.items()))
attr_id = f"{attr}@{wrap_type.__name__}.{response_is_tuple}.{hash_wrap_args}"
try:
assert attr not in exclude_functions
# Load the utility function to transform the args
response_hook_function = hook_method_response_functions[attr_id]
# Try running it
new_response = response_hook_function(response)
    except (IndexError, KeyError, AssertionError): # Update the function in case of an error
response_hook_function = build_hook_response_function(response, wrap_type, wrap_args)
# Store this utility function in the registry
hook_method_response_functions[attr_id] = response_hook_function
# Run it
new_response = response_hook_function(response)
# Remove the artificial tuple
if not response_is_tuple:
new_response, _ = new_response
return new_response
def build_hook_response_function(response, wrap_type, wrap_args):
"""
Build the function that hook the response.
Example:
p is of type Pointer
f is the hook_response_function
then f(p) = (Wrapper)>Pointer
"""
# Inspect the call to find tensor arguments and return a rule whose
# structure is the same as the response object, with 1 where there was
# (torch or syft) tensors and 0 when not (ex: number, str, ...)
rule = build_rule(response)
# Build a function with this rule to efficiently replace syft tensors
# (but not pointer) with their child in the args objects
response_hook_function = build_response_hook(response, rule, wrap_type, wrap_args)
return response_hook_function
def build_rule(args):
"""
Inspect the args object to find torch or syft tensor arguments and
return a rule whose structure is the same as the args object,
with 1 where there was (torch or syft) tensors and 0 when
not (ex: number, str, ...)
Example:
in: ([tensor(1, 2), Pointer@bob], 42)
out: ([1, 1], 0)
"""
type_args = type(args)
if type_args in type_rule:
return type_rule[type_args](args)
else:
return 0
def build_args_hook(args, rules, return_tuple=False):
"""
Build a function given some rules to efficiently replace in the args object
syft tensors with their child (but not pointer as they don't have .child),
and do nothing for other type of object including torch tensors, str,
numbers, bool, etc.
Pointers trigger an error which can be caught to get the location for
forwarding the call.
Args:
args (tuple): the arguments given to the function / method
rules (tuple): the same structure but with boolean, true when there is
a tensor
return_tuple (bool): force to return a tuple even with a single element
Return:
a function that replace syft arg in args with arg.child
"""
# get the transformation lambda for each args
lambdas = [
typed_identity(a) # return the same obj with an identity fct with a type check if needed
if not r # if the rule is a number == 0.
else build_args_hook(a, r, True) # If not, call recursively build_args_hook
if isinstance(r, (list, tuple)) # if the rule is a list or tuple.
# Last if not, rule is probably == 1 so use type to return the right transformation.
else lambda i: forward_func[type(i)](i)
for a, r in zip(args, rules) # And do this for all the args / rules provided
]
# Instead of iterating which is slow, we use trick to efficiently
# apply each lambda to each arg
folds = {
0: zero_fold,
1: one_fold(return_tuple),
2: two_fold,
3: three_fold,
4: four_fold,
5: five_fold,
6: six_fold,
7: seven_fold,
8: eight_fold,
}
try:
f = folds[len(lambdas)]
except KeyError:
f = many_fold
return lambda x: f(lambdas, x)
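# Illustrative sketch (hypothetical args): with args = (logging_tensor, 2.0) and
# rule (1, 0), the lambdas are [lambda i: forward_func[type(i)](i), typed_identity(2.0)]
# and two_fold applies them element-wise, so the built function returns
# (logging_tensor.child, 2.0).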
def build_get_tensor_type(rules, layer=None):
"""
    Build a function which uses some rules to efficiently find the first tensor in
    the args object and return the type of its child.
Args:
rules (tuple): a skeleton object with the same structure as args but each tensor
is replaced with a 1 and other types (int, str) with a 0
layer (list or None): keep track of the path of inspection: each element in the list
            stands for one level of depth into the object, and its value for the index
in the current layer. See example for details
Returns:
a function returning a type
Example:
*Understanding the layer parameter*
obj = (a, [b, (c, d)], e)
the layer position is for:
a: [0]
b: [1, 0]
c: [1, 1, 0]
d: [1, 1, 1]
e: [2]
*Global behaviour example*
        rules = (0, [1, (0, 0)], 0)
- First recursion level
0 found -> do nothing
list found -> recursive call with layer = [1]
- Second recursion level
1 found -> update layer to [1, 0]
build the function x: type(x[1][0])
break
- Back to first recursion level
save the function returned in the lambdas list
0 found -> do nothing
exit loop
return the first (and here unique) function
"""
    # We keep track of whether this is the first layer or recursion level so that we
    # return only one function at the end, and we instantiate the layer list the first time
first_layer = layer is None
if first_layer:
layer = []
# Iteration through the rules object
lambdas = []
for i, r in enumerate(rules):
if r == 1: # if a tensor is found
layer.append(i)
lambdas.append(
# the layer object is given to build a getter to reach the
# tensor position and then the type() is called on the obj found
lambda a: type(get_element_at[len(layer)](*layer)(a))
)
# we only need one to get the type of all tensors as they should be the same
break
if isinstance(r, (list, tuple)): # we iterate recursively if necessary
layer.append(i)
lambdas += build_get_tensor_type(r, layer)
if first_layer:
return lambdas[0]
else:
return lambdas
# Function helpers to convert [a, b, c, ...] -> obj[a][b][c][...]
def one_layer(idx1):
return lambda l: l[idx1]
def two_layers(idx1, idx2):
return lambda l: one_layer(idx2)(l[idx1])
def three_layers(idx1, *ids):
return lambda l: two_layers(*ids)(l[idx1])
def four_layers(idx1, *ids):
return lambda l: three_layers(*ids)(l[idx1])
get_element_at = {1: one_layer, 2: two_layers, 3: three_layers, 4: four_layers}
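# Illustrative example: get_element_at[2](1, 0) builds a getter equivalent to
# lambda obj: obj[1][0], which is how build_get_tensor_type reaches the tensor
# recorded at layer position [1, 0] in order to take its type().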
def build_response_hook(response, rules, wrap_type, wrap_args, return_tuple=False):
"""
Build a function given some rules to efficiently replace in the response object
syft or torch tensors with a wrapper, and do nothing for other types of object
    including str, numbers, bool, etc.
Args:
response: a response used to build the hook function
        rules: the same structure but with booleans, true where there is
            a tensor
return_tuple: force to return a tuple even with a single element
    Returns:
a function to "wrap" the response
"""
# get the transformation lambda for each args
lambdas = [
(lambda i: i) # return the same object
if not r # if the rule is a number == 0.
else build_response_hook(
a, r, wrap_type, wrap_args, True
) # If not, call recursively build_response_hook
if isinstance(r, (list, tuple)) # if the rule is a list or tuple.
# Last if not, rule is probably == 1 so use type to return the right transformation.
else lambda i: backward_func[wrap_type](i, **wrap_args)
for a, r in zip(response, rules) # And do this for all the responses / rules provided
]
# Instead of iterating which is slow, we use trick to efficiently
# apply each lambda to each arg
folds = {
0: zero_fold,
1: one_fold(return_tuple),
2: two_fold,
3: three_fold,
4: four_fold,
5: five_fold,
6: six_fold,
7: seven_fold,
8: eight_fold,
}
try:
f = folds[len(lambdas)]
except KeyError:
f = many_fold
return lambda x: f(lambdas, x)
def zero_fold(*a, **k):
return tuple()
def one_fold(return_tuple, **kwargs):
def _one_fold(lambdas, args, **kwargs):
return lambdas[0](args[0], **kwargs)
def tuple_one_fold(lambdas, args):
return (lambdas[0](args[0], **kwargs),)
return {False: _one_fold, True: tuple_one_fold}[return_tuple]
def two_fold(lambdas, args, **kwargs):
return lambdas[0](args[0], **kwargs), lambdas[1](args[1], **kwargs)
def three_fold(lambdas, args, **kwargs):
return (
lambdas[0](args[0], **kwargs),
lambdas[1](args[1], **kwargs),
lambdas[2](args[2], **kwargs),
)
def four_fold(lambdas, args, **kwargs):
return (
lambdas[0](args[0], **kwargs),
lambdas[1](args[1], **kwargs),
lambdas[2](args[2], **kwargs),
lambdas[3](args[3], **kwargs),
)
def five_fold(lambdas, args, **kwargs):
return (
lambdas[0](args[0], **kwargs),
lambdas[1](args[1], **kwargs),
lambdas[2](args[2], **kwargs),
lambdas[3](args[3], **kwargs),
lambdas[4](args[4], **kwargs),
)
def six_fold(lambdas, args, **kwargs):
return (
lambdas[0](args[0], **kwargs),
lambdas[1](args[1], **kwargs),
lambdas[2](args[2], **kwargs),
lambdas[3](args[3], **kwargs),
lambdas[4](args[4], **kwargs),
lambdas[5](args[5], **kwargs),
)
def seven_fold(lambdas, args, **kwargs):
return (
lambdas[0](args[0], **kwargs),
lambdas[1](args[1], **kwargs),
lambdas[2](args[2], **kwargs),
lambdas[3](args[3], **kwargs),
lambdas[4](args[4], **kwargs),
lambdas[5](args[5], **kwargs),
lambdas[6](args[6], **kwargs),
)
def eight_fold(lambdas, args, **kwargs):
return (
lambdas[0](args[0], **kwargs),
lambdas[1](args[1], **kwargs),
lambdas[2](args[2], **kwargs),
lambdas[3](args[3], **kwargs),
lambdas[4](args[4], **kwargs),
lambdas[5](args[5], **kwargs),
lambdas[6](args[6], **kwargs),
lambdas[7](args[7], **kwargs),
)
def many_fold(lambdas, args, **kwargs):
return tuple([lambdas[i](args[i], **kwargs) for i in range(len(lambdas))])
# Add the possibility to make a type check in the identity function applied
# on some arg which could be None or of another type.
# Could add more checks, but it does not seem needed so far.
def typed_identity(a):
"""
We need to add typed identity for arguments which can be either number
or tensors. If the argument changes from an int to a tensor, the
assertion error triggered by typed_identity will be caught and a
new signature will be computed for the command.
"""
if a is None:
def none_identity(i):
assert i is None
return i
return none_identity
elif type(a) in (int, float, bool):
def number_identity(i):
assert isinstance(i, type(a))
return i
return number_identity
else:
return lambda i: i
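# Illustrative example (hypothetical values): typed_identity(3) returns a checker
# that only accepts ints, so if the same method is later called with a tensor in
# that position the assertion fails and the cached signature is rebuilt upstream.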
# -- Fast way to register responses and transform tensors into pointers
register_response_functions = {}
def register_response(
attr: str, response: object, response_ids: object, owner: sy.workers.AbstractWorker
) -> object:
"""
    When a remote worker executes a command sent by someone else, the response is
inspected: all tensors are stored by this worker and a Pointer tensor is
made for each of them.
    To make this efficient, we cache which elements of the response (which can be
    more complicated with nested tuples for example) need to be registered, in the
    dict register_response_functions.
However, sometimes a function (an attr) has multiple different response signatures.
This invalidates the cache, so we need to have a try/except which refreshes the
cache if the signature triggers an error.
Args:
attr (str): the name of the function being called
response (object): the response of this function
owner (BaseWorker): the worker which registers the tensors
"""
    # TODO: Why do we need to cast it to a tuple? This is a (small) time waste
response_is_tuple = isinstance(response, tuple)
# Add an artificial tuple
if not response_is_tuple:
response = (response, 1)
attr_id = "{}".format(attr)
try:
assert attr not in exclude_functions
# Load the utility function to register the response and transform tensors with pointers
register_response_function = register_response_functions[attr_id]
# Try running it
new_response = register_response_function(response, response_ids=response_ids, owner=owner)
    except (IndexError, KeyError, AssertionError): # Update the function in case of an error
register_response_function = build_register_response_function(response)
# Store this utility function in the registry
register_response_functions[attr_id] = register_response_function
# Run it
new_response = register_response_function(response, response_ids=response_ids, owner=owner)
# Remove the artificial tuple
if not response_is_tuple:
new_response, _ = new_response
return new_response
def build_register_response_function(response: object) -> Callable:
"""
Build the function that registers the response and replaces tensors with pointers.
Example:
        (1, tensor([1, 2])) is the response
f is the register_response_function
then f(p) = (1, (Wrapper)>Pointer)
"""
# Inspect the call to find tensor arguments and return a rule whose
# structure is the same as the response object, with 1 where there was
# (torch or syft) tensors and 0 when not (ex: number, str, ...)
rule = build_rule(response)
# Build a function with this rule to efficiently replace syft tensors
# (but not pointer) with their child in the args objects
response_hook_function = build_register_response(response, rule)
return response_hook_function
def register_tensor(
tensor: Union[torch.Tensor, AbstractTensor],
response_ids: List = list(),
owner: sy.workers.AbstractWorker = None,
) -> None:
"""
Register a tensor
Args:
tensor: the tensor
response_ids: list of ids where the tensor should be stored
            and each id is popped out when needed
owner: the owner that makes the registration
Returns:
the pointer
"""
assert owner is not None
tensor.owner = owner
try:
tensor.id = response_ids.pop(-1)
except IndexError:
raise ResponseSignatureError
owner.register_obj(tensor)
def build_register_response(response: object, rules: Tuple, return_tuple: bool = False) -> Callable:
"""
Build a function given some rules to efficiently replace in the response object
torch tensors with a pointer after they are registered, and do nothing for other
    types of object including str, numbers, bool, etc.
Args:
response: the response
rules: the rule specifying where the tensors are
return_tuple: force to return a tuple even with a single element
Returns:
The function to apply on generic responses
"""
# get the transformation lambda for each args
lambdas = [
(lambda i, **kwargs: i) # return the same object
if not r # if the rule is a number == 0.
else build_register_response(a, r, True) # If not, call recursively build_response_hook
if isinstance(r, (list, tuple)) # if the rule is a list or tuple.
# Last if not, rule is probably == 1 so use type to return the right transformation.
else lambda i, **kwargs: register_tensor(i, **kwargs)
for a, r in zip(response, rules) # And do this for all the responses / rules provided
]
# Instead of iterating which is slow, we use trick to efficiently
# apply each lambda to each arg
folds = {
0: zero_fold,
1: one_fold(return_tuple),
2: two_fold,
3: three_fold,
4: four_fold,
5: five_fold,
6: six_fold,
7: seven_fold,
8: eight_fold,
}
try:
f = folds[len(lambdas)]
except KeyError:
f = many_fold
return lambda x, **kwargs: f(lambdas, x, **kwargs)
| 36.120383
| 100
| 0.669141
|
f095e58b628b43710f97096f97debf7ff5e791df
| 3,037
|
py
|
Python
|
lldb/test/API/lang/swift/hide_runtimesupport/TestSwiftHideRuntimeSupport.py
|
LaudateCorpus1/llvm-project
|
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
|
[
"Apache-2.0"
] | 605
|
2019-10-18T01:15:54.000Z
|
2022-03-31T14:31:04.000Z
|
lldb/test/API/lang/swift/hide_runtimesupport/TestSwiftHideRuntimeSupport.py
|
LaudateCorpus1/llvm-project
|
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
|
[
"Apache-2.0"
] | 3,180
|
2019-10-18T01:21:21.000Z
|
2022-03-31T23:25:41.000Z
|
lldb/test/API/lang/swift/hide_runtimesupport/TestSwiftHideRuntimeSupport.py
|
LaudateCorpus1/llvm-project
|
ff2e0f0c1112558b3f30d8afec7c9882c33c79e3
|
[
"Apache-2.0"
] | 275
|
2019-10-18T05:27:22.000Z
|
2022-03-30T09:04:21.000Z
|
# TestSwiftHideRuntimeSupport.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that we hide runtime support values
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftHideRuntimeSupport(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@swiftTest
def test_swift_hide_runtime_support(self):
"""Test that we hide runtime support values"""
# This is the function to remove the custom settings in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd(
'settings set target.display-runtime-support-values true',
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.runCmd("settings set target.display-runtime-support-values false")
self.build()
lldbutil.run_to_source_breakpoint(
self, 'break here', lldb.SBFileSpec('main.swift'))
self.expect(
'frame variable -d run',
substrs=['_0_0'],
matching=False)
self.expect('frame variable -d run', substrs=['193627'], matching=True)
var_opts = lldb.SBVariablesOptions()
var_opts.SetIncludeArguments(True)
var_opts.SetIncludeLocals(True)
var_opts.SetInScopeOnly(True)
var_opts.SetIncludeStatics(True)
var_opts.SetIncludeRuntimeSupportValues(False)
var_opts.SetUseDynamic(lldb.eDynamicCanRunTarget)
values = self.frame().GetVariables(var_opts)
found = False
for value in values:
if '_0_0' in value.name:
found = True
if '$' in value.name:
found = True
self.assertFalse(found, "found the thing I was not expecting")
var_opts.SetIncludeRuntimeSupportValues(True)
values = self.frame().GetVariables(var_opts)
found = False
for value in values:
if '_0_0' in value.name:
found = True
        self.assertTrue(found, "did not find the thing I was expecting")
self.runCmd("settings set target.display-runtime-support-values true")
self.expect(
'frame variable -d run',
substrs=['_0_0'],
matching=True)
self.runCmd("settings set target.display-runtime-support-values false")
self.expect(
'frame variable -d run',
substrs=['_0_0'],
matching=False)
| 33.01087
| 80
| 0.627593
|
55b4f8af8f921c1966f7af48316d06cb3d954d91
| 18,512
|
py
|
Python
|
test/functional/feature_bip68_sequence.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
test/functional/feature_bip68_sequence.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
test/functional/feature_bip68_sequence.py
|
phlsolo316/vidcoin
|
d6eec232378c329ebc2a31e7d21acf58cf62368d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
import random
import time
from test_framework.blocktools import create_block, NORMAL_GBT_REQUEST_PARAMS, add_witness_commitment
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex
from test_framework.test_framework import VIDCoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
satoshi_round,
softfork_active,
)
from test_framework.script_util import DUMMY_P2WPKH_SCRIPT
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final"
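# --- Illustrative sketch (not part of the original test) ---------------------
# BIP68 packs a relative locktime into the low bits of nSequence using the
# flags above: bit 31 disables the lock, bit 22 selects a time-based lock
# (in 512-second units, hence the 9-bit granularity shift), and the low 16
# bits hold the value. A small helper showing how such a field decodes:
def decode_bip68_sequence(n_sequence):
    """Return (enabled, time_based, value) for a BIP68 nSequence field."""
    if n_sequence & SEQUENCE_LOCKTIME_DISABLE_FLAG:
        return (False, False, 0)
    time_based = bool(n_sequence & SEQUENCE_LOCKTIME_TYPE_FLAG)
    value = n_sequence & SEQUENCE_LOCKTIME_MASK
    if time_based:
        value <<= SEQUENCE_LOCKTIME_GRANULARITY  # 512-second units -> seconds
    return (True, time_based, value)

# A 100-block height lock, as used in test_bip68_not_consensus below:
assert decode_bip68_sequence(100) == (True, False, 100)
# A time lock of 2 units corresponds to 2 * 512 = 1024 seconds:
assert decode_bip68_sequence(SEQUENCE_LOCKTIME_TYPE_FLAG | 2) == (True, True, 1024)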
class BIP68Test(VIDCoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
[
"-acceptnonstdtxn=1",
"-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
],
["-acceptnonstdtxn=0"],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 VID
utxos = self.nodes[0].listunspent(0, 0)
assert len(utxos) > 0
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, DUMMY_P2WPKH_SCRIPT)]
tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value - self.relayfee * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
        while len(self.nodes[0].listunspent()) < 200:
            random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for _ in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), DUMMY_P2WPKH_SCRIPT))
rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), DUMMY_P2WPKH_SCRIPT)]
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN), DUMMY_P2WPKH_SCRIPT)]
tx.rehash()
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(ToHex(tx))
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
for _ in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert tx2.hash in self.nodes[0].getrawmempool()
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
# Save block template now to use for the reorg later
tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
self.nodes[0].generate(1)
assert tx2.hash not in self.nodes[0].getrawmempool()
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert tx3.hash in self.nodes[0].getrawmempool()
self.nodes[0].generate(1)
assert tx3.hash not in self.nodes[0].getrawmempool()
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert tx4.hash in self.nodes[0].getrawmempool()
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert tx5.hash not in self.nodes[0].getrawmempool()
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert tx4.hash not in self.nodes[0].getrawmempool()
assert tx3.hash in self.nodes[0].getrawmempool()
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
for i in range(2):
block = create_block(tmpl=tmpl, ntime=cur_time)
block.rehash()
block.solve()
tip = block.sha256
assert_equal(None if i == 1 else 'inconclusive', self.nodes[0].submitblock(ToHex(block)))
tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
tmpl['previousblockhash'] = '%x' % tip
tmpl['transactions'] = []
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert tx3.hash not in mempool
assert tx2.hash in mempool
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks prior to
# activation height. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert not softfork_active(self.nodes[0], 'csv')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), DUMMY_P2WPKH_SCRIPT)]
# sign tx2
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
block = create_block(tmpl=self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS))
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
add_witness_commitment(block)
block.solve()
assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert not softfork_active(self.nodes[0], 'csv')
self.nodes[0].generate(1)
assert softfork_active(self.nodes[0], 'csv')
self.sync_blocks()
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))["hex"]
self.nodes[1].sendrawtransaction(tx_signed)
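# --- Illustrative sketch (not part of the original test) ---------------------
# get_median_time_past above simply asks the node for a block's "mediantime"
# field. Per BIP113 that value is the median of the timestamps of the 11
# blocks ending at (and including) the block in question. A hypothetical
# standalone helper computing the same quantity:
def median_time_past(block_timestamps):
    """Median of the last (up to) 11 block timestamps, oldest first."""
    window = sorted(block_timestamps[-11:])
    return window[len(window) // 2]

# Eleven blocks spaced 600 seconds apart have an MTP equal to the 6th of them:
_times = [1_000_000 + 600 * i for i in range(11)]
assert median_time_past(_times) == _times[5]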
if __name__ == '__main__':
BIP68Test().main()
| 44.714976
| 140
| 0.641098
|
b44202abbee5b2f20e54b68feca0ae11ba564157
| 2,367
|
py
|
Python
|
team_picker.py
|
hmumtaz/fpl-team-picker
|
e31c6665b75e5b846db169340ef9ce91d9d32ebe
|
[
"MIT"
] | null | null | null |
team_picker.py
|
hmumtaz/fpl-team-picker
|
e31c6665b75e5b846db169340ef9ce91d9d32ebe
|
[
"MIT"
] | null | null | null |
team_picker.py
|
hmumtaz/fpl-team-picker
|
e31c6665b75e5b846db169340ef9ce91d9d32ebe
|
[
"MIT"
] | null | null | null |
import requests
import pandas as pd
import numpy as np
import json
import warnings
from data_fetcher import Data_Fetcher
data_fetcher = Data_Fetcher()
fwds, mids, defs, keeps = data_fetcher.get_most_picked_players()
fwds.to_csv("fwds.csv", index=False)
mids.to_csv("mids.csv", index=False)
defs.to_csv("defs.csv", index=False)
keeps.to_csv("keeps.csv", index=False)
def pickTeam(keeps=keeps, defs=defs, fwds=fwds, mids=mids, budget=104):
warnings.simplefilter(action="ignore", category=FutureWarning)
team = pd.DataFrame()
players_left_to_add = 15
keeps_left_to_add = 2
defs_left_to_add = 5
mids_left_to_add = 5
fwds_left_to_add = 3
i = 0
while i < 10 and keeps_left_to_add > 0:
player_budget = budget / players_left_to_add
keeper = keeps.iloc[i]
if keeper.cost <= player_budget and keeper["second_name"] not in team.values:
team = team.append(keeper)
budget -= keeper.cost
keeps_left_to_add -= 1
players_left_to_add -= 1
i += 1
i = 0
while i < 10 and defs_left_to_add > 0:
player_budget = budget / players_left_to_add
defender = defs.iloc[i]
if (
defender.cost <= player_budget
and defender["second_name"] not in team.values
):
team = team.append(defender)
budget -= defender.cost
defs_left_to_add -= 1
players_left_to_add -= 1
i += 1
i = 0
while i < 10 and fwds_left_to_add > 0:
player_budget = budget / players_left_to_add
forward = fwds.iloc[i]
if forward.cost <= player_budget and forward["second_name"] not in team.values:
team = team.append(forward)
budget -= forward.cost
fwds_left_to_add -= 1
players_left_to_add -= 1
i += 1
i = 0
while i < 10 and mids_left_to_add > 0:
player_budget = budget / players_left_to_add
mid = mids.iloc[i]
if mid.cost <= player_budget and mid["second_name"] not in team.values:
team = team.append(mid)
budget -= mid.cost
mids_left_to_add -= 1
players_left_to_add -= 1
i += 1
return team
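# --- Illustrative sketch (not part of the original script) -------------------
# The four loops in pickTeam above share one greedy pattern: walk the
# top-ranked candidates and take a player whenever they fit the running
# per-player budget. A hypothetical generic helper (assuming the same
# "cost"/"second_name" columns) could express that pattern once:
def pick_position(pool, team, budget, players_left, slots, scan_limit=10):
    """Greedily add up to `slots` players from `pool`; return the updated state."""
    i = 0
    while i < scan_limit and slots > 0:
        player_budget = budget / players_left
        candidate = pool.iloc[i]
        if candidate.cost <= player_budget and candidate["second_name"] not in team.values:
            team = team.append(candidate)
            budget -= candidate.cost
            slots -= 1
            players_left -= 1
        i += 1
    return team, budget, players_left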
team = pickTeam()
print(team)
print(f"Points: {team['points'].sum()}")
print(f"Cost: {team['cost'].sum()}")
| 29.962025
| 87
| 0.614702
|
c877454997a5147e76159ac9c9c9f0f76660a9c6
| 76,071
|
py
|
Python
|
src/sage/functions/orthogonal_polys.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 3
|
2016-06-19T14:48:31.000Z
|
2022-01-28T08:46:01.000Z
|
src/sage/functions/orthogonal_polys.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | null | null | null |
src/sage/functions/orthogonal_polys.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 7
|
2021-11-08T10:01:59.000Z
|
2022-03-03T11:25:52.000Z
|
r"""
Orthogonal Polynomials
- The Chebyshev polynomial of the first kind arises as a solution
to the differential equation
.. MATH::
(1-x^2)\,y'' - x\,y' + n^2\,y = 0
and those of the second kind as a solution to
.. MATH::
(1-x^2)\,y'' - 3x\,y' + n(n+2)\,y = 0.
The Chebyshev polynomials of the first kind are defined by the
recurrence relation
.. MATH::
T_0(x) = 1 \, T_1(x) = x \, T_{n+1}(x) = 2xT_n(x) - T_{n-1}(x). \,
The Chebyshev polynomials of the second kind are defined by the
recurrence relation
.. MATH::
U_0(x) = 1 \, U_1(x) = 2x \, U_{n+1}(x) = 2xU_n(x) - U_{n-1}(x). \,
For integers `m,n`, they satisfy the orthogonality
relations
.. MATH::
\int_{-1}^1 T_n(x)T_m(x)\,\frac{dx}{\sqrt{1-x^2}} =\left\{ \begin{matrix} 0 &: n\ne m~~~~~\\ \pi &: n=m=0\\ \pi/2 &: n=m\ne 0 \end{matrix} \right.
and
.. MATH::
\int_{-1}^1 U_n(x)U_m(x)\sqrt{1-x^2}\,dx =\frac{\pi}{2}\delta_{m,n}.
They are named after Pafnuty Chebyshev (alternative
transliterations: Tchebyshef or Tschebyscheff).
- The Hermite polynomials are defined either by
.. MATH::
H_n(x)=(-1)^n e^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2}
(the "probabilists' Hermite polynomials"), or by
.. MATH::
H_n(x)=(-1)^n e^{x^2}\frac{d^n}{dx^n}e^{-x^2}
(the "physicists' Hermite polynomials"). Sage (via Maxima)
implements the latter flavor. These satisfy the orthogonality
relation
.. MATH::
\int_{-\infty}^\infty H_n(x)H_m(x)\,e^{-x^2}\,dx ={n!2^n}{\sqrt{\pi}}\delta_{nm}
They are named in honor of Charles Hermite.
- Each *Legendre polynomial* `P_n(x)` is an `n`-th degree polynomial.
It may be expressed using Rodrigues' formula:
.. MATH::
P_n(x) = (2^n n!)^{-1} {\frac{d^n}{dx^n} } \left[ (x^2 -1)^n \right].
These are solutions to Legendre's differential equation:
.. MATH::
{\frac{d}{dx}} \left[ (1-x^2) {\frac{d}{dx}} P(x) \right] + n(n+1)P(x) = 0.
and satisfy the orthogonality relation
.. MATH::
\int_{-1}^{1} P_m(x) P_n(x)\,dx = {\frac{2}{2n + 1}} \delta_{mn}
The *Legendre function of the second kind* `Q_n(x)` is another
(linearly independent) solution to the Legendre differential equation.
It is not an "orthogonal polynomial" however.
The associated Legendre functions of the first kind
`P_\ell^m(x)` can be given in terms of the "usual"
Legendre polynomials by
.. MATH::
\begin{array}{ll} P_\ell^m(x) &= (-1)^m(1-x^2)^{m/2}\frac{d^m}{dx^m}P_\ell(x) \\ &= \frac{(-1)^m}{2^\ell \ell!} (1-x^2)^{m/2}\frac{d^{\ell+m}}{dx^{\ell+m}}(x^2-1)^\ell. \end{array}
Assuming `0 \le m \le \ell`, they satisfy the orthogonality
relation:
.. MATH::
\int_{-1}^{1} P_k ^{(m)} P_\ell ^{(m)} dx = \frac{2 (\ell+m)!}{(2\ell+1)(\ell-m)!}\ \delta _{k,\ell},
where `\delta _{k,\ell}` is the Kronecker delta.
The associated Legendre functions of the second kind
`Q_\ell^m(x)` can be given in terms of the "usual"
Legendre polynomials by
.. MATH::
Q_\ell^m(x) = (-1)^m(1-x^2)^{m/2}\frac{d^m}{dx^m}Q_\ell(x).
They are named after Adrien-Marie Legendre.
- Laguerre polynomials may be defined by the Rodrigues formula
.. MATH::
L_n(x)=\frac{e^x}{n!}\frac{d^n}{dx^n}\left(e^{-x} x^n\right).
They are solutions of Laguerre's equation:
.. MATH::
x\,y'' + (1 - x)\,y' + n\,y = 0\,
and satisfy the orthogonality relation
.. MATH::
\int_0^\infty L_m(x) L_n(x) e^{-x}\,dx = \delta_{mn}.
The generalized Laguerre polynomials may be defined by the
Rodrigues formula:
.. MATH::
L_n^{(\alpha)}(x) = {\frac{x^{-\alpha} e^x}{n!}}{\frac{d^n}{dx^n}} \left(e^{-x} x^{n+\alpha}\right) .
(These are also sometimes called the associated Laguerre
polynomials.) The simple Laguerre polynomials are recovered from
the generalized polynomials by setting `\alpha =0`.
They are named after Edmond Laguerre.
- Jacobi polynomials are a class of orthogonal polynomials. They
are obtained from hypergeometric series in cases where the series
is in fact finite:
.. MATH::
P_n^{(\alpha,\beta)}(z) =\frac{(\alpha+1)_n}{n!} \,_2F_1\left(-n,1+\alpha+\beta+n;\alpha+1;\frac{1-z}{2}\right) ,
where `()_n` is Pochhammer's symbol (for the rising
factorial), (Abramowitz and Stegun p561.) and thus have the
explicit expression
.. MATH::
P_n^{(\alpha,\beta)} (z) = \frac{\Gamma (\alpha+n+1)}{n!\Gamma (\alpha+\beta+n+1)} \sum_{m=0}^n \binom{n}{m} \frac{\Gamma (\alpha + \beta + n + m + 1)}{\Gamma (\alpha + m + 1)} \left(\frac{z-1}{2}\right)^m .
They are named after Carl Jacobi.
- Ultraspherical or Gegenbauer polynomials are given in terms of
the Jacobi polynomials `P_n^{(\alpha,\beta)}(x)` with
`\alpha=\beta=a-1/2` by
.. MATH::
C_n^{(a)}(x)= \frac{\Gamma(a+1/2)}{\Gamma(2a)}\frac{\Gamma(n+2a)}{\Gamma(n+a+1/2)} P_n^{(a-1/2,a-1/2)}(x).
They satisfy the orthogonality relation
.. MATH::
\int_{-1}^1(1-x^2)^{a-1/2}C_m^{(a)}(x)C_n^{(a)}(x)\, dx =\delta_{mn}2^{1-2a}\pi \frac{\Gamma(n+2a)}{(n+a)\Gamma^2(a)\Gamma(n+1)} ,
for `a>-1/2`. They are obtained from hypergeometric series
in cases where the series is in fact finite:
.. MATH::
C_n^{(a)}(z) =\frac{(2a)^{\underline{n}}}{n!} \,_2F_1\left(-n,2a+n;a+\frac{1}{2};\frac{1-z}{2}\right)
where `\underline{n}` is the falling factorial. (See
Abramowitz and Stegun p561)
They are named for Leopold Gegenbauer (1849-1903).
For completeness, the Pochhammer symbol, introduced by Leo August
Pochhammer, `(x)_n`, is used in the theory of special
functions to represent the "rising factorial" or "upper factorial"
.. MATH::
(x)_n=x(x+1)(x+2)\cdots(x+n-1)=\frac{(x+n-1)!}{(x-1)!}.
On the other hand, the "falling factorial" or "lower factorial" is
.. MATH::
x^{\underline{n}}=\frac{x!}{(x-n)!} ,
in the notation of Ronald L. Graham, Donald E. Knuth and Oren
Patashnik in their book Concrete Mathematics.
.. TODO::
Implement Zernike polynomials.
:wikipedia:`Zernike_polynomials`
REFERENCES:
- [AS1964]_
- :wikipedia:`Chebyshev_polynomials`
- :wikipedia:`Legendre_polynomials`
- :wikipedia:`Hermite_polynomials`
- http://mathworld.wolfram.com/GegenbauerPolynomial.html
- :wikipedia:`Jacobi_polynomials`
- :wikipedia:`Laguerre_polynomials`
- :wikipedia:`Associated_Legendre_polynomials`
- [Koe1999]_
AUTHORS:
- David Joyner (2006-06)
- Stefan Reiterer (2010-)
- Ralf Stephan (2015-)
The original module wrapped some of the orthogonal/special functions
in the Maxima package "orthopoly" and was written by Barton
Willis of the University of Nebraska at Kearney.
"""
#*****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
# 2006 David Joyner <wdj@usna.edu>
# 2010 Stefan Reiterer <maldun.finsterschreck@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from six.moves import range
import warnings
from sage.misc.latex import latex
from sage.misc.sage_eval import sage_eval
from sage.rings.all import ZZ, QQ, RR, CC
from sage.rings.polynomial.polynomial_element import Polynomial
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.real_mpfr import is_RealField
from sage.rings.complex_field import is_ComplexField
from sage.symbolic.ring import SR, is_SymbolicVariable
from sage.symbolic.function import BuiltinFunction, GinacFunction
from sage.symbolic.expression import Expression
from sage.functions.other import factorial, binomial
from sage.structure.all import parent
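# --- Illustrative sketch (not part of the Sage sources) ----------------------
# The module docstring above gives the three-term recurrence T_0(x) = 1,
# T_1(x) = x, T_{n+1}(x) = 2*x*T_n(x) - T_{n-1}(x). A quick standalone
# numerical illustration (plain Python, no Sage machinery needed):
def _chebyshev_t_numeric(n, x):
    """Evaluate the Chebyshev polynomial T_n(x) via the recurrence."""
    if n == 0:
        return 1.0
    prev, cur = 1.0, x
    for _ in range(n - 1):
        prev, cur = cur, 2 * x * cur - prev
    return cur

# Check against the trigonometric identity T_n(cos(t)) == cos(n*t):
import math
assert abs(_chebyshev_t_numeric(5, math.cos(math.pi / 3)) - math.cos(5 * math.pi / 3)) < 1e-12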
class OrthogonalFunction(BuiltinFunction):
"""
Base class for orthogonal polynomials.
This class is an abstract base class for all orthogonal polynomials since
they share similar properties. The evaluation as a polynomial
is either done via maxima, or with pynac.
Convention: The first argument is always the order of the polynomial,
the others are other values or parameters where the polynomial is
evaluated.
"""
def __init__(self, name, nargs=2, latex_name=None, conversions={}):
"""
        The :class:`OrthogonalFunction` class needs the same input parameters as
        its parent class.
EXAMPLES::
sage: from sage.functions.orthogonal_polys import OrthogonalFunction
sage: new = OrthogonalFunction('testo_P')
sage: new
testo_P
"""
try:
self._maxima_name = conversions['maxima']
except KeyError:
self._maxima_name = None
super(OrthogonalFunction,self).__init__(name=name, nargs=nargs,
latex_name=latex_name, conversions=conversions)
def eval_formula(self, *args):
"""
Evaluate this polynomial using an explicit formula.
EXAMPLES::
sage: from sage.functions.orthogonal_polys import OrthogonalFunction
sage: P = OrthogonalFunction('testo_P')
sage: P.eval_formula(1,2.0)
Traceback (most recent call last):
...
NotImplementedError: no explicit calculation of values implemented
"""
raise NotImplementedError("no explicit calculation of values implemented")
def _eval_special_values_(self, *args):
"""
Evaluate the polynomial explicitly for special values.
EXAMPLES::
sage: var('n')
n
sage: chebyshev_T(n,-1)
(-1)^n
"""
raise ValueError("no special values known")
def _eval_(self, n, *args):
"""
The :meth:`_eval_()` method decides which evaluation suits best
for the given input, and returns a proper value.
EXAMPLES::
sage: var('n,x')
(n, x)
sage: chebyshev_T(5,x)
16*x^5 - 20*x^3 + 5*x
"""
return None
def __call__(self, *args, **kwds):
"""
        This overrides the call method from SageObject to avoid problems with coercions,
        since the _eval_ method is able to handle more data types than symbolic functions
        would normally allow.
        Thus we distinguish between algebraic objects (if n is an integer) and
        symbolic expressions otherwise.
EXAMPLES::
sage: chebyshev_T(5, x)
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(5, x, algorithm='pari')
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(5, x, algorithm='maxima')
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(5, x, algorithm='recursive')
16*x^5 - 20*x^3 + 5*x
"""
algorithm = kwds.get('algorithm', None)
if algorithm == 'pari':
return self.eval_pari(*args, **kwds)
elif algorithm == 'recursive':
return self.eval_recursive(*args, **kwds)
elif algorithm == 'maxima':
from sage.calculus.calculus import maxima
kwds['hold'] = True
return maxima(self._eval_(*args, **kwds))._sage_()
return super(OrthogonalFunction,self).__call__(*args, **kwds)
class ChebyshevFunction(OrthogonalFunction):
"""
Abstract base class for Chebyshev polynomials of the first and second kind.
EXAMPLES::
sage: chebyshev_T(3,x)
4*x^3 - 3*x
"""
def __call__(self, n, *args, **kwds):
"""
        This overrides the call method from SageObject to avoid problems with coercions,
        since the _eval_ method is able to handle more data types than symbolic functions
        would normally allow.
        Thus we distinguish between algebraic objects (if n is an integer) and
        symbolic expressions otherwise.
EXAMPLES::
sage: K.<a> = NumberField(x^3-x-1)
sage: chebyshev_T(5, a)
16*a^2 + a - 4
sage: chebyshev_T(5,MatrixSpace(ZZ, 2)([1, 2, -4, 7]))
[-40799 44162]
[-88324 91687]
sage: R.<x> = QQ[]
sage: parent(chebyshev_T(5, x))
Univariate Polynomial Ring in x over Rational Field
sage: chebyshev_T(5, 2, hold=True)
chebyshev_T(5, 2)
sage: chebyshev_T(1,2,3)
Traceback (most recent call last):
...
TypeError: Symbolic function chebyshev_T takes exactly 2 arguments (3 given)
"""
# If n is an integer: consider the polynomial as an algebraic (not symbolic) object
if n in ZZ and not kwds.get('hold', False):
try:
return self._eval_(n, *args)
except Exception:
pass
return super(ChebyshevFunction,self).__call__(n, *args, **kwds)
def _eval_(self, n, x):
"""
The :meth:`_eval_()` method decides which evaluation suits best
for the given input, and returns a proper value.
EXAMPLES::
sage: var('n,x')
(n, x)
sage: chebyshev_T(5,x)
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(64, x)
2*(2*(2*(2*(2*(2*x^2 - 1)^2 - 1)^2 - 1)^2 - 1)^2 - 1)^2 - 1
sage: chebyshev_T(n,-1)
(-1)^n
sage: chebyshev_T(-7,x)
64*x^7 - 112*x^5 + 56*x^3 - 7*x
sage: chebyshev_T(3/2,x)
chebyshev_T(3/2, x)
sage: R.<t> = QQ[]
sage: chebyshev_T(2,t)
2*t^2 - 1
sage: chebyshev_U(2,t)
4*t^2 - 1
sage: parent(chebyshev_T(4, RIF(5)))
Real Interval Field with 53 bits of precision
sage: RR2 = RealField(5)
sage: chebyshev_T(100000,RR2(2))
8.9e57180
sage: chebyshev_T(5,Qp(3)(2))
2 + 3^2 + 3^3 + 3^4 + 3^5 + O(3^20)
sage: chebyshev_T(100001/2, 2)
doctest:...: RuntimeWarning: mpmath failed, keeping expression unevaluated
chebyshev_T(100001/2, 2)
sage: chebyshev_U._eval_(1.5, Mod(8,9)) is None
True
"""
# n is an integer => evaluate algebraically (as polynomial)
if n in ZZ:
n = ZZ(n)
# Expanded symbolic expression only for small values of n
if isinstance(x, Expression) and n.abs() < 32:
return self.eval_formula(n, x)
return self.eval_algebraic(n, x)
if isinstance(x, Expression) or isinstance(n, Expression):
# Check for known identities
try:
return self._eval_special_values_(n, x)
except ValueError:
# Don't evaluate => keep symbolic
return None
# n is not an integer and neither n nor x is symbolic.
# We assume n and x are real/complex and evaluate numerically
try:
import sage.libs.mpmath.all as mpmath
return self._evalf_(n, x)
except mpmath.NoConvergence:
warnings.warn("mpmath failed, keeping expression unevaluated",
RuntimeWarning)
return None
except Exception:
# Numerical evaluation failed => keep symbolic
return None
class Func_chebyshev_T(ChebyshevFunction):
"""
Chebyshev polynomials of the first kind.
REFERENCE:
- [AS1964]_ 22.5.31 page 778 and 6.1.22 page 256.
EXAMPLES::
sage: chebyshev_T(5,x)
16*x^5 - 20*x^3 + 5*x
sage: var('k')
k
sage: test = chebyshev_T(k,x)
sage: test
chebyshev_T(k, x)
"""
def __init__(self):
"""
        Init method for the Chebyshev polynomials of the first kind.
EXAMPLES::
sage: var('n, x')
(n, x)
sage: from sage.functions.orthogonal_polys import Func_chebyshev_T
sage: chebyshev_T2 = Func_chebyshev_T()
sage: chebyshev_T2(1,x)
x
sage: chebyshev_T(x, x)._sympy_()
chebyshevt(x, x)
sage: maxima(chebyshev_T(1,x, hold=True))
_SAGE_VAR_x
sage: maxima(chebyshev_T(n, chebyshev_T(n, x)))
chebyshev_t(_SAGE_VAR_n,chebyshev_t(_SAGE_VAR_n,_SAGE_VAR_x))
"""
ChebyshevFunction.__init__(self, 'chebyshev_T', nargs=2,
conversions=dict(maxima='chebyshev_t',
mathematica='ChebyshevT',
sympy='chebyshevt',
giac='tchebyshev1'))
def _latex_(self):
r"""
TESTS::
sage: latex(chebyshev_T)
T_n
"""
return r"T_n"
def _print_latex_(self, n, z):
r"""
TESTS::
sage: latex(chebyshev_T(3, x, hold=True))
T_{3}\left(x\right)
"""
return r"T_{{{}}}\left({}\right)".format(latex(n), latex(z))
def _eval_special_values_(self, n, x):
"""
Values known for special values of x.
For details see [AS1964]_ 22.4 (p. 777)
        EXAMPLES::
sage: var('n')
n
sage: chebyshev_T(n,1)
1
sage: chebyshev_T(n,0)
1/2*(-1)^(1/2*n)*((-1)^n + 1)
sage: chebyshev_T(n,-1)
(-1)^n
sage: chebyshev_T._eval_special_values_(3/2,x)
Traceback (most recent call last):
...
ValueError: no special value found
sage: chebyshev_T._eval_special_values_(n, 0.1)
Traceback (most recent call last):
...
ValueError: no special value found
"""
if x == 1:
return x
if x == -1:
return x**n
if x == 0:
return (1+(-1)**n)*(-1)**(n/2)/2
raise ValueError("no special value found")
def _evalf_(self, n, x, **kwds):
"""
Evaluates :class:`chebyshev_T` numerically with mpmath.
EXAMPLES::
sage: chebyshev_T._evalf_(10,3)
2.26195370000000e7
sage: chebyshev_T._evalf_(10,3,parent=RealField(75))
2.261953700000000000000e7
sage: chebyshev_T._evalf_(10,I)
-3363.00000000000
sage: chebyshev_T._evalf_(5,0.3)
0.998880000000000
sage: chebyshev_T(1/2, 0)
0.707106781186548
sage: chebyshev_T(1/2, 3/2)
1.11803398874989
sage: chebyshev_T._evalf_(1.5, Mod(8,9))
Traceback (most recent call last):
...
TypeError: cannot evaluate chebyshev_T with parent Ring of integers modulo 9
This simply evaluates using :class:`RealField` or :class:`ComplexField`::
sage: chebyshev_T(1234.5, RDF(2.1))
5.48174256255782e735
sage: chebyshev_T(1234.5, I)
-1.21629397684152e472 - 1.21629397684152e472*I
For large values of ``n``, mpmath fails (but the algebraic formula
still works)::
sage: chebyshev_T._evalf_(10^6, 0.1)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
sage: chebyshev_T(10^6, 0.1)
0.636384327171504
"""
try:
real_parent = kwds['parent']
except KeyError:
real_parent = parent(x)
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
# parent is not a real or complex field: figure out a good parent
if x in RR:
x = RR(x)
real_parent = RR
elif x in CC:
x = CC(x)
real_parent = CC
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
raise TypeError("cannot evaluate chebyshev_T with parent {}".format(real_parent))
from sage.libs.mpmath.all import call as mpcall
from sage.libs.mpmath.all import chebyt as mpchebyt
return mpcall(mpchebyt, n, x, parent=real_parent)
def eval_formula(self, n, x):
"""
Evaluate ``chebyshev_T`` using an explicit formula.
        See [AS1964]_ 227 (p. 782) for details on the recursions.
See also [Koe1999]_ for fast evaluation techniques.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_T.eval_formula(-1,x)
x
sage: chebyshev_T.eval_formula(0,x)
1
sage: chebyshev_T.eval_formula(1,x)
x
sage: chebyshev_T.eval_formula(2,0.1) == chebyshev_T._evalf_(2,0.1)
True
sage: chebyshev_T.eval_formula(10,x)
512*x^10 - 1280*x^8 + 1120*x^6 - 400*x^4 + 50*x^2 - 1
sage: chebyshev_T.eval_algebraic(10,x).expand()
512*x^10 - 1280*x^8 + 1120*x^6 - 400*x^4 + 50*x^2 - 1
"""
if n < 0:
return self.eval_formula(-n, x)
elif n == 0:
return parent(x).one()
res = parent(x).zero()
for j in range(n // 2 + 1):
f = factorial(n-1-j) / factorial(j) / factorial(n-2*j)
res += (-1)**j * (2*x)**(n-2*j) * f
res *= n/2
return res
def eval_algebraic(self, n, x):
"""
Evaluate :class:`chebyshev_T` as polynomial, using a recursive
formula.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_T.eval_algebraic(5, x)
2*(2*(2*x^2 - 1)*x - x)*(2*x^2 - 1) - x
sage: chebyshev_T(-7, x) - chebyshev_T(7,x)
0
sage: R.<t> = ZZ[]
sage: chebyshev_T.eval_algebraic(-1, t)
t
sage: chebyshev_T.eval_algebraic(0, t)
1
sage: chebyshev_T.eval_algebraic(1, t)
t
sage: chebyshev_T(7^100, 1/2)
1/2
sage: chebyshev_T(7^100, Mod(2,3))
2
sage: n = 97; x = RIF(pi/2/n)
sage: chebyshev_T(n, cos(x)).contains_zero()
True
sage: R.<t> = Zp(2, 8, 'capped-abs')[]
sage: chebyshev_T(10^6+1, t)
(2^7 + O(2^8))*t^5 + (O(2^8))*t^4 + (2^6 + O(2^8))*t^3 + (O(2^8))*t^2 + (1 + 2^6 + O(2^8))*t + (O(2^8))
"""
if n == 0:
return parent(x).one()
if n < 0:
return self._eval_recursive_(-n, x)[0]
return self._eval_recursive_(n, x)[0]
def _eval_recursive_(self, n, x, both=False):
"""
If ``both=True``, compute ``(T(n,x), T(n-1,x))`` using a
recursive formula.
If ``both=False``, return instead a tuple ``(T(n,x), False)``.
EXAMPLES::
sage: chebyshev_T._eval_recursive_(5, x)
(2*(2*(2*x^2 - 1)*x - x)*(2*x^2 - 1) - x, False)
sage: chebyshev_T._eval_recursive_(5, x, True)
(2*(2*(2*x^2 - 1)*x - x)*(2*x^2 - 1) - x, 2*(2*x^2 - 1)^2 - 1)
"""
if n == 1:
return x, parent(x).one()
assert n >= 2
a, b = self._eval_recursive_((n+1)//2, x, both or n % 2)
if n % 2 == 0:
return 2*a*a - 1, both and 2*a*b - x
else:
return 2*a*b - x, both and 2*b*b - 1
def _eval_numpy_(self, n, x):
"""
Evaluate ``self`` using numpy.
EXAMPLES::
sage: import numpy
sage: z = numpy.array([1,2])
sage: z2 = numpy.array([[1,2],[1,2]])
sage: z3 = numpy.array([1,2,3.])
sage: chebyshev_T(1,z)
array([ 1., 2.])
sage: chebyshev_T(1,z2)
array([[ 1., 2.],
[ 1., 2.]])
sage: chebyshev_T(1,z3)
array([ 1., 2., 3.])
sage: chebyshev_T(z,0.1)
array([ 0.1 , -0.98])
"""
from scipy.special import eval_chebyt
return eval_chebyt(n, x)
def _derivative_(self, n, x, diff_param):
"""
        Return the derivative of :class:`chebyshev_T` in terms of the Chebyshev
        polynomial of the second kind :class:`chebyshev_U`.
EXAMPLES::
sage: var('k')
k
sage: derivative(chebyshev_T(k,x),x)
k*chebyshev_U(k - 1, x)
sage: derivative(chebyshev_T(3,x),x)
12*x^2 - 3
sage: derivative(chebyshev_T(k,x),k)
Traceback (most recent call last):
...
            NotImplementedError: derivative w.r.t. the index is not supported yet
"""
if diff_param == 0:
            raise NotImplementedError("derivative w.r.t. the index is not supported yet")
elif diff_param == 1:
return n*chebyshev_U(n-1, x)
raise ValueError("illegal differentiation parameter {}".format(diff_param))
chebyshev_T = Func_chebyshev_T()
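# --- Illustrative sketch (not part of the Sage sources) ----------------------
# Func_chebyshev_T._eval_recursive_ above halves the index at every step using
# the product identity 2*T_m(x)*T_n(x) = T_{m+n}(x) + T_{m-n}(x), i.e.
#   T_{2k}(x)   = 2*T_k(x)^2 - 1
#   T_{2k+1}(x) = 2*T_k(x)*T_{k+1}(x) - x
# A minimal numeric version of that O(log n) scheme:
def _chebyshev_pair_numeric(k, x):
    # Return (T_k(x), T_{k+1}(x)).
    if k == 0:
        return 1.0, x
    a, b = _chebyshev_pair_numeric(k // 2, x)
    if k % 2 == 0:
        return 2 * a * a - 1, 2 * a * b - x
    return 2 * a * b - x, 2 * b * b - 1

def _chebyshev_t_halving(n, x):
    """Evaluate T_n(x) with O(log n) multiplications."""
    return _chebyshev_pair_numeric(n, x)[0]

# Agrees with the expanded T_10 from the doctests above at x = 0.3:
assert abs(_chebyshev_t_halving(10, 0.3)
           - (512*0.3**10 - 1280*0.3**8 + 1120*0.3**6 - 400*0.3**4 + 50*0.3**2 - 1)) < 1e-9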
class Func_chebyshev_U(ChebyshevFunction):
"""
Class for the Chebyshev polynomial of the second kind.
REFERENCE:
- [AS1964]_ 22.8.3 page 783 and 6.1.22 page 256.
EXAMPLES::
sage: R.<t> = QQ[]
sage: chebyshev_U(2,t)
4*t^2 - 1
sage: chebyshev_U(3,t)
8*t^3 - 4*t
"""
def __init__(self):
"""
        Init method for the Chebyshev polynomials of the second kind.
EXAMPLES::
sage: var('n, x')
(n, x)
sage: from sage.functions.orthogonal_polys import Func_chebyshev_U
sage: chebyshev_U2 = Func_chebyshev_U()
sage: chebyshev_U2(1,x)
2*x
sage: chebyshev_U(x, x)._sympy_()
chebyshevu(x, x)
sage: maxima(chebyshev_U(2,x, hold=True))
3*((-(8*(1-_SAGE_VAR_x))/3)+(4*(1-_SAGE_VAR_x)^2)/3+1)
sage: maxima(chebyshev_U(n,x, hold=True))
chebyshev_u(_SAGE_VAR_n,_SAGE_VAR_x)
"""
ChebyshevFunction.__init__(self, 'chebyshev_U', nargs=2,
conversions=dict(maxima='chebyshev_u',
mathematica='ChebyshevU',
sympy='chebyshevu',
giac='tchebyshev2'))
def _latex_(self):
r"""
TESTS::
sage: latex(chebyshev_U)
U_n
"""
return r"U_n"
def _print_latex_(self, n, z):
r"""
TESTS::
sage: latex(chebyshev_U(3, x, hold=True))
U_{3}\left(x\right)
"""
return r"U_{{{}}}\left({}\right)".format(latex(n), latex(z))
def eval_formula(self, n, x):
"""
Evaluate ``chebyshev_U`` using an explicit formula.
See [AS1964]_ 227 (p. 782) for details on the recursions.
See also [Koe1999]_ for the recursion formulas.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_U.eval_formula(10, x)
1024*x^10 - 2304*x^8 + 1792*x^6 - 560*x^4 + 60*x^2 - 1
sage: chebyshev_U.eval_formula(-2, x)
-1
sage: chebyshev_U.eval_formula(-1, x)
0
sage: chebyshev_U.eval_formula(0, x)
1
sage: chebyshev_U.eval_formula(1, x)
2*x
sage: chebyshev_U.eval_formula(2,0.1) == chebyshev_U._evalf_(2,0.1)
True
"""
if n < -1:
return -self.eval_formula(-n-2, x)
res = parent(x).zero()
for j in range(n // 2 + 1):
f = binomial(n-j, j)
res += (-1)**j * (2*x)**(n-2*j) * f
return res
def eval_algebraic(self, n, x):
"""
Evaluate :class:`chebyshev_U` as polynomial, using a recursive
formula.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_U.eval_algebraic(5,x)
-2*((2*x + 1)*(2*x - 1)*x - 4*(2*x^2 - 1)*x)*(2*x + 1)*(2*x - 1)
sage: parent(chebyshev_U(3, Mod(8,9)))
Ring of integers modulo 9
sage: parent(chebyshev_U(3, Mod(1,9)))
Ring of integers modulo 9
sage: chebyshev_U(-3,x) + chebyshev_U(1,x)
0
sage: chebyshev_U(-1,Mod(5,8))
0
sage: parent(chebyshev_U(-1,Mod(5,8)))
Ring of integers modulo 8
sage: R.<t> = ZZ[]
sage: chebyshev_U.eval_algebraic(-2, t)
-1
sage: chebyshev_U.eval_algebraic(-1, t)
0
sage: chebyshev_U.eval_algebraic(0, t)
1
sage: chebyshev_U.eval_algebraic(1, t)
2*t
sage: n = 97; x = RIF(pi/n)
sage: chebyshev_U(n-1, cos(x)).contains_zero()
True
sage: R.<t> = Zp(2, 6, 'capped-abs')[]
sage: chebyshev_U(10^6+1, t)
(2 + O(2^6))*t + (O(2^6))
"""
if n == -1:
return parent(x).zero()
if n < 0:
return -self._eval_recursive_(-n-2, x)[0]
return self._eval_recursive_(n, x)[0]
def _eval_recursive_(self, n, x, both=False):
"""
If ``both=True``, compute ``(U(n,x), U(n-1,x))`` using a
recursive formula.
If ``both=False``, return instead a tuple ``(U(n,x), False)``.
EXAMPLES::
sage: chebyshev_U._eval_recursive_(3, x)
(4*((2*x + 1)*(2*x - 1) - 2*x^2)*x, False)
sage: chebyshev_U._eval_recursive_(3, x, True)
(4*((2*x + 1)*(2*x - 1) - 2*x^2)*x, ((2*x + 1)*(2*x - 1) + 2*x)*((2*x + 1)*(2*x - 1) - 2*x))
"""
if n == 0:
return parent(x).one(), 2*x
assert n >= 1
a, b = self._eval_recursive_((n-1)//2, x, True)
if n % 2 == 0:
return (b+a)*(b-a), both and 2*b*(x*b-a)
else:
return 2*a*(b-x*a), both and (b+a)*(b-a)
def _evalf_(self, n, x, **kwds):
"""
Evaluate :class:`chebyshev_U` numerically with mpmath.
EXAMPLES::
sage: chebyshev_U(5,-4+3.*I)
98280.0000000000 - 11310.0000000000*I
sage: chebyshev_U(10,3).n(75)
4.661117900000000000000e7
sage: chebyshev_U._evalf_(1.5, Mod(8,9))
Traceback (most recent call last):
...
TypeError: cannot evaluate chebyshev_U with parent Ring of integers modulo 9
"""
try:
real_parent = kwds['parent']
except KeyError:
real_parent = parent(x)
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
# parent is not a real or complex field: figure out a good parent
if x in RR:
x = RR(x)
real_parent = RR
elif x in CC:
x = CC(x)
real_parent = CC
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
raise TypeError("cannot evaluate chebyshev_U with parent {}".format(real_parent))
from sage.libs.mpmath.all import call as mpcall
from sage.libs.mpmath.all import chebyu as mpchebyu
return mpcall(mpchebyu, n, x, parent=real_parent)
def _eval_special_values_(self, n, x):
"""
Values known for special values of x.
See [AS1964]_ 22.4 (p.777).
EXAMPLES::
sage: var('n')
n
sage: chebyshev_U(n,1)
n + 1
sage: chebyshev_U(n,0)
1/2*(-1)^(1/2*n)*((-1)^n + 1)
sage: chebyshev_U(n,-1)
(-1)^n*(n + 1)
sage: chebyshev_U._eval_special_values_(n, 2)
Traceback (most recent call last):
...
ValueError: no special value found
"""
if x == 1:
return x*(n+1)
if x == -1:
return x**n*(n+1)
if x == 0:
return (1+(-1)**n)*(-1)**(n/2)/2
raise ValueError("no special value found")
def _eval_numpy_(self, n, x):
"""
Evaluate ``self`` using numpy.
EXAMPLES::
sage: import numpy
sage: z = numpy.array([1,2])
sage: z2 = numpy.array([[1,2],[1,2]])
sage: z3 = numpy.array([1,2,3.])
sage: chebyshev_U(1,z)
array([ 2., 4.])
sage: chebyshev_U(1,z2)
array([[ 2., 4.],
[ 2., 4.]])
sage: chebyshev_U(1,z3)
array([ 2., 4., 6.])
sage: chebyshev_U(z,0.1)
array([ 0.2 , -0.96])
"""
from scipy.special import eval_chebyu
return eval_chebyu(n, x)
def _derivative_(self, n, x, diff_param):
"""
        Return the derivative of :class:`chebyshev_U` in terms of the Chebyshev
        polynomials of the first and second kind.
EXAMPLES::
sage: var('k')
k
sage: derivative(chebyshev_U(k,x),x)
((k + 1)*chebyshev_T(k + 1, x) - x*chebyshev_U(k, x))/(x^2 - 1)
sage: derivative(chebyshev_U(3,x),x)
24*x^2 - 4
sage: derivative(chebyshev_U(k,x),k)
Traceback (most recent call last):
...
            NotImplementedError: derivative w.r.t. the index is not supported yet
"""
if diff_param == 0:
            raise NotImplementedError("derivative w.r.t. the index is not supported yet")
elif diff_param == 1:
return ((n+1)*chebyshev_T(n+1, x) - x*chebyshev_U(n,x)) / (x*x-1)
raise ValueError("illegal differentiation parameter {}".format(diff_param))
chebyshev_U = Func_chebyshev_U()
class Func_legendre_P(BuiltinFunction):
def __init__(self):
r"""
Init method for the Legendre polynomials of the first kind.
EXAMPLES::
sage: loads(dumps(legendre_P))
legendre_P
"""
BuiltinFunction.__init__(self, 'legendre_P', nargs=2, latex_name=r"P",
conversions={'maxima':'legendre_p',
'mathematica':'LegendreP',
'maple':'LegendreP',
'giac':'legendre'})
def _eval_(self, n, x, *args, **kwds):
r"""
Return an evaluation of this Legendre P expression.
EXAMPLES::
sage: legendre_P(4, 2.0)
55.3750000000000
sage: legendre_P(1, x)
x
sage: legendre_P(4, x+1)
35/8*(x + 1)^4 - 15/4*(x + 1)^2 + 3/8
sage: legendre_P(1/2, I+1.)
1.05338240025858 + 0.359890322109665*I
sage: legendre_P(0, SR(1)).parent()
Symbolic Ring
"""
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
if n in ZZ:
ret = self.eval_pari(n, x)
if ret is not None:
return ret
def _eval_special_values_(self, n, x):
"""
Special values known.
EXAMPLES::
sage: legendre_P(0, 0)
1
sage: legendre_P(1, x)
x
"""
if n == 0 or n == -1 or x == 1:
return ZZ(1)
if n == 1 or n == -2:
return x
def _evalf_(self, n, x, parent=None, **kwds):
"""
EXAMPLES::
sage: legendre_P(4, 2.)
55.3750000000000
sage: legendre_P(5.5,1.00001)
1.00017875754114
sage: legendre_P(1/2, I+1).n()
1.05338240025858 + 0.359890322109665*I
sage: legendre_P(1/2, I+1).n(59)
1.0533824002585801 + 0.35989032210966539*I
sage: legendre_P(42, RR(12345678))
2.66314881466753e309
sage: legendre_P(42, Reals(20)(12345678))
2.6632e309
sage: legendre_P(201/2, 0).n()
0.0561386178630179
sage: legendre_P(201/2, 0).n(100)
0.056138617863017877699963095883
"""
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.legenp, n, 0, x, parent=parent)
def eval_pari(self, n, arg, **kwds):
"""
Use Pari to evaluate legendre_P for integer, symbolic, and
polynomial argument.
EXAMPLES::
sage: R.<x> = QQ[]
sage: legendre_P(4,x)
35/8*x^4 - 15/4*x^2 + 3/8
sage: legendre_P(10000,x).coefficient(x,1)
0
sage: var('t,x')
(t, x)
sage: legendre_P(-5,t)
35/8*t^4 - 15/4*t^2 + 3/8
sage: legendre_P(4, x+1)
35/8*(x + 1)^4 - 15/4*(x + 1)^2 + 3/8
sage: legendre_P(4, sqrt(2))
83/8
sage: legendre_P(4, I*e)
35/8*e^4 + 15/4*e^2 + 3/8
"""
if n < 0:
n = - n - 1
P = parent(arg)
if P in (ZZ, QQ, RR, CC, SR):
from sage.libs.pari.all import pari
R = PolynomialRing(QQ, 'x')
pol = R(pari.pollegendre(n))
return sum(b * arg**a for a, b in enumerate(pol))
elif is_PolynomialRing(P):
from sage.libs.pari.all import pari
if arg == P.gen():
return P(pari.pollegendre(n))
else:
R = PolynomialRing(QQ, 'x')
pol = R(pari.pollegendre(n))
pol = pol.subs({pol.parent().gen():arg})
pol = pol.change_ring(P.base_ring())
return pol
def _derivative_(self, n, x, *args,**kwds):
"""
Return the derivative of legendre_P.
EXAMPLES::
sage: n = var('n')
sage: derivative(legendre_P(n,x), x)
(n*x*legendre_P(n, x) - n*legendre_P(n - 1, x))/(x^2 - 1)
sage: derivative(legendre_P(3,x), x)
15/2*x^2 - 3/2
sage: derivative(legendre_P(n,x), n)
Traceback (most recent call last):
...
            NotImplementedError: Derivative w.r.t. the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
            raise NotImplementedError("Derivative w.r.t. the index is not supported.")
else:
return (n*legendre_P(n-1, x) - n*x*legendre_P(n, x))/(1 - x**2)
legendre_P = Func_legendre_P()
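# --- Illustrative sketch (not part of the Sage sources) ----------------------
# Bonnet's recurrence, (n+1)*P_{n+1}(x) = (2*n+1)*x*P_n(x) - n*P_{n-1}(x), is
# essentially the three-term scheme that legendre_Q.eval_recursive and _Wfunc
# below use for their polynomial parts. A quick standalone numeric check:
def _legendre_p_numeric(n, x):
    """Evaluate the Legendre polynomial P_n(x) via Bonnet's recurrence."""
    if n == 0:
        return 1.0
    prev, cur = 1.0, x
    for j in range(1, n):
        prev, cur = cur, ((2 * j + 1) * x * cur - j * prev) / (j + 1)
    return cur

# Matches the doctest above: legendre_P(4, 2.0) == 55.375
assert abs(_legendre_p_numeric(4, 2.0) - 55.375) < 1e-12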
class Func_legendre_Q(BuiltinFunction):
def __init__(self):
r"""
EXAMPLES::
sage: loads(dumps(legendre_Q))
legendre_Q
sage: maxima(legendre_Q(20,x, hold=True))._sage_().coefficient(x,10)
-29113619535/131072*log(-(x + 1)/(x - 1))
"""
BuiltinFunction.__init__(self, "legendre_Q", nargs=2, latex_name=r"Q",
conversions={'maxima':'legendre_q', 'mathematica':'LegendreQ',
'maple':'LegendreQ'})
def _eval_(self, n, x, *args, **kwds):
r"""
Return an evaluation of this Legendre Q expression.
EXAMPLES::
sage: legendre_Q(2,x)
1/4*(3*x^2 - 1)*(log(x + 1) - log(-x + 1)) - 3/2*x
sage: legendre_Q(5,0)
-8/15
sage: legendre_Q(2,2*x)
1/4*(12*x^2 - 1)*(log(2*x + 1) - log(-2*x + 1)) - 3*x
sage: legendre_Q(1/2, I+1.)
-0.511424110789061 + 1.34356195297194*I
sage: legendre_Q(-1,x)
Infinity
"""
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
if n in ZZ:
if n < 0:
from sage.rings.infinity import unsigned_infinity
                return SR(unsigned_infinity)
return self.eval_formula(n, x)
def _eval_special_values_(self, n, x):
"""
Special values known.
EXAMPLES::
sage: var('n')
n
sage: legendre_Q(n, 0)
-1/2*sqrt(pi)*sin(1/2*pi*n)*gamma(1/2*n + 1/2)/gamma(1/2*n + 1)
sage: legendre_Q(-1., 0.)
+infinity
sage: legendre_Q(-1/2, 2)
elliptic_kc(3/2)
"""
if n == QQ(-1)/2:
from sage.functions.special import elliptic_kc
return elliptic_kc((x+1)/2)
if x == 1:
from sage.rings.infinity import unsigned_infinity
return SR(unsigned_infinity)
if x == -1:
from sage.rings.infinity import unsigned_infinity
return SR(unsigned_infinity)
if x == 0:
from sage.functions.other import gamma, sqrt
from sage.functions.trig import sin
try:
gam = gamma((n+1)/2)/gamma(n/2 + 1)
if gam.is_infinity():
return gam
return -(sqrt(SR.pi()))/2 * sin(SR.pi()/2*n) * gam
except TypeError:
pass
def _evalf_(self, n, x, parent=None, **kwds):
"""
Float evaluation of Legendre Q(n, x) function.
EXAMPLES::
sage: legendre_Q(4, 2.)
0.00116107583162041 - 86.9828465962674*I
sage: legendre_Q(1/2, I+1.)
-0.511424110789061 + 1.34356195297194*I
sage: legendre_Q(1/2, I+1).n(59)
-0.51142411078906080 + 1.3435619529719394*I
"""
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.legenq, n, 0, x, parent=parent)
def eval_recursive(self, n, arg, **kwds):
"""
Return expanded Legendre Q(n, arg) function expression.
EXAMPLES::
sage: legendre_Q.eval_recursive(2,x)
3/4*x^2*(log(x + 1) - log(-x + 1)) - 3/2*x - 1/4*log(x + 1) + 1/4*log(-x + 1)
sage: legendre_Q.eval_recursive(20,x).expand().coefficient(x,10)
-29113619535/131072*log(x + 1) + 29113619535/131072*log(-x + 1)
"""
from sage.functions.log import ln
if n == 0:
return (ln(1+arg)-ln(1-arg))/2
elif n == 1:
return arg/2*(ln(1+arg)-ln(1-arg))-1
x, l = PolynomialRing(QQ, 'x,l').gens()
help1 = l / 2
help2 = x / 2 * l - 1
for j in range(1, n):
help3 = (2 * j + 1) * x * help2 - j * help1
help3 = help3 / (j + 1)
help1 = help2
help2 = help3
sum1 = sum(help3.monomial_coefficient(mon)*arg**(mon.exponents()[0][0])
for mon in help3.monomials() if not l.divides(mon))
sum2 = sum(help3.monomial_coefficient(mon)*arg**(mon.exponents()[0][0])*(ln(1+arg)-ln(1-arg))
for mon in help3.monomials() if l.divides(mon))
return sum1 + sum2
def eval_formula(self, n, arg, **kwds):
"""
Return expanded Legendre ``Q(n, arg)`` function expression.
REFERENCE:
- T. M. Dunster, Legendre and Related Functions, http://dlmf.nist.gov/14.7#E2
EXAMPLES::
sage: legendre_Q.eval_formula(1, x)
1/2*x*(log(x + 1) - log(-x + 1)) - 1
sage: legendre_Q.eval_formula(2,x).expand().collect(log(1+x)).collect(log(1-x))
1/4*(3*x^2 - 1)*log(x + 1) - 1/4*(3*x^2 - 1)*log(-x + 1) - 3/2*x
sage: legendre_Q.eval_formula(20,x).coefficient(x,10)
-29113619535/131072*log(x + 1) + 29113619535/131072*log(-x + 1)
sage: legendre_Q(0, 2)
-1/2*I*pi + 1/2*log(3)
sage: legendre_Q(0, 2.)
0.549306144334055 - 1.57079632679490*I
"""
from sage.functions.log import ln
if n == 0:
return (ln(1+arg)-ln(1-arg))/2
elif n == 1:
return arg/2*(ln(1+arg)-ln(1-arg))-1
arg = SR(arg)
return legendre_P(n, arg)*(ln(1+arg)-ln(1-arg))/2 - self._Wfunc(n, arg)
def _Wfunc(self, n, arg):
"""
Helper function for ``eval_formula()``.
EXAMPLES::
sage: legendre_Q._Wfunc(2, x)
3/2*x
sage: legendre_Q._Wfunc(7, x)
429/16*x^6 - 275/8*x^4 + 849/80*x^2 - 16/35
"""
if n == 0:
return 0
if n == 1:
return 1
x = PolynomialRing(QQ, 'x').gen()
help1 = 0
help2 = 1
for j in range(2, n + 1):
help3 = (2 * j - 1) * x * help2 - (j - 1) * help1
help3 = help3 / j
help1 = help2
help2 = help3
return sum(b * arg**a for a, b in enumerate(help3))
def _derivative_(self, n, x, *args,**kwds):
"""
Return the derivative of legendre_Q.
EXAMPLES::
sage: n = var('n')
sage: derivative(legendre_Q(n,x), x)
(n*x*legendre_Q(n, x) - n*legendre_Q(n - 1, x))/(x^2 - 1)
sage: ex1 = legendre_Q(5,x,hold=True).diff(x).expand().simplify_full()
sage: ex2 = legendre_Q(5,x).diff(x).expand().simplify_full()
sage: ex1.subs(x=7).n() == ex2.subs(x=7).n()
True
sage: derivative(legendre_Q(n,x), n)
Traceback (most recent call last):
...
            NotImplementedError: Derivative w.r.t. the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
            raise NotImplementedError("Derivative w.r.t. the index is not supported.")
else:
return (n*x*legendre_Q(n, x) - n*legendre_Q(n-1, x))/(x**2 - 1)
legendre_Q = Func_legendre_Q()
class Func_assoc_legendre_P(BuiltinFunction):
def __init__(self):
r"""
EXAMPLES::
sage: loads(dumps(gen_legendre_P))
gen_legendre_P
sage: maxima(gen_legendre_P(20,6,x, hold=True))._sage_().expand().coefficient(x,10)
2508866163428625/128
"""
BuiltinFunction.__init__(self, "gen_legendre_P", nargs=3, latex_name=r"P",
conversions={'maxima':'assoc_legendre_p', 'mathematica':'LegendreP',
'maple':'LegendreP'})
def _eval_(self, n, m, x, *args, **kwds):
r"""
Return an evaluation of this Legendre P(n, m, x) expression.
EXAMPLES::
sage: gen_legendre_P(3,2,2)
-90
sage: gen_legendre_P(13/2,2,0)
2*sqrt(2)*gamma(19/4)/(sqrt(pi)*gamma(13/4))
sage: gen_legendre_P(3,2,x)
-15*(x^2 - 1)*x
sage: gen_legendre_P(3,2,2).n() # abs tol 1e-14
-90.0000000000000
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
if (n in ZZ and m in ZZ
and n >= 0 and m >= 0
and (x in ZZ or not SR(x).is_numeric())):
return self.eval_poly(n, m, x)
def _eval_special_values_(self, n, m, x):
"""
Special values known.
EXAMPLES::
sage: gen_legendre_P(2,3,4)
0
sage: gen_legendre_P(2,0,4)==legendre_P(2,4)
True
sage: gen_legendre_P(2,2,4)
45
sage: gen_legendre_P(2,2,x)
3*x^2 - 3
sage: gen_legendre_P(13/2,2,0)
2*sqrt(2)*gamma(19/4)/(sqrt(pi)*gamma(13/4))
sage: (m,n) = var('m,n')
sage: gen_legendre_P(n,m,0)
2^m*cos(1/2*pi*m + 1/2*pi*n)*gamma(1/2*m + 1/2*n + 1/2)/(sqrt(pi)*gamma(-1/2*m + 1/2*n + 1))
sage: gen_legendre_P(n,3,0)
8*cos(3/2*pi + 1/2*pi*n)*gamma(1/2*n + 2)/(sqrt(pi)*gamma(1/2*n - 1/2))
sage: gen_legendre_P(3,m,0)
2^m*cos(3/2*pi + 1/2*pi*m)*gamma(1/2*m + 2)/(sqrt(pi)*gamma(-1/2*m + 5/2))
"""
if m > n:
return ZZ(0)
if m == 0:
return legendre_P(n, x)
if n == m:
return factorial(2*m)/2**m/factorial(m) * (x**2-1)**(m/2)
if x == 0:
from sage.functions.other import gamma, sqrt
from sage.functions.trig import cos
if m in QQ and n in QQ:
return 2**m/sqrt(SR.pi())*cos((n+m)/2*SR.pi())*(gamma(QQ(n+m+1)/2)/gamma(QQ(n-m)/2+1))
elif isinstance(n, Expression) or isinstance(m, Expression):
return 2**m/sqrt(SR.pi())*cos((n+m)/2*SR.pi())*(gamma((n+m+1)/2)/gamma((n-m)/2+1))
def _evalf_(self, n, m, x, parent=None, **kwds):
"""
Float evaluation of Legendre P(n, m, x) function.
EXAMPLES::
sage: gen_legendre_P(10,2,3).n() # abs tol 1e-14
-7.19496360000000e8
sage: gen_legendre_P(5/2,2,1.+I)
14.3165258449040 - 12.7850496155152*I
sage: gen_legendre_P(5/2,2,ComplexField(70)(1+I))
14.316525844904028532 - 12.785049615515157033*I
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.legenp, n, m, x, parent=parent)
def eval_poly(self, n, m, arg, **kwds):
"""
Return the associated Legendre P(n, m, arg) polynomial for integers `n > -1, m > -1`.
EXAMPLES::
sage: gen_legendre_P(7,4,x)
3465/2*(13*x^3 - 3*x)*(x^2 - 1)^2
sage: gen_legendre_P(3,1,sqrt(x))
-3/2*(5*x - 1)*sqrt(-x + 1)
REFERENCE:
- T. M. Dunster, Legendre and Related Functions, http://dlmf.nist.gov/14.7#E10
"""
from sage.functions.other import factorial
if n < 0 or m < 0:
return
R = PolynomialRing(QQ, 'x')
x = R.gen()
p = (1-x**2)**ZZ(n)
for i in range(m + n):
p = p.diff(x)
ex1 = (1-arg**2)**(QQ(m)/2)/2**n/factorial(ZZ(n))
ex2 = sum(b * arg**a for a, b in enumerate(p))
return (-1)**(m+n)*ex1*ex2
def _derivative_(self, n, m, x, *args,**kwds):
"""
Return the derivative of ``gen_legendre_P(n,m,x)``.
EXAMPLES::
sage: (m,n) = var('m,n')
sage: derivative(gen_legendre_P(n,m,x), x)
-((n + 1)*x*gen_legendre_P(n, m, x) + (m - n - 1)*gen_legendre_P(n + 1, m, x))/(x^2 - 1)
sage: gen_legendre_P(3,2,x,hold=True).diff(x).expand().simplify_full()
-45*x^2 + 15
sage: derivative(gen_legendre_P(n,m,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
else:
return ((n-m+1)*gen_legendre_P(n+1, m, x) - (n+1)*x*gen_legendre_P(n, m, x))/(x**2 - 1)
gen_legendre_P = Func_assoc_legendre_P()
class Func_assoc_legendre_Q(BuiltinFunction):
def __init__(self):
r"""
EXAMPLES::
sage: loads(dumps(gen_legendre_Q))
gen_legendre_Q
sage: maxima(gen_legendre_Q(2,1,3, hold=True))._sage_().simplify_full()
1/4*sqrt(2)*(36*pi - 36*I*log(2) + 25*I)
"""
BuiltinFunction.__init__(self, "gen_legendre_Q", nargs=3, latex_name=r"Q",
conversions={'maxima':'assoc_legendre_q', 'mathematica':'LegendreQ',
'maple':'LegendreQ'})
def _eval_(self, n, m, x, *args, **kwds):
r"""
Return an evaluation of this Legendre Q(n, m, x) expression.
EXAMPLES::
sage: gen_legendre_Q(2,1,3)
-1/4*sqrt(-2)*(-36*I*pi + 36*log(2) - 25)
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
if (n in ZZ and m in ZZ
and n >= 0 and m >= 0
and (x in ZZ or not SR(x).is_numeric())):
return self.eval_recursive(n, m, x)
def _eval_special_values_(self, n, m, x):
"""
Special values known.
EXAMPLES::
sage: n, m = var('n m')
sage: gen_legendre_Q(n,m,0)
-sqrt(pi)*2^(m - 1)*sin(1/2*pi*m + 1/2*pi*n)*gamma(1/2*m + 1/2*n + 1/2)/gamma(-1/2*m + 1/2*n + 1)
"""
if m == 0:
return legendre_Q(n, x)
if x.is_zero():
from sage.functions.other import gamma, sqrt
from sage.functions.trig import sin
if m in QQ and n in QQ:
return -(sqrt(SR.pi()))*sin(SR.pi()/2*(m+n))*gamma(QQ(m+n+1)/2)/gamma(QQ(n-m)/2 + 1)*2**(m-1)
elif isinstance(n, Expression) or isinstance(m, Expression):
return -(sqrt(SR.pi()))*sin(SR.pi()/2*(m+n))*gamma((m+n+1)/2)/gamma((n-m)/2 + 1)*2**(m-1)
def _evalf_(self, n, m, x, parent=None, **kwds):
"""
Float evaluation of Legendre Q(n, m, x) function.
EXAMPLES::
sage: gen_legendre_Q(2,1,3.)
-39.9859464434253 + 0.0165114736149193*I
sage: gen_legendre_Q(2,1,ComplexField(70)(3))
-39.985946443425296223 + 0.016511473614919329585*I
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.legenq, n, m, x, parent=parent)
def eval_recursive(self, n, m, x, **kwds):
"""
Return the associated Legendre Q(n, m, arg) function for integers `n > -1, m > -1`.
EXAMPLES::
sage: gen_legendre_Q(3,4,x)
48/(x^2 - 1)^2
sage: gen_legendre_Q(4,5,x)
-384/((x^2 - 1)^2*sqrt(-x^2 + 1))
sage: gen_legendre_Q(0,1,x)
-1/sqrt(-x^2 + 1)
sage: gen_legendre_Q(0,2,x)
-1/2*((x + 1)^2 - (x - 1)^2)/(x^2 - 1)
sage: gen_legendre_Q(2,2,x).subs(x=2).expand()
9/2*I*pi - 9/2*log(3) + 14/3
"""
from sage.functions.all import sqrt
if m == n + 1 or n == 0:
if m.mod(2).is_zero():
denom = (1 - x**2)**(m/2)
else:
denom = sqrt(1 - x**2)*(1 - x**2)**((m-1)/2)
if m == n + 1:
return (-1)**m*(m-1).factorial()*2**n/denom
else:
return (-1)**m*(m-1).factorial()*((x+1)**m - (x-1)**m)/(2*denom)
else:
return ((n-m+1)*x*gen_legendre_Q(n,m-1,x)-(n+m-1)*gen_legendre_Q(n-1,m-1,x))/sqrt(1-x**2)
def _derivative_(self, n, m, x, *args,**kwds):
"""
Return the derivative of ``gen_legendre_Q(n,m,x)``.
EXAMPLES::
sage: (m,n) = var('m,n')
sage: derivative(gen_legendre_Q(n,m,x), x)
-((n + 1)*x*gen_legendre_Q(n, m, x) + (m - n - 1)*gen_legendre_Q(n + 1, m, x))/(x^2 - 1)
sage: ex1=gen_legendre_Q(3,2,x,hold=True).diff(x).expand().simplify_full()
sage: ex2=gen_legendre_Q(3,2,x).diff(x).expand().simplify_full()
sage: ex1.subs(x=5).n() == ex2.subs(x=5).n()
True
sage: derivative(gen_legendre_Q(n,m,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
else:
return ((n-m+1)*gen_legendre_Q(n+1, m, x) - (n+1)*x*gen_legendre_Q(n, m, x))/(x**2 - 1)
gen_legendre_Q = Func_assoc_legendre_Q()
class Func_hermite(GinacFunction):
"""
Returns the Hermite polynomial for integers `n > -1`.
REFERENCE:
- [AS1964]_ 22.5.40 and 22.5.41, page 779.
EXAMPLES::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: hermite(2,x)
4*x^2 - 2
sage: hermite(3,x)
8*x^3 - 12*x
sage: hermite(3,2)
40
sage: S.<y> = PolynomialRing(RR)
sage: hermite(3,y)
8.00000000000000*y^3 - 12.0000000000000*y
sage: R.<x,y> = QQ[]
sage: hermite(3,y^2)
8*y^6 - 12*y^2
sage: w = var('w')
sage: hermite(3,2*w)
64*w^3 - 24*w
sage: hermite(5,3.1416)
5208.69733891963
sage: hermite(5,RealField(100)(pi))
5208.6167627118104649470287166
Check that :trac:`17192` is fixed::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: hermite(0,x)
1
sage: hermite(-1,x)
Traceback (most recent call last):
...
RuntimeError: hermite_eval: The index n must be a nonnegative integer
sage: hermite(-7,x)
Traceback (most recent call last):
...
RuntimeError: hermite_eval: The index n must be a nonnegative integer
sage: _ = var('m x')
sage: hermite(m, x).diff(m)
Traceback (most recent call last):
...
RuntimeError: derivative w.r.t. to the index is not supported yet
"""
def __init__(self):
r"""
Init method for the Hermite polynomials.
EXAMPLES::
sage: loads(dumps(hermite))
hermite
sage: hermite(x, x)._sympy_()
hermite(x, x)
"""
GinacFunction.__init__(self, "hermite", nargs=2, latex_name=r"H",
conversions={'maxima':'hermite', 'mathematica':'HermiteH',
'maple':'HermiteH', 'sympy':'hermite'}, preserved_arg=2)
hermite = Func_hermite()
class Func_jacobi_P(OrthogonalFunction):
r"""
Return the Jacobi polynomial `P_n^{(a,b)}(x)` for
integers `n > -1` and a and b symbolic or `a > -1`
and `b > -1`. The Jacobi polynomials are actually defined
for all a and b. However, the Jacobi polynomial weight
`(1-x)^a(1+x)^b` isn't integrable for `a \leq -1`
or `b \leq -1`.
REFERENCE:
- Table on page 789 in [AS1964]_.
EXAMPLES::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: jacobi_P(2,0,0,x)
3/2*x^2 - 1/2
sage: jacobi_P(2,1,2,1.2)
5.01000000000000
"""
def __init__(self):
r"""
Init method for the Jacobi polynomials.
EXAMPLES::
sage: _ = var('n a b x')
sage: loads(dumps(jacobi_P))
jacobi_P
sage: jacobi_P(n, a, b, x, hold=True)._sympy_()
jacobi(n, a, b, x)
"""
OrthogonalFunction.__init__(self, "jacobi_P", nargs=4, latex_name=r"P",
conversions={'maxima':'jacobi_p', 'mathematica':'JacobiP',
'maple':'JacobiP', 'sympy':'jacobi'})
def _eval_(self, n, a, b, x):
"""
EXAMPLES::
sage: _ = var('n a b x')
sage: jacobi_P(1,n,n,n)
(n + 1)*n
sage: jacobi_P(2,n,n,n)
1/4*(2*n - 1)*(n + 2)*(n + 1)^2
sage: jacobi_P(1,n,n,x)
(n + 1)*x
sage: jacobi_P(3,2,1,x)
21/2*x^3 + 7/2*x^2 - 7/2*x - 1/2
sage: jacobi_P(1,a,b,x)
1/2*a*x + 1/2*b*x + 1/2*a - 1/2*b + x
TESTS:
Check that :trac:`17192` is fixed::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: jacobi_P(0,0,0,x)
1
sage: jacobi_P(-1,0,0,x)
1
sage: jacobi_P(-1,1,1,x)
Traceback (most recent call last):
...
ValueError: n must be greater than -1, got n = -1
sage: jacobi_P(-7,0,0,x)
231/16*x^6 - 315/16*x^4 + 105/16*x^2 - 5/16
sage: jacobi_P(-7,0,2,x)
Traceback (most recent call last):
...
ValueError: n must be greater than -1, got n = -7
"""
if SR(a).is_trivial_zero() and SR(b).is_trivial_zero():
return legendre_P(n, x)
if SR(n).is_numeric() and not (n > -1):
raise ValueError("n must be greater than -1, got n = {0}".format(n))
if not n in ZZ:
return
from sage.functions.other import gamma
s = sum(binomial(n,m) * gamma(a+b+n+m+1) / gamma(a+m+1) * ((x-1)/2)**m for m in range(n+1))
r = gamma(a+n+1) / factorial(n) / gamma(n+a+b+1) * s
return r.to_gamma().gamma_normalize().normalize()
def _evalf_(self, n, a, b, x, **kwds):
"""
EXAMPLES::
sage: jacobi_P(2, 1, 2, 1.2)
5.01000000000000
sage: jacobi_P(2, 1, 2, 1.2, hold=True).n(20)
5.0100
sage: jacobi_P(2, 1, 2, pi+I, hold=True).n(100)
41.103034125334442891187112674 + 31.486722862692829003857755524*I
"""
from sage.rings.complex_arb import ComplexBallField as CBF
the_parent = kwds.get('parent', None)
if the_parent is None:
the_parent = parent(x)
prec = the_parent.precision()
BF = CBF(prec+5)
ret = BF(x).jacobi_P(BF(n), BF(a), BF(b))
return SR(ret)._eval_self(the_parent)
jacobi_P = Func_jacobi_P()
class Func_ultraspherical(GinacFunction):
r"""
Return the ultraspherical (or Gegenbauer) polynomial gegenbauer(n,a,x),
.. MATH::
C_n^{a}(x)=\sum_{k=0}^{\lfloor n/2\rfloor} (-1)^k\frac{\Gamma(n-k+a)}
{\Gamma(a)k!(n-2k)!}(2x)^{n-2k}.
When `n` is a nonnegative integer, this formula gives a
polynomial in `z` of degree `n`, but all parameters are
permitted to be complex numbers. When `a = 1/2`, the
Gegenbauer polynomial reduces to a Legendre polynomial.
Computed using Pynac.
For numerical evaluation, consider using the `mpmath library
<http://mpmath.org/doc/current/functions/orthogonal.html#gegenbauer-polynomials>`_,
as it also allows complex numbers (and negative `n` as well);
see the examples below.
REFERENCE:
- [AS1964]_ 22.5.27
EXAMPLES::
sage: ultraspherical(8, 101/11, x)
795972057547264/214358881*x^8 - 62604543852032/19487171*x^6...
sage: x = PolynomialRing(QQ, 'x').gen()
sage: ultraspherical(2,3/2,x)
15/2*x^2 - 3/2
sage: ultraspherical(1,1,x)
2*x
sage: t = PolynomialRing(RationalField(),"t").gen()
sage: gegenbauer(3,2,t)
32*t^3 - 12*t
sage: _=var('x');
sage: for N in range(100):
....: n = ZZ.random_element().abs() + 5
....: a = QQ.random_element().abs() + 5
....: assert ((n+1)*ultraspherical(n+1,a,x) - 2*x*(n+a)*ultraspherical(n,a,x) + (n+2*a-1)*ultraspherical(n-1,a,x)).expand().is_zero()
sage: ultraspherical(5,9/10,3.1416)
6949.55439044240
sage: ultraspherical(5,9/10,RealField(100)(pi))
6949.4695419382702451843080687
sage: _ = var('a n')
sage: gegenbauer(2,a,x)
2*(a + 1)*a*x^2 - a
sage: gegenbauer(3,a,x)
4/3*(a + 2)*(a + 1)*a*x^3 - 2*(a + 1)*a*x
sage: gegenbauer(3,a,x).expand()
4/3*a^3*x^3 + 4*a^2*x^3 + 8/3*a*x^3 - 2*a^2*x - 2*a*x
sage: gegenbauer(10,a,x).expand().coefficient(x,2)
1/12*a^6 + 5/4*a^5 + 85/12*a^4 + 75/4*a^3 + 137/6*a^2 + 10*a
sage: ex = gegenbauer(100,a,x)
sage: (ex.subs(a==55/98) - gegenbauer(100,55/98,x)).is_trivial_zero()
True
sage: gegenbauer(2,-3,x)
12*x^2 + 3
sage: gegenbauer(120,-99/2,3)
1654502372608570682112687530178328494861923493372493824
sage: gegenbauer(5,9/2,x)
21879/8*x^5 - 6435/4*x^3 + 1287/8*x
sage: gegenbauer(15,3/2,5)
3903412392243800
sage: derivative(gegenbauer(n,a,x),x)
2*a*gegenbauer(n - 1, a + 1, x)
sage: derivative(gegenbauer(3,a,x),x)
4*(a + 2)*(a + 1)*a*x^2 - 2*(a + 1)*a
sage: derivative(gegenbauer(n,a,x),a)
Traceback (most recent call last):
...
RuntimeError: derivative w.r.t. to the second index is not supported yet
Numerical evaluation with the mpmath library::
sage: from mpmath import gegenbauer as gegenbauer_mp
sage: from mpmath import mp
sage: mp.pretty = True; mp.dps=25
sage: gegenbauer_mp(-7,0.5,0.3)
0.1291811875
sage: gegenbauer_mp(2+3j, -0.75, -1000j)
(-5038991.358609026523401901 + 9414549.285447104177860806j)
TESTS:
Check that :trac:`17192` is fixed::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: ultraspherical(0,1,x)
1
sage: ultraspherical(-1,1,x)
Traceback (most recent call last):
...
RuntimeError: gegenb_eval: The index n must be a nonnegative integer
sage: ultraspherical(-7,1,x)
Traceback (most recent call last):
...
RuntimeError: gegenb_eval: The index n must be a nonnegative integer
"""
def __init__(self):
r"""
Init method for the ultraspherical polynomials.
EXAMPLES::
sage: loads(dumps(ultraspherical))
gegenbauer
sage: ultraspherical(x, x, x)._sympy_()
gegenbauer(x, x, x)
"""
GinacFunction.__init__(self, "gegenbauer", nargs=3, latex_name=r"C",
conversions={'maxima':'ultraspherical', 'mathematica':'GegenbauerC',
'maple':'GegenbauerC', 'sympy':'gegenbauer'})
ultraspherical = Func_ultraspherical()
gegenbauer = Func_ultraspherical()
class Func_laguerre(OrthogonalFunction):
"""
REFERENCE:
- [AS1964]_ 22.5.16, page 778 and page 789.
"""
def __init__(self):
r"""
Init method for the Laguerre polynomials.
EXAMPLES::
sage: n,x = var('n,x')
sage: loads(dumps(laguerre))
laguerre
sage: laguerre(x, x)._sympy_()
laguerre(x, x)
sage: maxima(laguerre(1, x, hold=True))
1-_SAGE_VAR_x
sage: maxima(laguerre(n, laguerre(n, x)))
laguerre(_SAGE_VAR_n,laguerre(_SAGE_VAR_n,_SAGE_VAR_x))
"""
OrthogonalFunction.__init__(self, "laguerre", nargs=2, latex_name=r"L",
conversions={'maxima':'laguerre', 'mathematica':'LaguerreL',
'maple':'LaguerreL', 'sympy':'laguerre'})
def _eval_(self, n, x, *args, **kwds):
r"""
Return an evaluation of this Laguerre polynomial expression.
EXAMPLES::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: laguerre(2,x)
1/2*x^2 - 2*x + 1
sage: laguerre(3,x)
-1/6*x^3 + 3/2*x^2 - 3*x + 1
sage: laguerre(2,2)
-1
sage: laguerre(-1, x)
e^x
sage: laguerre(-6, x)
1/120*(x^5 + 25*x^4 + 200*x^3 + 600*x^2 + 600*x + 120)*e^x
sage: laguerre(-9,2)
66769/315*e^2
"""
from sage.rings.integer import Integer
from sage.functions.log import exp
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
if isinstance(n, (Integer, int)):
if n >= 0 and not hasattr(x, 'prec'):
return self._pol_laguerre(n, x)
elif n < 0:
return exp(x)*laguerre(-n-1, -x)
def _eval_special_values_(self, n, x):
"""
Special values known.
EXAMPLES::
sage: laguerre(0, 0)
1
sage: laguerre(1, x)
-x + 1
"""
if n == 0 or x == 0:
return ZZ(1)
if n == 1:
return ZZ(1) - x
def _pol_laguerre(self, n, x):
"""
Fast creation of Laguerre polynomial.
EXAMPLES::
sage: laguerre(3,sin(x))
-1/6*sin(x)^3 + 3/2*sin(x)^2 - 3*sin(x) + 1
sage: R.<x> = PolynomialRing(QQ, 'x')
sage: laguerre(4,x)
1/24*x^4 - 2/3*x^3 + 3*x^2 - 4*x + 1
sage: laguerre(4,x+1)
1/24*(x + 1)^4 - 2/3*(x + 1)^3 + 3*(x + 1)^2 - 4*x - 3
sage: laguerre(10,1+I)
142511/113400*I + 95867/22680
"""
if hasattr(x, 'pyobject'):
try:
x = x.pyobject()
except TypeError:
pass
return SR(sum(binomial(n, k) * (-1)**k / factorial(k) * x**k
for k in range(n + 1)))
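# The sum above evaluates the standard closed form
#     L_n(x) = sum_{k=0}^{n} binomial(n, k) * (-x)^k / k!
# directly, rather than going through the three-term recurrence.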
def _evalf_(self, n, x, **kwds):
"""
Return the evaluation of `laguerre(n,x)` with floating point `x`.
EXAMPLES::
sage: laguerre(100,RealField(300)(pi))
-0.638322077840648311606324...
sage: laguerre(10,1.+I)
4.22694003527337 + 1.25671075837743*I
sage: laguerre(-9, 2.)
1566.22186244286
"""
the_parent = kwds.get('parent', None)
if the_parent is None:
the_parent = parent(x)
import mpmath
from sage.libs.mpmath.all import call as mpcall
if n<0:
# work around mpmath issue 307
from sage.functions.log import exp
return exp(x) * mpcall(mpmath.laguerre, -n-1, 0, -x, parent=the_parent)
else:
return mpcall(mpmath.laguerre, n, 0, x, parent=the_parent)
def _derivative_(self, n, x, *args,**kwds):
"""
Return the derivative of `laguerre(n,x)`.
EXAMPLES::
sage: n=var('n')
sage: diff(laguerre(n,x), x)
-gen_laguerre(n - 1, 1, x)
TESTS::
sage: diff(laguerre(x,x))
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
if diff_param == 1:
return -gen_laguerre(n-1,1,x)
else:
raise ValueError("illegal differentiation parameter {}".format(diff_param))
laguerre = Func_laguerre()
class Func_gen_laguerre(OrthogonalFunction):
"""
REFERENCE:
- [AS1964]_ 22.5.16, page 778 and page 789.
"""
def __init__(self):
r"""
Init method for the Laguerre polynomials.
EXAMPLES::
sage: a,n,x = var('a, n, x')
sage: loads(dumps(gen_laguerre))
gen_laguerre
sage: gen_laguerre(x, x, x)._sympy_()
assoc_laguerre(x, x, x)
sage: maxima(gen_laguerre(1,2,x, hold=True))
3*(1-_SAGE_VAR_x/3)
sage: maxima(gen_laguerre(n, a, gen_laguerre(n, a, x)))
gen_laguerre(_SAGE_VAR_n,_SAGE_VAR_a,gen_laguerre(_SAGE_VAR_n,_SAGE_VAR_a,_SAGE_VAR_x))
"""
OrthogonalFunction.__init__(self, "gen_laguerre", nargs=3, latex_name=r"L",
conversions={'maxima':'gen_laguerre', 'mathematica':'LaguerreL',
'maple':'LaguerreL', 'sympy':'assoc_laguerre'})
def _eval_(self, n, a, x, *args, **kwds):
r"""
Return an evaluation of this Laguerre polynomial expression.
EXAMPLES::
sage: gen_laguerre(2, 1, x)
1/2*x^2 - 3*x + 3
sage: gen_laguerre(2, 1/2, x)
1/2*x^2 - 5/2*x + 15/8
sage: gen_laguerre(2, -1/2, x)
1/2*x^2 - 3/2*x + 3/8
sage: gen_laguerre(2, 0, x)
1/2*x^2 - 2*x + 1
sage: gen_laguerre(3, 0, x)
-1/6*x^3 + 3/2*x^2 - 3*x + 1
"""
from sage.rings.integer import Integer
ret = self._eval_special_values_(n, a, x)
if ret is not None:
return ret
if isinstance(n, Integer):
if n >= 0 and not hasattr(x, 'prec'):
return self._pol_gen_laguerre(n, a, x)
def _eval_special_values_(self, n, a, x):
"""
Special values known.
EXAMPLES::
sage: gen_laguerre(0, 1, pi)
1
sage: gen_laguerre(1, 2, x)
-x + 3
sage: gen_laguerre(3, 4, 0)
35
"""
if n == 0:
return ZZ(1)
if n == 1:
return ZZ(1) + a - x
if a == 0:
return laguerre(n, x)
if x == 0:
from sage.arith.all import binomial
return binomial(n+a, n)
def _pol_gen_laguerre(self, n, a, x):
"""
EXAMPLES::
sage: gen_laguerre(3, 1/2, sin(x))
-1/6*sin(x)^3 + 7/4*sin(x)^2 - 35/8*sin(x) + 35/16
sage: R.<x> = PolynomialRing(QQ, 'x')
sage: gen_laguerre(4, -1/2, x)
1/24*x^4 - 7/12*x^3 + 35/16*x^2 - 35/16*x + 35/128
sage: gen_laguerre(4, -1/2, x+1)
1/24*(x + 1)^4 - 7/12*(x + 1)^3 + 35/16*(x + 1)^2 - 35/16*x - 245/128
sage: gen_laguerre(10, 1, 1+I)
25189/2100*I + 11792/2835
"""
return sum(binomial(n + a, n - k) * (-1)**k / factorial(k) * x**k
for k in range(n + 1))
def _evalf_(self, n, a, x, **kwds):
"""
EXAMPLES::
sage: gen_laguerre(100,1,RealField(300)(pi))
-0.89430788373354541911...
sage: gen_laguerre(10,1/2,1.+I)
5.34469635574906 + 5.23754057922902*I
"""
the_parent = kwds.get('parent', None)
if the_parent is None:
the_parent = parent(x)
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.laguerre, n, a, x, parent=the_parent)
def _derivative_(self, n, a, x, diff_param):
"""
Return the derivative of `gen_laguerre(n,a,x)`.
EXAMPLES::
sage: (a,n)=var('a,n')
sage: diff(gen_laguerre(n,a,x), x)
-gen_laguerre(n - 1, a + 1, x)
sage: gen_laguerre(n,a,x).diff(a)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the second index is not supported.
TESTS::
sage: diff(gen_laguerre(n,a,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
elif diff_param == 1:
raise NotImplementedError("Derivative w.r.t. to the second index is not supported.")
elif diff_param == 2:
return -gen_laguerre(n - 1, a + 1, x)
else:
raise ValueError("illegal differentiation parameter {}".format(diff_param))
gen_laguerre = Func_gen_laguerre()
| 32.13815
| 212
| 0.522367
|
d684d9fe58ab2fd4cee9a37c72fa9440f672f211
| 3,480
|
py
|
Python
|
src/python/nimbusml/ensemble/output_combiner/classifierweightedaverage.py
|
michaelgsharp/NimbusML
|
50031157265f49eec85d27fe67582d9ddaf01ef9
|
[
"MIT"
] | 134
|
2018-11-01T22:15:24.000Z
|
2019-05-04T11:30:08.000Z
|
src/python/nimbusml/ensemble/output_combiner/classifierweightedaverage.py
|
michaelgsharp/NimbusML
|
50031157265f49eec85d27fe67582d9ddaf01ef9
|
[
"MIT"
] | 226
|
2019-05-07T19:00:44.000Z
|
2021-01-06T07:59:48.000Z
|
src/python/nimbusml/ensemble/output_combiner/classifierweightedaverage.py
|
michaelgsharp/NimbusML
|
50031157265f49eec85d27fe67582d9ddaf01ef9
|
[
"MIT"
] | 43
|
2019-05-15T20:19:42.000Z
|
2022-03-30T10:26:07.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
ClassifierWeightedAverage
"""
__all__ = ["ClassifierWeightedAverage"]
from ...internal.core.ensemble.output_combiner.classifierweightedaverage import \
ClassifierWeightedAverage as core
from ...internal.utils.utils import trace
class ClassifierWeightedAverage(core):
"""
**Description**
Computes the weighted average of the outputs of the trained models
:param weightage_name: the metric type to be used to find the weights for
each model. Can be ``"AccuracyMicroAvg"`` or ``"AccuracyMacroAvg"``.
:param normalize: Specifies the type of automatic normalization used:
* ``"Auto"``: if normalization is needed, it is performed
automatically. This is the default choice.
* ``"No"``: no normalization is performed.
* ``"Yes"``: normalization is performed.
* ``"Warn"``: if normalization is needed, a warning
message is displayed, but normalization is not performed.
Normalization rescales disparate data ranges to a standard scale.
Feature scaling ensures the distances between data points are
proportional and enables various optimization methods such as
gradient descent to converge much faster. If normalization is
performed, a ``MinMax`` normalizer is used. It normalizes values in
an interval [a, b] where ``-1 <= a <= 0`` and ``0 <= b <= 1`` and
``b - a = 1``. This normalizer preserves sparsity by mapping zero
to zero.
:param params: Additional arguments sent to compute engine.
.. seealso::
:py:class:`EnsembleClassifier
<nimbusml.ensemble.EnsembleClassifier>`
* Submodel selectors:
:py:class:`ClassifierAllSelector
<nimbusml.ensemble.sub_model_selector.ClassifierAllSelector>`,
:py:class:`ClassifierBestDiverseSelector
<nimbusml.ensemble.sub_model_selector.ClassifierBestDiverseSelector>`,
:py:class:`ClassifierBestPerformanceSelector
<nimbusml.ensemble.sub_model_selector.ClassifierBestPerformanceSelector>`
* Output combiners:
:py:class:`ClassifierAverage
<nimbusml.ensemble.output_combiner.ClassifierAverage>`,
:py:class:`ClassifierMedian
<nimbusml.ensemble.output_combiner.ClassifierMedian>`,
:py:class:`ClassifierStacking
<nimbusml.ensemble.output_combiner.ClassifierStacking>`,
:py:class:`ClassifierVoting
<nimbusml.ensemble.output_combiner.ClassifierVoting>`
.. index:: models, ensemble, classification
Example:
.. literalinclude:: /../nimbusml/examples/EnsembleClassifier.py
:language: python
"""
@trace
def __init__(
self,
weightage_name='AccuracyMicroAvg',
normalize=True,
**params):
core.__init__(
self,
weightage_name=weightage_name,
normalize=normalize,
**params)
def get_params(self, deep=False):
"""
Get the parameters for this operator.
"""
return core.get_params(self)
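# A minimal usage sketch (hypothetical pipeline, not part of this file): the
# combiner is typically handed to an EnsembleClassifier as its output_combiner;
# the exact EnsembleClassifier arguments below are assumptions for illustration.
#
#     from nimbusml.ensemble import EnsembleClassifier
#     from nimbusml.ensemble.output_combiner import ClassifierWeightedAverage
#
#     combiner = ClassifierWeightedAverage(weightage_name='AccuracyMicroAvg')
#     model = EnsembleClassifier(num_models=3, output_combiner=combiner)
#     # model.fit(X, y); model.predict(X)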
| 35.151515
| 94
| 0.628736
|
9e62f529cf80a194ea5dcf5f81d5aa89d251b971
| 967
|
py
|
Python
|
ucsmsdk/methodmeta/ApeCreateHVVnicMeta.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 78
|
2015-11-30T14:10:05.000Z
|
2022-02-13T00:29:08.000Z
|
ucsmsdk/methodmeta/ApeCreateHVVnicMeta.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 113
|
2015-11-20T09:42:46.000Z
|
2022-03-16T16:53:29.000Z
|
ucsmsdk/methodmeta/ApeCreateHVVnicMeta.py
|
Kego/ucsmsdk
|
244f283a5c295cf746110bb96686d079b19927ce
|
[
"Apache-2.0"
] | 86
|
2015-12-12T08:22:18.000Z
|
2022-01-23T03:56:34.000Z
|
"""This module contains the meta information of ApeCreateHVVnic ExternalMethod."""
from ..ucscoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("ApeCreateHVVnic", "apeCreateHVVnic", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"in_blade_slot_id": MethodPropertyMeta("InBladeSlotId", "inBladeSlotId", "Xs:unsignedInt", "Version142b", "Input", False),
"in_chassis_id": MethodPropertyMeta("InChassisId", "inChassisId", "Xs:unsignedInt", "Version142b", "Input", False),
"in_config": MethodPropertyMeta("InConfig", "inConfig", "ConfigConfig", "Version142b", "Input", True),
"in_sw_id": MethodPropertyMeta("InSwId", "inSwId", "Xs:string", "Version142b", "Input", False),
}
prop_map = {
"cookie": "cookie",
"inBladeSlotId": "in_blade_slot_id",
"inChassisId": "in_chassis_id",
"inConfig": "in_config",
"inSwId": "in_sw_id",
}
| 43.954545
| 126
| 0.708376
|
6aecc6ca8042ae1d0875e15dd45cbe5eced1937e
| 3,722
|
py
|
Python
|
JumpscaleBuildersExtra/db/BuilderPostgresql.py
|
threefoldtech/jumpscaleX_builders
|
ce17481c803acb66f512894b4978c7c70afef884
|
[
"Apache-2.0"
] | 1
|
2020-04-16T09:24:30.000Z
|
2020-04-16T09:24:30.000Z
|
JumpscaleBuildersExtra/db/BuilderPostgresql.py
|
threefoldtech/jumpscaleX_builders
|
ce17481c803acb66f512894b4978c7c70afef884
|
[
"Apache-2.0"
] | 52
|
2019-09-05T11:43:56.000Z
|
2020-05-04T11:31:34.000Z
|
JumpscaleBuildersExtra/db/BuilderPostgresql.py
|
threefoldtech/jumpscaleX_builders
|
ce17481c803acb66f512894b4978c7c70afef884
|
[
"Apache-2.0"
] | null | null | null |
from Jumpscale import j
builder_method = j.baseclasses.builder_method
import time
class BuilderPostgresql(j.baseclasses.builder):
__jslocation__ = "j.builders.db.psql"
def _init(self, **kwargs):
self.DOWNLOAD_DIR = self._joinpaths(self.DIR_BUILD, "build")
self.DATA_DIR = self._replace("{DIR_VAR}/psql/data")
self.SOCKET_DIR = "/var/run/postgresql"
@builder_method()
def build(self):
postgres_url = "https://ftp.postgresql.org/pub/source/v9.6.13/postgresql-9.6.13.tar.gz"
j.builders.tools.file_download(postgres_url, to=self.DOWNLOAD_DIR, overwrite=False, expand=True)
j.builders.system.package.ensure(["build-essential", "zlib1g-dev", "libreadline-dev", "sudo"])
cmd = self._replace(
"""
cd {DOWNLOAD_DIR}/postgresql-9.6.13
./configure --prefix={DIR_BASE}
make
"""
)
self._execute(cmd)
@builder_method()
def install(self, port=5432):
"""
kosmos 'j.builders.db.psql.install()'
kosmos 'j.builders.db.psql.stop()'
:param port:
:return:
"""
"""
:param port:
:return:
"""
cmd = self._replace(
"""
cd {DOWNLOAD_DIR}/postgresql-9.6.13
make install
"""
)
self._execute(cmd)
self._remove(self.DATA_DIR)
self.init()
def init(self, **kwargs):
if not self.tools.group_exists("postgres"):
self._execute(
'adduser --system --quiet --home {DIR_BASE} --no-create-home \
--shell /bin/bash --group --gecos "PostgreSQL administrator" postgres'
)
c = self._replace(
"""
cd {DIR_BASE}
mkdir -p log
mkdir -p {DATA_DIR} {SOCKET_DIR}
chown -R postgres {DATA_DIR} {SOCKET_DIR}
sudo -u postgres {DIR_BIN}/initdb -D {DATA_DIR} -E utf8 --locale=en_US.UTF-8
"""
)
self._execute(c)
@property
def startup_cmds(self):
if not self._exists("{DATA_DIR}"):
self.init()
# run the db with the same user when running odoo server
cmd = j.servers.startupcmd.get("postgres")
cmd.cmd_start = self._replace("sudo -u postgres {DIR_BIN}/postgres -k {SOCKET_DIR} -D {DATA_DIR}")
cmd.cmd_stop = self._replace("sudo -u postgres {DIR_BIN}/pg_ctl stop -D {DATA_DIR}")
cmd.ports = [5432]
cmd.path = j.core.tools.text_replace("{DIR_BASE}/bin")
return [cmd]
def test(self):
"""
kosmos 'j.builders.db.psql.test()'
:return:
"""
self.stop()
self.start()
_, response, _ = self._execute("pg_isready -h localhost -p 5432", showout=False)
assert "accepting connections" in response
self.stop()
print("TEST OK")
@builder_method()
def sandbox(self):
self.PACKAGE_DIR = self._replace("{DIR_SANDBOX}{DIR_BASE}")
self._dir_ensure(self.PACKAGE_DIR)
# data dir
self._dir_ensure("%s/apps/psql/data" % self.PACKAGE_DIR)
self._execute(
"""
cd {DOWNLOAD_DIR}/postgresql-9.6.13
make install DESTDIR={DIR_SANDBOX}
"""
)
bins_dir = self._replace("{PACKAGE_DIR}/bin")
j.tools.sandboxer.libs_clone_under(bins_dir, self.DIR_SANDBOX)
# startup.toml
templates_dir = self._joinpaths(j.sal.fs.getDirName(__file__), "templates")
startup_path = self._replace("{DIR_SANDBOX}/.startup.toml")
self._copy(self._joinpaths(templates_dir, "postgres_startup.toml"), startup_path)
| 30.508197
| 107
| 0.573348
|
d902ce52d84ef4699f35f66bd47e45a13ee56545
| 1,632
|
py
|
Python
|
torch_geometric/utils/to_dense_batch.py
|
DL-85/pytorch_geometric
|
eb12a94a667e881c4a6bff26b0453428bcb72393
|
[
"MIT"
] | 8
|
2020-06-03T00:55:09.000Z
|
2022-01-23T16:06:56.000Z
|
torch_geometric/utils/to_dense_batch.py
|
chentingpc/pytorch_geometric
|
44c4c5069dbc4c8a96761a3b5a7e7b45c8352a53
|
[
"MIT"
] | null | null | null |
torch_geometric/utils/to_dense_batch.py
|
chentingpc/pytorch_geometric
|
44c4c5069dbc4c8a96761a3b5a7e7b45c8352a53
|
[
"MIT"
] | 6
|
2020-06-03T00:55:11.000Z
|
2022-03-16T01:14:36.000Z
|
import torch
from torch_scatter import scatter_add
def to_dense_batch(x, batch, fill_value=0):
r"""Given a sparse batch of node features
:math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}` (with
:math:`N_i` indicating the number of nodes in graph :math:`i`), creates a
dense node feature tensor
:math:`\mathbf{X} \in \mathbb{R}^{B \times N_{\max} \times F}` (with
:math:`N_{\max} = \max_i^B N_i`).
In addition, a second tensor holding
:math:`[N_1, \ldots, N_B] \in \mathbb{N}^B` is returned.
Args:
x (Tensor): Node feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}`.
batch (LongTensor): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots,
B-1\}}^N`, which assigns each node to a specific example.
fill_value (float, optional): The value for invalid entries in the
resulting dense output tensor. (default: :obj:`0`)
:rtype: (:class:`Tensor`, :class:`LongTensor`)
"""
num_nodes = scatter_add(batch.new_ones(x.size(0)), batch, dim=0)
batch_size, max_num_nodes = num_nodes.size(0), num_nodes.max().item()
cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)], dim=0)
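# Each node's slot in the flat [batch_size * max_num_nodes] buffer is its
# position inside its own graph plus the offset of that graph's block.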
index = torch.arange(batch.size(0), dtype=torch.long, device=x.device)
index = (index - cum_nodes[batch]) + (batch * max_num_nodes)
size = [batch_size * max_num_nodes] + list(x.size())[1:]
batch_x = x.new_full(size, fill_value)
batch_x[index] = x
size = [batch_size, max_num_nodes] + list(x.size())[1:]
batch_x = batch_x.view(size)
return batch_x, num_nodes
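# A small usage sketch (not part of the original module): two graphs with 2 and
# 3 nodes and a single feature become a dense [2, 3, 1] tensor padded with the
# fill value, together with the per-graph node counts.
#
#     x = torch.arange(5, dtype=torch.float).view(5, 1)
#     batch = torch.tensor([0, 0, 1, 1, 1])
#     dense, num_nodes = to_dense_batch(x, batch)
#     # dense.shape == (2, 3, 1); dense[0, 2] keeps the fill value 0
#     # num_nodes.tolist() == [2, 3]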
| 41.846154
| 79
| 0.634191
|
ba081391a3a0d0723e3225b723e08441b833bea8
| 36,682
|
py
|
Python
|
tf_verify/onnx_translator.py
|
cherrywoods/eran
|
c12c56c9be86da0e25ed04370b889d162a3d74cc
|
[
"Apache-2.0"
] | null | null | null |
tf_verify/onnx_translator.py
|
cherrywoods/eran
|
c12c56c9be86da0e25ed04370b889d162a3d74cc
|
[
"Apache-2.0"
] | null | null | null |
tf_verify/onnx_translator.py
|
cherrywoods/eran
|
c12c56c9be86da0e25ed04370b889d162a3d74cc
|
[
"Apache-2.0"
] | 1
|
2021-09-13T02:45:45.000Z
|
2021-09-13T02:45:45.000Z
|
# This file has been modified from the original file with the same name
# from the source licensed at the terms below.
# Modifications: formatting
"""
Copyright 2020 ETH Zurich, Secure, Reliable, and Intelligent Systems Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import onnx
from onnx import numpy_helper
from config import config
import warnings
def onnxshape_to_intlist(onnxshape):
"""
ONNX has its own wrapper for shapes. Our optimizer expects a list of ints.
Arguments
---------
onnxshape : TensorShapeProto
Return
------
output : list
list of ints corresponding to onnxshape
"""
result = list(map(lambda j: 1 if j.dim_value is None else int(j.dim_value), onnxshape.dim))
# No shape means a single value
if not result:
return [1]
# convert NCHW to NHWC
if len(result) == 4:
return [result[0], result[2], result[3], result[1]]
return result
def nchw_to_nhwc_shape(shape):
"""
Reorders the entries of a shape list from NCHW to NHWC order, since ONNX uses NCHW while ELINA expects NHWC.
:param shape: the shape to be converted
:return: converted shape
"""
assert len(shape) == 4, "Unexpected shape size"
return [shape[0], shape[2], shape[3], shape[1]]
def nchw_to_nhwc_index(index: int) -> int:
"""
Converts a single axis index from NCHW to NHWC, since ONNX uses NCHW while ELINA expects NHWC.
:param index: the index to be converted
:return: converted index
"""
assert 0 <= index <= 3, f"index out of range: {index}"
if index == 0: # batch (N)
return 0
elif index == 1: # channel (C)
return 3
else:
return index - 1
def nchw_to_nhwc(array):
"""
ONNX uses NCHW. ELINA expects NHWC
:param array: array to be converted
:return: converted array
"""
if array.ndim == 4:
return array.transpose(0, 2, 3, 1)
return array
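# A tiny sketch (not part of the original file) of the layout conventions the
# helpers above implement, for a single 4x4 RGB image:
#
#     import numpy as np
#     nchw = np.zeros((1, 3, 4, 4))                      # ONNX layout: N, C, H, W
#     assert nchw_to_nhwc(nchw).shape == (1, 4, 4, 3)    # ELINA layout: N, H, W, C
#     assert nchw_to_nhwc_shape([1, 3, 4, 4]) == [1, 4, 4, 3]
#     assert nchw_to_nhwc_index(1) == 3                  # the channel axis moves last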
def reshape_nhwc(shape_in, shape_out):
#print(shape_in, shape_out)
ndim_in = len(shape_in)
ndim_out = len(shape_out)
total_in = np.prod(shape_in[1:ndim_in])
total_out = np.prod(shape_out[1:ndim_out])
assert total_in == total_out, "Reshape doesn't have same number of neurons before and after"
array = np.asarray(range(total_in)).reshape(shape_in[1:ndim_in])
if array.ndim == 3:
array = array.transpose((2, 0, 1))
array = array.reshape(shape_out[1:ndim_out])
if array.ndim == 3:
return array.transpose((1, 2, 0))
else:
return array
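# reshape_nhwc returns an index permutation relating the NHWC flattening of the
# input shape to the requested output shape, e.g. (sketch, not in the original):
#
#     perm = reshape_nhwc([1, 2, 2, 3], [1, 12])
#     # perm is a permutation of range(12); reshape_adjust below uses it to
#     # re-order the rows/columns of a following dense layer's weight matrix.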
def prepare_model(model):
"""
Walks the ONNX graph once, collects all constants (initializers and Constant nodes) and infers the shape of every intermediate tensor, so that the translator can later look everything up by name.
Return
------
(shape_map, constants_map, output_node_map, input_node_map, placeholdernames) : (dict, dict, dict, dict, list)
    shape_map maps tensor names to their inferred shapes (NHWC order for 4-D tensors), constants_map maps tensor names to numpy arrays,
    output_node_map and input_node_map map tensor names to the nodes that produce and consume them,
    and placeholdernames lists the names of the graph inputs.
"""
shape_map = {}
constants_map = {}
output_node_map = {}
input_node_map = {}
for initial in model.graph.initializer:
const = nchw_to_nhwc(numpy_helper.to_array(initial)).copy()
constants_map[initial.name] = const
shape_map[initial.name] = const.shape
placeholdernames = []
#print("graph ", model.graph.node)
for node_input in model.graph.input:
placeholdernames.append(node_input.name)
if node_input.name not in shape_map:
shape_map[node_input.name] = onnxshape_to_intlist(node_input.type.tensor_type.shape)
input_node_map[node_input.name] = node_input
for node in model.graph.node:
#print(node.op_type)
output_node_map[node.output[0]] = node
for node_input in node.input:
input_node_map[node_input] = node
if node.op_type == "Flatten":
#shape_map[node.output[0]] = shape_map[node.input[0]]
shape_map[node.output[0]] = [1,] + [np.prod(shape_map[node.input[0]][1:]),]
elif node.op_type == "Constant":
const = node.attribute
const = nchw_to_nhwc(numpy_helper.to_array(const[0].t)).copy()
constants_map[node.output[0]] = const
shape_map[node.output[0]] = const.shape
elif node.op_type in ["MatMul", "Gemm"]:
transA = 0
transB = 0
for attribute in node.attribute:
if 'transA' == attribute.name:
transA = attribute.i
elif 'transB' == attribute.name:
transB = attribute.i
input_shape_A = ([1] if len(shape_map[node.input[0]])==1 else []) + list(shape_map[node.input[0]])
input_shape_B = list(shape_map[node.input[1]]) + ([1] if len(shape_map[node.input[1]])==1 else [])
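# output shape of the (possibly transposed) product: M rows taken from A
# (respecting transA), N columns taken from B (respecting transB)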
M = input_shape_A[transA]
N = input_shape_B[1 - transB]
shape_map[node.output[0]] = [M, N]
elif node.op_type in ["Add", "Sub", "Mul", "Div"]:
shape_map[node.output[0]] = shape_map[node.input[0]]
if node.input[0] in constants_map and node.input[1] in constants_map:
if node.op_type == "Add":
result = np.add(constants_map[node.input[0]], constants_map[node.input[1]])
elif node.op_type == "Sub":
result = np.subtract(constants_map[node.input[0]], constants_map[node.input[1]])
elif node.op_type == "Mul":
result = np.multiply(constants_map[node.input[0]], constants_map[node.input[1]])
elif node.op_type == "Div":
result = np.divide(constants_map[node.input[0]], constants_map[node.input[1]])
constants_map[node.output[0]] = result
elif node.op_type in ["Conv", "MaxPool", "AveragePool"]:
output_shape = []
input_shape = shape_map[node.input[0]]
require_kernel_shape = node.op_type in ["MaxPool", "AveragePool"]
if not require_kernel_shape:
filter_shape = shape_map[node.input[1]]
kernel_shape = filter_shape[1:-1]
strides = [1, 1]
padding = [0, 0, 0, 0]
auto_pad = 'NOTSET'
dilations = [1, 1]
group = 1
ceil_mode = 0
for attribute in node.attribute:
if attribute.name == 'strides':
strides = attribute.ints
elif attribute.name == 'pads':
padding = attribute.ints
elif attribute.name == 'auto_pad':
auto_pad = attribute.s
elif attribute.name == 'kernel_shape':
kernel_shape = attribute.ints
elif attribute.name == 'dilations':
dilations = attribute.ints
elif attribute.name == 'group':
group = attribute.i
elif attribute.name == 'ceil_mode':
ceil_mode = attribute.i
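# standard convolution/pooling arithmetic: dilate the kernel, add the padding on
# both sides, then count strided kernel positions (floor, or ceil if ceil_mode)
# to get the output size of each spatial dimension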
effective_kernel_shape = [(kernel_shape[i] - 1) * dilations[i] + 1 for i in range(len(kernel_shape))]
output_shape.append(input_shape[0])
for i in range(len(kernel_shape)):
effective_input_size = input_shape[1 + i]
effective_input_size += padding[i]
effective_input_size += padding[i + len(kernel_shape)]
if ceil_mode == 1:
strided_kernel_positions = int(np.ceil((effective_input_size - effective_kernel_shape[i]) / float(strides[i])))
else:
strided_kernel_positions = int(np.floor((effective_input_size - effective_kernel_shape[i]) / strides[i]))
output_shape.append(1 + strided_kernel_positions)
if require_kernel_shape:
output_shape.append(input_shape[3])
else:
output_shape.append(filter_shape[0])
shape_map[node.output[0]] = output_shape
elif node.op_type in ["Relu", "Sigmoid", "Tanh", "Softmax", "BatchNormalization", "LeakyRelu"]:
shape_map[node.output[0]] = shape_map[node.input[0]]
# Gather is for the moment solely for shapes
elif node.op_type == "Gather":
axis = 0
for attribute in node.attribute:
axis = attribute.i
if node.input[0] in constants_map and node.input[1] in constants_map:
data = constants_map[node.input[0]]
indexes = constants_map[node.input[1]]
constants_map[node.output[0]] = np.take(data, indexes, axis)
if node.input[0] in shape_map and node.input[1] in shape_map:
r = len(shape_map[node.input[0]])
q = len(shape_map[node.input[1]])
out_rank = q + r - 1
if out_rank == 0:
shape_map[node.output[0]] = shape_map[node.input[1]]
else:
output_shape = []
for i in range(out_rank):
if i < axis:
output_shape.append(shape_map[node.input[0]][i]) # i < axis < r
elif i >= axis and i < axis + q:
output_shape.append(shape_map[node.input[0]][i-axis]) # i - axis < q
else:
output_shape.append(shape_map[node.input[0]][i - q + 1]) # i < out_rank < q + r - 1
shape_map[node.output[0]] = output_shape
elif node.op_type == "Shape":
if node.input[0] in shape_map:
constants_map[node.output[0]] = shape_map[node.input[0]]
shape_map[node.output[0]] = [len(shape_map[node.input[0]])]
#elif node.op_type == "Cast":
#shape_map[node.output[0]] = shape_map[node.input[0]]
#print("CASTING ", node.input[0], shape_map[node.input[0]], shape_map[node.output[0]])
elif node.op_type == "Reshape":
#print("RESHAPE ", node.input, node.output)
if node.input[1] in constants_map:
total = 1
replace_index = -1
for index in range(len(constants_map[node.input[1]])):
if constants_map[node.input[1]][index] == -1:
replace_index = index
else:
total *= constants_map[node.input[1]][index]
if replace_index != -1:
constants_map[node.input[1]][replace_index] = np.prod(shape_map[node.input[0]]) / total
if len(constants_map[node.input[1]]) == 4:
shape_map[node.output[0]] = [constants_map[node.input[1]][0], constants_map[node.input[1]][2], constants_map[node.input[1]][3], constants_map[node.input[1]][1]]
else:
shape_map[node.output[0]] = constants_map[node.input[1]]
elif node.op_type == "Unsqueeze":
if node.input[0] in shape_map:
axis = node.attribute[0].ints
output_shape = list(shape_map[node.input[0]])
if node.input[0] in constants_map:
constants_map[node.output[0]] = constants_map[node.input[0]]
for i in axis:
output_shape.insert(i, 1)
if node.input[0] in constants_map:
constants_map[node.output[0]] = np.expand_dims(constants_map[node.output[0]], axis=i)
shape_map[node.output[0]] = output_shape
elif node.op_type == "Concat":
all_constant = True
n_dim = len(shape_map[node.input[0]])
if n_dim > 2:
axis = nchw_to_nhwc_index(node.attribute[0].i)
else:
axis = node.attribute[0].i
for node_input in node.input:
if not node_input in constants_map:
all_constant = False
break
if all_constant:
constants_map[node.output[0]] = np.concatenate([constants_map[input] for input in node.input], axis=axis)
all_shape_known = True
for node_input in node.input:
if not node_input in shape_map:
all_shape_known = False
break
assert all_shape_known, "Unknown shape for at least one node input!"
new_axis_size = 0
for node_input in node.input:
new_axis_size += shape_map[node_input][axis]
shape_map[node.output[0]] = [shape_map[node.input[0]][i] if i != axis else new_axis_size for i in range(len(shape_map[node.input[0]]))]
if not all_constant:
assert axis == n_dim-1, "ELINA currently only supports concatenation on the channel dimension"
elif node.op_type == "Tile":
repeats = nchw_to_nhwc_shape(constants_map[node.input[1]])
input_shape = list(shape_map[node.input[0]])
assert len(repeats) == len(input_shape), "Expecting one repeat factor per dimension"
output_shape = [factor * size for factor, size in zip(repeats, input_shape)]
shape_map[node.output[0]] = output_shape
repeat_index = np.where(np.array(repeats) != 1)[0]
assert len(repeat_index) == 1, "ELINA backend currently only supports repeats for one dimension"
repeat_index = repeat_index.item()
assert repeat_index == 1, "ELINA backend currently only supports repeats for the first dimension"
assert input_shape[0] == 1, "ELINA backend currently only supports repeats for dimensions of size 1"
elif node.op_type == "Expand":
if node.input[1] in constants_map:
if len(constants_map[node.input[1]]) == 4:
shape_map[node.output[0]] = [constants_map[node.input[1]][0], constants_map[node.input[1]][2], constants_map[node.input[1]][3], constants_map[node.input[1]][1]]
else:
shape_map[node.output[0]] = constants_map[node.input[1]]
result = np.zeros(shape_map[node.output[0]]) + constants_map[node.input[0]]
constants_map[node.output[0]] = result
elif node.op_type == "Pad":
input_shape = np.array(shape_map[node.input[0]])
for attribute in node.attribute:
if attribute.name == "pads":
padding = np.array(attribute.ints)
if attribute.name == "mode":
assert attribute.s == bytes(b'constant'), "only zero padding supported"
if attribute.name == "value":
assert attribute.f == 0, "only zero padding supported"
output_shape = np.copy(input_shape)
input_dim = len(input_shape)
assert len(padding) == 2* input_dim
for i in range(2,input_dim): # only pad spatial dimensions
output_shape[i-1] += padding[i]+padding[i+input_dim]
shape_map[node.output[0]] = list(output_shape)
else:
assert 0, f"Operations of type {node.op_type} are not yet supported."
#print('const_map')
#print(constants_map)
#print('shape_map')
#print(shape_map)
return shape_map, constants_map, output_node_map, input_node_map, placeholdernames
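# A minimal usage sketch (the file name is an assumption for illustration):
#
#     model = onnx.load("net.onnx")
#     shape_map, constants_map, out_map, in_map, placeholders = prepare_model(model)
#     # shape_map now holds the inferred shape of every named tensor
#     # (NHWC order for 4-D tensors), constants_map the initializer values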
class ONNXTranslator:
"""
This class is used to turn an ONNX model into two lists that then can be processed by an Optimizer object
"""
def __init__(self, model, is_gpupoly):
"""
This constructor takes a reference to an ONNX model, checks it, infers the intermediate shapes and sets up maps from tensor names to node types, producing/consuming nodes and constant values.
The graph is expected to contain only inference-time operations of the kinds handled by prepare_model() and translate() below (e.g. MatMul/Gemm, Add, Sub, Mul, Div, Conv, Reshape, MaxPool, AveragePool, Relu, Sigmoid, Tanh, LeakyRelu).
If the input comes from a Keras model, operations of type Pack, Shape, StridedSlice and Prod are ignored so that Flatten layers can be handled.
Arguments
---------
model : onnx.ModelProto
"""
if issubclass(model.__class__, onnx.ModelProto):
onnx.checker.check_model(model)
self.model = model
self.nodes = self.model.graph.node
self.is_gpupoly = is_gpupoly
self.shape_map, self.constants_map, self.output_node_map, self.input_node_map, self.placeholdernames = prepare_model(model)
else:
assert 0, 'not onnx model'
def find_input(self):
inputs_dir = {x.name: x for x in self.model.graph.input}
all_inputs = [x for y in self.nodes for x in y.input]
[all_inputs.remove(x) for y in self.nodes for x in y.output if x in all_inputs]
[all_inputs.remove(x.name) for x in self.model.graph.initializer if x.name in all_inputs]
assert all_inputs[0] in inputs_dir
return inputs_dir[all_inputs[0]]
@staticmethod
def clean_shape(shape_raw):
'''
Onnx translator expects the inputs and outputs of each node to not have 0-sized dimensions.
These can occur, if other formats are converted to onnx instead of directly exporting an onnx model.
This function handles such occurrences, setting the 0-sized dimension to 1.
Arguments
--------
shape_raw : A shape in form of a list
'''
shape_cleaned = [1 if x == 0 else x for x in shape_raw]
if 0 in shape_raw:
warnings.warn(f"0-sized dimension encountered: {shape_raw} and changed to: {shape_cleaned}",RuntimeWarning)
return shape_cleaned
def translate(self):
"""
translate() walks the ONNX graph that the constructor has prepared (shapes inferred, constants collected) and translates it into two lists which then can be processed by an Optimizer object.
Return
------
(operation_types, operation_resources) : (list, list)
A tuple with two lists, the first one has items of type str and the second one of type dict. In the first list the operation types are stored (like "Add", "MatMul", etc.).
In the second list we store the resources (matrices, biases, etc.) for those operations. It is organised as follows: operation_resources[i][domain] has the resources related to
operation_types[i] when analyzed with domain (domain is currently either 'deepzono' or 'deeppoly', as of 8/30/18)
"""
operation_types = ["Placeholder"]
# placeholder = self.model.graph.input[0]
placeholder = self.find_input()
in_out_placeholder = ([], placeholder.name, self.clean_shape(onnxshape_to_intlist(placeholder.type.tensor_type.shape)))
operation_resources = [{'deepzono':in_out_placeholder, 'deeppoly':in_out_placeholder}]
reshape_map = {}
operations_to_be_ignored = ["Pack", "Shape", "StridedSlice", "Prod", "Unsqueeze", "Softmax", "Concat", "Flatten", "BatchNormalization"]
padding_merger_dict = {}
### Check if there are Add/Sub and Div/Mul layers that can be interpreted as a normalization layer
stop_norm_layers = ["MatMul","Gemm","Conv","MaxPool","Relu","Sigmoid","Tanh","LeakyRelu"]
stop_norm_layer = len(self.nodes)
extract_mean = False
extract_std = False
for node_idx, node in enumerate(self.nodes):
if node.op_type in stop_norm_layers or (extract_mean and extract_std):
stop_norm_layer = node_idx
break
if node.op_type in ["Add","Sub"]:
extract_mean = True
elif node.op_type in ["Div", "Mul"]:
extract_std = True
extract_norm = extract_std and extract_mean
for node_idx, node in enumerate(self.nodes):
# print("node ", node.op_type)
if node.op_type == "Constant":
continue
elif node.op_type in operations_to_be_ignored:
input_name = node.input[0]
output_name = node.output[0]
if input_name in reshape_map:
reshape_map[output_name] = reshape_map[input_name]
else:
reshape_map[output_name] = input_name
continue
operation_types.append(node.op_type)
# take means and stds out of the network
if extract_norm and node_idx <= stop_norm_layer and len(operation_types) == 2 and node.op_type in ["Add", "Sub", "Mul", "Div"] and node.output[0] not in self.constants_map:
constant = self.add_resources(node)[0].reshape(-1)
if node.op_type == "Add":
config.mean = np.multiply(constant, -1)
print(f"Mean of {config.mean} extracted from network")
elif node.op_type == "Sub":
config.mean = constant
print(f"Mean of {config.mean} extracted from network")
elif node.op_type == "Mul":
config.std = np.divide(1, constant)
print(f"Std of {config.std} extracted from network")
elif node.op_type == "Div":
config.std = constant
print(f"Std of {config.std} extracted from network")
self.ignore_node(node, operation_types, reshape_map)
continue
input_onnx_names = []
for name in node.input:
kind = self.get_kind(name)
if name in reshape_map:
name = reshape_map[name]
if kind == 'Constant':
continue
input_onnx_names.append(name)
shape = self.get_shape(node.output[0])
shape = self.clean_shape(shape)
in_out_info = (input_onnx_names, node.output[0], shape)
if node.op_type == "MatMul":
deeppoly_res = self.matmul_resources(node) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type == "Gemm":
deeppoly_res = self.gemm_resources(node) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type in ["Add", "Mul"]:
left_type = self.get_kind(node.input[0])
right_type = self.get_kind(node.input[1])
if left_type == 'Constant' and right_type == 'Constant':
operation_types.pop()
elif left_type == 'Constant' or right_type == 'Constant':
deeppoly_res = self.add_resources(node) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
else:
if node.op_type != "Add":
assert 0, "we don't support residual operations other then add"
operation_types[-1] = "Resadd"
operation_resources.append({'deepzono':in_out_info, 'deeppoly':in_out_info})
elif node.op_type == "Sub":
left_type = self.get_kind(node.input[0])
right_type = self.get_kind(node.input[1])
if left_type == 'Constant' and right_type == 'Constant':
assert 0, "we don't support the subraction of two constants yet"
elif left_type == 'Constant' or right_type == 'Constant':
deeppoly_res = self.sub_resources(node) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
else:
assert 0, "we don't support the ressub yet"
operation_types[-1] = "Ressub"
operation_resources.append({'deepzono':in_out_info, 'deeppoly':in_out_info})
elif node.op_type == "Conv":
filters, bias, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right, kernel_shape = self.conv_resources(node)
if node.name in padding_merger_dict:
image_shape, in_out_info, pad_top, pad_left, pad_bottom, pad_right = self.merge_padding(node, padding_merger_dict, in_out_info, pad_top, pad_left, pad_bottom, pad_right)
deeppoly_res = (filters, bias, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type == "Pad":
image_shape, pad_top, pad_left, pad_bottom, pad_right = self.pad_resources(node)
deeppoly_res = (image_shape, pad_top, pad_left, pad_bottom, pad_right) + in_out_info
deepzono_res = deeppoly_res
consequent_nodes = [node_i for node_i in self.nodes if node.output[0] in node_i.input]
can_be_merged = all([node_i.op_type in ["Conv"] for node_i in consequent_nodes])
if can_be_merged:
padding_merger_dict.update({node_i.name: deeppoly_res for node_i in consequent_nodes})
self.ignore_node(node, operation_types, reshape_map)
else:
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type == "MaxPool" or node.op_type == "AveragePool":
image_shape, kernel_shape, strides, padding, dilations, pad_top, pad_left, pad_bottom, pad_right, ceil_mode, storage_order = self.pool_resources(node)
if node.name in padding_merger_dict:
image_shape, in_out_info, pad_top, pad_left, pad_bottom, pad_right = self.merge_padding(node, padding_merger_dict, in_out_info, pad_top, pad_left, pad_bottom, pad_right)
deeppoly_res = (image_shape, kernel_shape, strides, pad_top, pad_left, pad_bottom, pad_right) + in_out_info
# TODO padding is expected to be string in tf. dilations, auto_pad, ceil_mode, storage_order are unused at the moment
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type == "Placeholder":
assert 0, "Placeholder is not in the ONNX graph"
elif node.op_type in ["Relu", "Sigmoid", "Tanh", "LeakyRelu"]:
deeppoly_res = self.nonlinearity_resources(node) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
# Gather is for the moment solely for shapes
elif node.op_type == "Gather":
only_shape, image_shape, indexes, axis = self.gather_resources(node)
if only_shape:
self.ignore_node(node, operation_types, reshape_map)
else:
deeppoly_res = (image_shape, indexes, axis) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type == "Expand":
only_shape, image_shape, to_expand = self.expand_resources(node)
if only_shape:
operation_types.pop()
else:
deeppoly_res = (image_shape, indexes, axis) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type == "Reshape":
if node.output[0] in self.input_node_map and self.input_node_map[node.output[0]].op_type in ["MatMul", "Gemm"]:
self.ignore_node(node, operation_types, reshape_map)
elif node.output[0] in self.input_node_map and self.input_node_map[node.output[0]].op_type in ["Relu", "Sigmoid", "Tanh", "LeakyRelu"] and self.input_node_map[self.input_node_map[node.output[0]].output[0]].op_type == "Reshape":
# ignore this reshape even in the shape_map
self.shape_map[node.output[0]] = self.shape_map[node.input[0]]
self.shape_map[self.input_node_map[node.output[0]].output[0]] = self.shape_map[node.input[0]]
self.ignore_node(node, operation_types, reshape_map)
else:
shape_in = self.get_shape(node.input[0])
shape_out = self.get_shape(node.output[0])
if len(shape_in) == 2 and len(shape_out) == 2:
self.ignore_node(node, operation_types, reshape_map)
else:
indexes = reshape_nhwc(shape_in, shape_out)
deeppoly_res = (indexes,) + in_out_info
deepzono_res = deeppoly_res
operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
elif node.op_type == "Concat":
n_dim = len(self.shape_map[node.input[0]])
if n_dim > 2:
axis = nchw_to_nhwc_index(node.attribute[0].i)
else:
axis = node.attribute[0].i
assert axis == n_dim - 1, "ELINA backend currently only supports concatenation on the channel dimension"
channels = []
for input_node in node.input:
channels.append(self.get_shape(input_node)[axis])
# width = shape[1]
# height = shape[2]
operation_resources.append({'deeppoly': (channels,) + in_out_info})
elif node.op_type == "Tile":
repeats = nchw_to_nhwc_shape(self.constants_map[node.input[1]])
repeat_factor = repeats[repeats != 1].item()
operation_resources.append({'deeppoly': (repeat_factor,) + in_out_info})
else:
assert 0, "Operations of type " + node.op_type + " are not yet supported."
assert all([0 not in y[-1] for x in operation_resources for y in x.values()]), "Ensure inputs and outputs include no dimensions of size 0"
return operation_types, operation_resources
def ignore_node(self, node, operation_types, reshape_map):
operation_types.pop()
input_name = node.input[0]
#print("ignore ", len(node.input), reshape_map)
output_name = node.output[0]
if input_name in reshape_map:
reshape_map[output_name] = reshape_map[input_name]
else:
reshape_map[output_name] = input_name
def merge_padding(self, node, padding_merger_dict, in_out_info, pad_top, pad_left, pad_bottom, pad_right):
image_shape, m_pad_top, m_pad_left, m_pad_bottom, m_pad_right, input_node, _, _ = padding_merger_dict[node.name]
in_out_info = (input_node, in_out_info[1], in_out_info[2])
pad_top += m_pad_top
pad_left += m_pad_left
pad_bottom += m_pad_bottom
pad_right += m_pad_right
return image_shape, in_out_info, pad_top, pad_left, pad_bottom, pad_right
def get_kind(self, name):
if name in self.constants_map:
kind = 'Constant'
elif name in self.placeholdernames:
kind = 'Placeholder'
else:
kind = self.output_node_map[name].op_type
return kind
def get_shape(self, name):
if name in self.shape_map:
return self.shape_map[name]
def matmul_resources(self, node):
"""
Checks which of the node's direct ancestors is a constant and returns the underlying ONNX constant as a numpy.ndarray inside a tuple. The matrix is manipulated so that it can be
used as the left multiplier in the matrix multiplication.
Arguments
---------
node : ONNX.Node
must have op_type "MatMul"
Return
------
output : tuple
tuple with the matrix (of type numpy.ndarray) as its only item
"""
inputs = node.input
left = inputs[0]
right = inputs[1]
if left in self.constants_map:
matrix = self.constants_map[left]
matrix = self.reshape_adjust(right, matrix, True)
else:
matrix = self.constants_map[right].transpose()
matrix = self.reshape_adjust(left, matrix)
return matrix,
def reshape_adjust(self, element, matrix, is_right=False):
if self.get_kind(element) in ['Reshape', 'Flatten'] and not self.is_gpupoly: #TODO check whether it should be triggered for Flatten layers too
shape_in = self.get_shape(self.output_node_map[element].input[0])
shape_out = self.get_shape(self.output_node_map[element].output[0])
if config.debug:
print('reshape adjust ', str(shape_in), 'to', str(shape_out))
indexes = reshape_nhwc(shape_in, shape_out)
#indexes = indexes[0]
inverse_perm = np.arange(len(indexes))[np.argsort(indexes)]
if is_right:
matrix = matrix[inverse_perm, :]
else:
matrix = matrix[:, inverse_perm]
return matrix
def gemm_resources(self, node):
"""
Checks which of the node's direct ancestors is a constant and returns the underlying ONNX constant as a numpy.ndarray inside a tuple. The matrix is manipulated so that it can be
used as the left multiplier in the matrix multiplication.
Arguments
---------
node : ONNX.Node
must have op_type "Gemm"
Return
------
output : tuple
tuple with the matrix and bias (of type numpy.ndarray) and is_left used to calculate the output shape
"""
inputs = node.input
left = inputs[0]
right = inputs[1]
bias = self.constants_map[inputs[2]]
transA = False
transB = False
alpha = 1.0
beta = 1.0
for att in node.attribute:
if 'transA' == att.name:
transA = att.i == 1
elif 'transB' == att.name:
transB = att.i == 1
elif 'alpha' == att.name:
alpha = att.f
elif 'beta' == att.name:
beta = att.f
else:
assert 0, "Unkown attribute " + att.name + " for operation type " + node.op_type
if left in self.constants_map:
matrix = self.constants_map[left] if not transA else self.constants_map[left].transpose()
matrix = self.reshape_adjust(right, matrix, True)
else:
matrix = self.constants_map[right].transpose() if not transB else self.constants_map[right]
matrix = self.reshape_adjust(left, matrix)
return matrix * alpha, bias * beta
def add_resources(self, node):
"""
Checks which of the node's direct ancestors is a constant and returns the underlying ONNX constant as a numpy.ndarray inside a tuple.
Arguments
---------
node : ONNX.Node
must have op_type "Add"
Return
------
output : tuple
tuple with the addend (of type numpy.ndarray) as its only item
"""
inputs = node.input
left = inputs[0]
right = inputs[1]
if left in self.constants_map:
addend = self.constants_map[left]
else:
addend = self.constants_map[right]
return addend,
def sub_resources(self, node):
"""
Checks which of the node's direct ancestors is a constant and returns, inside a tuple, the underlying ONNX constant as a numpy.ndarray together with a bool is_minuend indicating whether the returned ndarray is the minuend.
Arguments
---------
node : ONNX.Node
must have op_type "Sub"
Return
------
output : tuple
tuple with the addend (of type numpy.ndarray) and is_minuend
"""
inputs = node.input
left = inputs[0]
right = inputs[1]
if left in self.constants_map:
addend = self.constants_map[left]
is_minuend = True
else:
addend = self.constants_map[right]
is_minuend = False
return addend, is_minuend
def conv_resources(self, node):
"""
Extracts the filter, the stride of the filter, and the padding from node as well as the shape of the input coming into node
Arguments
---------
node : ONNX.Node
must have op_type "Conv"
Return
------
output : tuple
has 9 entries: filters, bias, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right, kernel_shape
"""
inputs = node.input
image = inputs[0]
filters = self.constants_map[node.input[1]].transpose(1, 2, 3, 0)
if len(node.input) == 3:
bias = self.constants_map[node.input[2]]
else:
bias = np.zeros(filters.shape[3])
image_shape = self.get_shape(image)[1:]
pads = [0, 0, 0, 0]
for attribute in node.attribute:
if attribute.name == 'strides':
strides = attribute.ints
elif attribute.name == 'pads':
pads = attribute.ints
elif attribute.name == 'kernel_shape':
kernel_shape = attribute.ints
pad_top = pads[0]
pad_left = pads[1]
pad_bottom = pads[2]
pad_right = pads[3]
# assert pad_top == pad_bottom, 'different padding for top and bottom is not supported in ERAN'
# assert pad_left == pad_right, 'different padding for left and right is not supported in ERAN'
return filters, bias, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right, kernel_shape
def pad_resources(self, node):
"""
Extracts the padding from node as well as the shape of the input coming into node
Arguments
---------
node : ONNX.Node
must have op_type "Pad"
Return
------
output : tuple
has 5 entries: image_shape, pad_top, pad_left, pad_bottom, pad_right
"""
inputs = node.input
image = inputs[0]
image_shape = self.get_shape(image)[1:]
pads = [0, 0, 0, 0]
for attribute in node.attribute:
if attribute.name == 'pads':
pads = attribute.ints
pad_top = pads[2]
pad_left = pads[3]
pad_bottom = pads[6]
pad_right = pads[7]
return image_shape, pad_top, pad_left, pad_bottom, pad_right
def pool_resources(self, node):
"""
Extracts the incoming image size (height, width, channels), the size of the maxpool/averagepool window (height, width), and the strides of the window (height, width)
Arguments
---------
node : ONNX.Node
must have op_type "MaxPool" or "AveragePool"
Return
------
output : tuple
has 11 entries: image_shape, kernel_shape, strides, padding, dilations, pad_top, pad_left, pad_bottom, pad_right, ceil_mode, storage_order
"""
image = node.input[0]
image_shape = self.get_shape(image)[1:]
padding = 'NOTSET'
ceil_mode = 0
storage_order = 0
pads = [0, 0, 0, 0]
dilations = None
for attribute in node.attribute:
if attribute.name == 'kernel_shape':
kernel_shape = attribute.ints
if attribute.name == 'strides':
strides = attribute.ints
elif attribute.name == 'pads':
pads = attribute.ints
elif attribute.name == 'dilations':
dilations = attribute.ints
elif attribute.name == 'auto_pad':
padding = attribute.s
elif attribute.name == 'ceil_mode':
ceil_mode = attribute.i
elif attribute.name == 'storage_order':
storage_order = attribute.i
pad_top = pads[0]
pad_left = pads[1]
pad_bottom = pads[2]
pad_right = pads[3]
assert pad_top == pad_bottom, 'different padding for top and bottom is not supported in ERAN'
assert pad_left == pad_right, 'different padding for left and right is not supported in ERAN'
return image_shape, kernel_shape, strides, padding, dilations, pad_top, pad_left, pad_bottom, pad_right, ceil_mode, storage_order
def nonlinearity_resources(self, op):
"""
This function only outputs an empty tuple, to make the code look more consistent
Return
------
output : tuple
the empty tuple
"""
return ()
def gather_resources(self, node):
"""
Extracts the indexes in the image which have to be gathered.
Arguments
---------
node : ONNX.Node
must have op_type "Gather"
Return
------
output : tuple
has 4 entries: only_shape (bool), image_shape (list), indexes (numpy.ndarray), axis (int)
"""
inputs = node.input
image = inputs[0]
if node.output[0] in self.constants_map:
only_shape = True
image_shape, indexes, axis = None, None, None
else:
only_shape = False
image_shape = self.get_shape(image)[1:]
indexes = self.constants_map[node.input[1]]
axis = node.attribute[0].i
return only_shape, image_shape, indexes, axis
def expand_resources(self, node):
if node.output[0] in self.constants_map:
only_shape = True
image_shape, to_expand = None, None
else:
assert 0, "Implementation for 'Expand' is missing."
return only_shape, image_shape, to_expand
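# Illustrative sketch (an assumed helper, not the actual reshape_nhwc implementation):
# how a fully-connected weight matrix can be re-ordered when the preceding Flatten used
# NCHW ordering while activations are stored NHWC, which is what reshape_adjust above
# compensates for. Only the simple 3D -> flat case is shown.
import numpy as np


def _nchw_to_nhwc_flat_permutation(c, h, w):
    # perm[i] is the NCHW flat index of the element sitting at NHWC flat position i
    nchw_indices = np.arange(c * h * w).reshape(c, h, w)
    return nchw_indices.transpose(1, 2, 0).reshape(-1)


if __name__ == "__main__":
    c, h, w, out_dim = 2, 3, 3, 5
    perm = _nchw_to_nhwc_flat_permutation(c, h, w)
    weights = np.random.rand(out_dim, c * h * w)   # expects NCHW-flattened input
    weights_nhwc = weights[:, perm]                # columns re-ordered for NHWC-flattened input
    x = np.random.rand(c, h, w)
    x_nhwc_flat = x.transpose(1, 2, 0).reshape(-1)
    assert np.allclose(weights @ x.reshape(-1), weights_nhwc @ x_nhwc_flat)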
| 37.20284
| 231
| 0.70326
|
92a1bf6a5ab8733d3c7c7a5662dfd0385ea89b80
| 889
|
py
|
Python
|
buildbuild/api/tests/test_api_user_list_search.py
|
buildbuild/buildbuild
|
ebf9e8abf7d5e018c6c5a8fc640d5cc0caaa43ac
|
[
"BSD-3-Clause"
] | 5
|
2015-07-22T08:13:45.000Z
|
2018-12-10T05:21:41.000Z
|
buildbuild/api/tests/test_api_user_list_search.py
|
buildbuild/buildbuild
|
ebf9e8abf7d5e018c6c5a8fc640d5cc0caaa43ac
|
[
"BSD-3-Clause"
] | 2
|
2020-06-05T18:39:48.000Z
|
2021-06-10T20:36:06.000Z
|
buildbuild/api/tests/test_api_user_list_search.py
|
buildbuild/buildbuild
|
ebf9e8abf7d5e018c6c5a8fc640d5cc0caaa43ac
|
[
"BSD-3-Clause"
] | 1
|
2015-07-22T08:13:49.000Z
|
2015-07-22T08:13:49.000Z
|
from django.test import TestCase
from django.test.client import Client
from users.models import User
class TestAPIUserListSearch(TestCase):
def setUp(self):
self.test_string = "test_string"
self.user_with_test_string = User.objects.create_user(
email="test_user_with_" + self.test_string + "@example.com",
password="test_password",
)
self.user_without_test_string = User.objects.create_user(
email="test_user@example.com",
password="test_password",
)
self.client = Client()
self.response = self.client.get("/api/users/?search=" + self.test_string)
def test_api_user_list_search_should_return_valid_result(self):
self.assertContains(self.response, self.user_with_test_string.email)
self.assertNotContains(self.response, self.user_without_test_string.email)
| 35.56
| 82
| 0.697413
|
ce598dcc9c7a257ea610930771a65de9a2cdcbf3
| 4,061
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/splom/_stream.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/splom/_stream.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/splom/_stream.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "splom"
_path_str = "splom.stream"
_valid_props = {"maxpoints", "token"}
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.splom.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.splom.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 29.007143
| 82
| 0.537799
|
570066141e2b5281e8044580b27857f1aaa2453d
| 8,663
|
py
|
Python
|
neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py
|
hashsos/hashcloudos-neutron
|
76ec5ca105043be6bf7220b5c5684190ddf14952
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py
|
hashsos/hashcloudos-neutron
|
76ec5ca105043be6bf7220b5c5684190ddf14952
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py
|
hashsos/hashcloudos-neutron
|
76ec5ca105043be6bf7220b5c5684190ddf14952
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from neutron_lib import constants
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from neutron.agent.linux import ip_lib
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import topics
from neutron.conf.plugins.ml2.drivers import macvtap as config
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.agent import _common_agent as ca
from neutron.plugins.ml2.drivers.macvtap import macvtap_common
LOG = logging.getLogger(__name__)
MACVTAP_AGENT_BINARY = "neutron-macvtap-agent"
MACVTAP_FS = "/sys/class/net/"
EXTENSION_DRIVER_TYPE = 'macvtap'
config.register_macvtap_opts()
class MacvtapRPCCallBack(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
amb.CommonAgentManagerRpcCallBackBase):
# Set RPC API version to 1.0 by default.
# history
# 1.1 Support Security Group RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def network_delete(self, context, **kwargs):
LOG.debug("network_delete received")
network_id = kwargs.get('network_id')
if network_id not in self.network_map:
LOG.error("Network %s is not available.", network_id)
return
segment = self.network_map.get(network_id)
if segment and segment.network_type == constants.TYPE_VLAN:
if_mappings = self.agent.mgr.interface_mappings
vlan_device_name = macvtap_common.get_vlan_device_name(
if_mappings[segment.physical_network],
str(segment.segmentation_id))
ip_dev = ip_lib.IPDevice(vlan_device_name)
if ip_dev.exists():
LOG.debug("Delete %s", ip_dev.name)
ip_dev.link.delete()
else:
LOG.debug("Cannot delete vlan device %s; it does not exist",
vlan_device_name)
def port_update(self, context, **kwargs):
port = kwargs['port']
LOG.debug("port_update received for port %s ", port)
mac = port['mac_address']
# Put the device name in the updated_devices set.
# Do not store port details, as if they're used for processing
# notifications there is no guarantee the notifications are
# processed in the same order as the relevant API requests.
self.updated_devices.add(mac)
class MacvtapManager(amb.CommonAgentManagerBase):
def __init__(self, interface_mappings):
self.interface_mappings = interface_mappings
self.validate_interface_mappings()
self.mac_device_name_mappings = dict()
def validate_interface_mappings(self):
for physnet, interface in self.interface_mappings.items():
if not ip_lib.device_exists(interface):
LOG.error("Interface %(intf)s for physical network "
"%(net)s does not exist. Agent terminated!",
{'intf': interface, 'net': physnet})
sys.exit(1)
def ensure_port_admin_state(self, device, admin_state_up):
LOG.debug("Setting admin_state_up to %s for device %s",
admin_state_up, device)
dev = ip_lib.IPDevice(self.mac_device_name_mappings[device])
if admin_state_up:
dev.link.set_up()
else:
dev.link.set_down()
def get_agent_configurations(self):
return {'interface_mappings': self.interface_mappings}
def get_agent_id(self):
devices = ip_lib.IPWrapper().get_devices(True)
if devices:
mac = ip_lib.get_device_mac(devices[0].name)
return 'macvtap%s' % mac.replace(":", "")
else:
LOG.error("Unable to obtain MAC address for unique ID. "
"Agent terminated!")
sys.exit(1)
def get_devices_modified_timestamps(self, devices):
# TODO(kevinbenton): this should be implemented to detect
# rapid Nova instance rebuilds.
return {}
def get_all_devices(self):
devices = set()
all_device_names = os.listdir(MACVTAP_FS)
# Refresh the mac_device_name mapping
self.mac_device_name_mappings = dict()
for device_name in all_device_names:
if device_name.startswith(constants.MACVTAP_DEVICE_PREFIX):
mac = ip_lib.get_device_mac(device_name)
self.mac_device_name_mappings[mac] = device_name
devices.add(mac)
return devices
def get_extension_driver_type(self):
return EXTENSION_DRIVER_TYPE
def get_rpc_callbacks(self, context, agent, sg_agent):
return MacvtapRPCCallBack(context, agent, sg_agent)
def get_rpc_consumers(self):
consumers = [[topics.PORT, topics.UPDATE],
[topics.NETWORK, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE]]
return consumers
def plug_interface(self, network_id, network_segment, device,
device_owner):
# Setting ALLMULTICAST Flag on macvtap device to allow the guest
# receiving traffic for arbitrary multicast addresses.
# The alternative would be to let libvirt instantiate the macvtap
# device with the 'trustGuestRxFilters' option. But doing so, the guest
# would be able to change its mac address and therefore the mac
# address of the macvtap device.
dev = ip_lib.IPDevice(self.mac_device_name_mappings[device])
dev.link.set_allmulticast_on()
return True
def setup_arp_spoofing_protection(self, device, device_details):
pass
def delete_arp_spoofing_protection(self, devices):
pass
def delete_unreferenced_arp_protection(self, current_devices):
pass
def parse_interface_mappings():
if not cfg.CONF.macvtap.physical_interface_mappings:
LOG.error("No physical_interface_mappings provided, but at least "
"one mapping is required. Agent terminated!")
sys.exit(1)
try:
interface_mappings = helpers.parse_mappings(
cfg.CONF.macvtap.physical_interface_mappings)
LOG.info("Interface mappings: %s", interface_mappings)
return interface_mappings
except ValueError as e:
LOG.error("Parsing physical_interface_mappings failed: %s. "
"Agent terminated!", e)
sys.exit(1)
def validate_firewall_driver():
fw_driver = cfg.CONF.SECURITYGROUP.firewall_driver
supported_fw_drivers = ['neutron.agent.firewall.NoopFirewallDriver',
'noop']
if fw_driver not in supported_fw_drivers:
LOG.error('Unsupported configuration option for "SECURITYGROUP.'
'firewall_driver"! Only the NoopFirewallDriver is '
'supported by macvtap agent, but "%s" is configured. '
'Set the firewall_driver to "noop" and start the '
'agent again. Agent terminated!',
fw_driver)
sys.exit(1)
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
validate_firewall_driver()
interface_mappings = parse_interface_mappings()
manager = MacvtapManager(interface_mappings)
polling_interval = cfg.CONF.AGENT.polling_interval
quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
agent = ca.CommonAgentLoop(manager, polling_interval,
quitting_rpc_timeout,
constants.AGENT_TYPE_MACVTAP,
MACVTAP_AGENT_BINARY)
LOG.info("Agent initialized successfully, now running... ")
launcher = service.launch(cfg.CONF, agent)
launcher.wait()
| 38.674107
| 79
| 0.667667
|
a7292cdb07f34eb575717e849de9009232f3242a
| 19,214
|
py
|
Python
|
gnomad_mitochondria/pipeline/combine_vcfs.py
|
broadinstitute/gnomad-mitochondria
|
31b81dfb1e679e37b21f432bda22bdb15c55779e
|
[
"MIT"
] | 1
|
2021-09-05T10:56:12.000Z
|
2021-09-05T10:56:12.000Z
|
gnomad_mitochondria/pipeline/combine_vcfs.py
|
broadinstitute/gnomad-mitochondria
|
31b81dfb1e679e37b21f432bda22bdb15c55779e
|
[
"MIT"
] | 13
|
2021-08-25T13:44:53.000Z
|
2022-01-10T15:23:49.000Z
|
gnomad_mitochondria/pipeline/combine_vcfs.py
|
broadinstitute/gnomad-mitochondria
|
31b81dfb1e679e37b21f432bda22bdb15c55779e
|
[
"MIT"
] | 2
|
2021-10-06T06:46:28.000Z
|
2021-12-13T20:42:21.000Z
|
import argparse
import logging
import math
import os
import hail as hl
from hail.utils.java import info
from typing import Dict
META_DICT = {
"filter": {
"artifact_prone_site": {
"Description": "Variant overlaps an artifact-prone site"
}
},
"format": {
"DP": {"Description": "Depth of coverage", "Number": "1", "Type": "Integer"},
"FT": {
"Description": "Sample-level genotype filters",
"Number": ".",
"Type": "String",
},
"HL": {"Description": "Heteroplasmy level", "Number": "1", "Type": "Float"},
"MQ": {"Description": "Mapping quality", "Number": "1", "Type": "Float"},
"TLOD": {
"Description": "Log 10 likelihood ratio score of variant existing versus not existing",
"Number": "1",
"Type": "Float",
},
},
}
logging.basicConfig(
level=logging.INFO,
format="%(levelname)s: %(asctime)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger("combine_mitochondria_vcfs_into_mt")
logger.setLevel(logging.INFO)
logger.info("Setting hail flag to avoid array index out of bounds error...")
# Setting this flag isn't generally recommended, but is needed (since at least Hail version 0.2.75) to avoid an array index out of bounds error until changes are made in future versions of Hail
# TODO: reassess if this flag is still needed for future versions of Hail
hl._set_flags(no_whole_stage_codegen="1")
def collect_vcf_paths(
participant_data: str, vcf_col_name: str, participants_to_subset: str = None,
) -> Dict[str, str]:
"""
Create dictionary of VCF paths for only the samples specified in participants_to_subset.
.. note::
Participant data should be a tab-delimited file with at minimum columns for:
- 'entity:participant_id': sample name with prohibited characters replaced with underscores
- 's': sample name
- path to the Mutect2 VCF output, where the name of this column is supplied to the `vcf_col_name` parameter
:param participant_data: Participant data (the downloaded data tab from Terra)
:param vcf_col_name: Name of column that contains VCF output
:param participants_to_subset: Path to file of participant_ids to which the data should be subset
:return: Dictionary with sample name as key and path to VCF as value
"""
vcf_paths = {}
# Load in data from Terra
participant_ht = hl.import_table(participant_data)
# Remove participants that don't have VCF output
participant_ht = participant_ht.filter(participant_ht[vcf_col_name] != "")
# Subset participants if specified
if participants_to_subset:
participants_of_interest = hl.import_table(
participants_to_subset
).participant.collect()
participant_ht = participant_ht.filter(
hl.literal(participants_of_interest).contains(
participant_ht["entity:participant_id"]
)
)
# Add the vcf path to a dictionary with sample name as key
df = participant_ht.to_pandas()
for _, row in df.iterrows():
vcf_paths[row["s"]] = row[vcf_col_name]
return vcf_paths
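# Hypothetical example of the expected participant data layout (tab-delimited); the VCF
# column name ('mutect2_vcf' below) depends on the Terra workspace:
#
#   entity:participant_id    s          mutect2_vcf
#   sample_1                 sample-1   gs://bucket/sample-1.vcf.gz
#   sample_2                 sample-2   gs://bucket/sample-2.vcf.gz
#
# Calling collect_vcf_paths(participant_data, "mutect2_vcf") would then return
# {"sample-1": "gs://bucket/sample-1.vcf.gz", "sample-2": "gs://bucket/sample-2.vcf.gz"}.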
def multi_way_union_mts(mts: list, temp_dir: str, chunk_size: int) -> hl.MatrixTable:
"""
Hierarchically join together MatrixTables in the provided list.
:param mts: List of MatrixTables to join together
:param temp_dir: Path to temporary directory for intermediate results
:param chunk_size: Number of MatrixTables to join per chunk (the number of individual VCFs that should be combined at a time)
:return: Joined MatrixTable
"""
# Convert the MatrixTables to tables where entries are an array of structs
staging = [mt.localize_entries("__entries", "__cols") for mt in mts]
stage = 0
while len(staging) > 1:
# Calculate the number of jobs to run based on the chunk size
n_jobs = int(math.ceil(len(staging) / chunk_size))
info(f"multi_way_union_mts: stage {stage}: {n_jobs} total jobs")
next_stage = []
for i in range(n_jobs):
# Grab just the tables for the given job
to_merge = staging[chunk_size * i : chunk_size * (i + 1)]
info(
f"multi_way_union_mts: stage {stage} / job {i}: merging {len(to_merge)} inputs"
)
# Multiway zip join will produce an __entries annotation, which is an array where each element is a struct containing the __entries annotation (array of structs) for that sample
merged = hl.Table.multi_way_zip_join(to_merge, "__entries", "__cols")
# Flatten __entries while taking into account different entry lengths at different samples/variants (samples lacking a variant will be NA)
merged = merged.annotate(
__entries=hl.flatten(
hl.range(hl.len(merged.__entries)).map(
# Coalesce will return the first non-missing argument, so if the entry info is not missing, use that info, but if it is missing, create an entries struct with the correct element type for each null entry annotation (such as int32 for DP)
lambda i: hl.coalesce(
merged.__entries[i].__entries,
hl.range(hl.len(merged.__cols[i].__cols)).map(
lambda j: hl.null(
merged.__entries.__entries.dtype.element_type.element_type
)
),
)
)
)
)
# Flatten col annotation from array<struct{__cols: array<struct{s: str}>} to array<struct{s: str}>
merged = merged.annotate_globals(
__cols=hl.flatten(merged.__cols.map(lambda x: x.__cols))
)
next_stage.append(
merged.checkpoint(
os.path.join(temp_dir, f"stage_{stage}_job_{i}.ht"), overwrite=True
)
)
info(f"Completed stage {stage}")
stage += 1
staging.clear()
staging.extend(next_stage)
# Unlocalize the entries, and unfilter the filtered entries and populate fields with missing values
return (
staging[0]
._unlocalize_entries("__entries", "__cols", list(mts[0].col_key))
.unfilter_entries()
)
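# Worked example of the hierarchical join (illustrative numbers): with 1,000 input
# MatrixTables and chunk_size=100, stage 0 runs ceil(1000/100) = 10 jobs producing 10
# intermediate tables, and stage 1 runs ceil(10/100) = 1 job producing the final table,
# so each job only ever zip-joins at most chunk_size inputs at once.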
def join_mitochondria_vcfs_into_mt(
vcf_paths: Dict[str, str], temp_dir: str, chunk_size: int = 100
) -> hl.MatrixTable:
"""
Reformat and join individual mitochondrial VCFs into one MatrixTable.
:param vcf_paths: Dictionary of samples to combine (sample as key, path to VCF as value)
:param temp_dir: Path to temporary directory for intermediate results
:param chunk_size: Number of MatrixTables to join per chunk (the number of individual VCFs that should be combined at a time)
:return: Joined MatrixTable of samples given in vcf_paths dictionary
"""
mt_list = []
for sample, vcf_path in vcf_paths.items():
try:
mt = hl.import_vcf(vcf_path, reference_genome="GRCh38")
except Exception as e:
raise ValueError(
f"vcf path {vcf_path} does not exist for sample {sample}"
) from e
# Because the vcfs are split, there is only one AF value, although misinterpreted as an array because Number=A in VCF header
# Second value of MMQ is the value of the mapping quality for the alternate allele
# Add FT annotation for sample genotype filters (pull these from filters annotations of the single-sample VCFs)
mt = mt.select_entries("DP", HL=mt.AF[0])
mt = mt.annotate_entries(
MQ=hl.float(mt.info["MMQ"][1]),
TLOD=mt.info["TLOD"][0],
FT=hl.if_else(hl.len(mt.filters) == 0, {"PASS"}, mt.filters),
)
# Use GRCh37 reference as most external resources added in downstream scripts use GRCh37 contig names
# (although note that the actual sequences of the mitochondria in both GRCh37 and GRCh38 are the same)
mt = mt.key_rows_by(
locus=hl.locus("MT", mt.locus.position, reference_genome="GRCh37"),
alleles=mt.alleles,
)
mt = mt.key_cols_by(s=sample)
mt = mt.select_rows()
mt_list.append(mt)
combined_mt = multi_way_union_mts(mt_list, temp_dir, chunk_size)
return combined_mt
def remove_genotype_filters(
mt: hl.MatrixTable,
filters_to_remove: set = {
"possible_numt",
"mt_many_low_hets",
"FAIL",
"blacklisted_site",
},
) -> hl.MatrixTable:
"""
Remove unneeded sample-level genotype filters (in FT field of the VCF) specified by the filters_to_remove parameter.
By default, remove the 'possible_numt', 'mt_many_low_hets', and 'FAIL' filters because these filters were found to have low performance.
Also remove the 'blacklisted_site' filter because this filter did not always behave as expected in early GATK versions. This filter can be reimplemented with the apply_mito_artifact_filter function.
:param mt: MatrixTable containing genotype filters in the FT field of the VCF that should be removed
:param filters_to_remove: List of genotype filters (in FT field of VCF) that should be removed from the entries
:return: MatrixTable with specific genotype filters (in FT field of VCF) removed
"""
mt = mt.annotate_entries(FT=mt.FT.difference(filters_to_remove))
# If no filters exist after removing those specified above, set the FT field to PASS
mt = mt.annotate_entries(FT=hl.if_else(hl.len(mt.FT) == 0, {"PASS"}, mt.FT))
return mt
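# Illustrative examples of the two steps above: FT={"PASS", "possible_numt"} becomes
# {"PASS"}, while FT={"FAIL"} first becomes the empty set and is then reset to {"PASS"}.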
def determine_hom_refs(
mt: hl.MatrixTable, coverage_mt_path: str, minimum_homref_coverage: int = 100
) -> hl.MatrixTable:
"""
Use coverage to distinguish between homref and missing sites.
:param mt: MatrixTable from initial multi-sample merging, without homref sites determined
:param coverage_mt_path: MatrixTable of sample level coverage at each position (per-sample and per-base; can be generated by running annotate_coverage.py)
:param minimum_homref_coverage: Minimum depth of coverage required to call a genotype homoplasmic reference rather than missing
:return: MatrixTable with missing genotypes converted to homref depending on coverage
"""
# Convert coverage to build GRCh37 to match contig names
# Note: the mitochondrial reference genome is the same for GRCh38 and GRCh37
coverages = hl.read_matrix_table(coverage_mt_path)
coverages = coverages.key_rows_by(
locus=hl.locus("MT", coverages.locus.position, reference_genome="GRCh37")
)
mt = mt.annotate_entries(
DP=hl.if_else(hl.is_missing(mt.HL), coverages[mt.locus, mt.s].coverage, mt.DP)
)
hom_ref_expr = hl.is_missing(mt.HL) & (mt.DP > minimum_homref_coverage)
mt = mt.annotate_entries(
HL=hl.if_else(hom_ref_expr, 0.0, mt.HL),
FT=hl.if_else(hom_ref_expr, {"PASS"}, mt.FT),
DP=hl.if_else(
hl.is_missing(mt.HL) & (mt.DP <= minimum_homref_coverage),
hl.null(hl.tint32),
mt.DP,
),
)
return mt
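# Summary of the genotype outcomes above (illustrative): for a site where HL is missing,
# the per-base coverage is looked up; if DP > minimum_homref_coverage the genotype becomes
# homoplasmic reference (HL=0.0, FT={"PASS"}), otherwise DP is set to missing and the
# genotype stays missing. Sites with a defined HL are left unchanged.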
def apply_mito_artifact_filter(
mt: hl.MatrixTable, artifact_prone_sites_path: str,
) -> hl.MatrixTable:
"""
Add in artifact_prone_site filter.
:param mt: MatrixTable to be annotated with artifact_prone_sites filter
:param artifact_prone_sites_path: Path to BED file of artifact_prone_sites to flag in the filters column
:return: MatrixTable with artifact_prone_sites filter
"""
# Apply "artifact_prone_site" filter to any SNP or deletion that spans a known problematic site
bed = hl.import_bed(artifact_prone_sites_path)
bed = bed.annotate(target="artifact")
# Create a region annotation containing the interval that the variant overlaps (for SNP will be one position, but will be longer for deletions based on the length of the deletion)
mt = mt.annotate_rows(
region=hl.interval(
hl.locus("MT", mt.locus.position, reference_genome="GRCh37"),
hl.locus(
"MT",
mt.locus.position + hl.len(mt.alleles[0]) - 1,
reference_genome="GRCh37",
),
includes_end=True,
)
)
# Annotate if the start of the variant overlaps an interval in the bed file
mt = mt.annotate_rows(start_overlaps=bed.index(mt.region.start, all_matches=True))
# Annotate if the end of the variant overlaps an interval in the bed file
mt = mt.annotate_rows(end_overlaps=bed.index(mt.region.end, all_matches=True))
# Create struct containing locus and allele (need to the check if any position of the allele overlaps an artifact-prone site, not just the locus)
mt_temp = mt.annotate_rows(variant=hl.struct(locus=mt.locus, alleles=mt.alleles))
mt_temp = mt_temp.key_rows_by(mt_temp.region)
# Need to account for cases where the start and end of the variant interval don't fall within a bed interval, but start before and after the interval (the bed interval falls completely within the variant interval)
bed_temp = bed.annotate(
contained_mt_alleles=mt_temp.index_rows(
bed.interval.start, all_matches=True
).variant
)
# Explode so that each allele is on its own row and create locus and allele annotations
bed_temp = bed_temp.explode(bed_temp.contained_mt_alleles).rename(
{"contained_mt_alleles": "contained_mt_allele"}
)
bed_temp = bed_temp.annotate(
locus=bed_temp.contained_mt_allele.locus,
alleles=bed_temp.contained_mt_allele.alleles,
)
bed_temp = bed_temp.key_by(bed_temp.locus, bed_temp.alleles)
# Annotate back onto the original mt cases where the bed interval falls completely within the variant interval
mt = mt.annotate_rows(start_and_end_span=bed_temp[mt.locus, mt.alleles].target)
# Add artifact-prone site filter to any SNP/deletion that starts within, ends within, or completely overlaps an artifact-prone site
mt = mt.annotate_rows(
filters=hl.if_else(
(hl.len(mt.start_overlaps) > 0)
| (hl.len(mt.end_overlaps) > 0)
| (hl.is_defined(mt.start_and_end_span)),
{"artifact_prone_site"},
{"PASS"},
)
)
mt = mt.drop("region", "start_overlaps", "end_overlaps", "start_and_end_span")
return mt
def main(args): # noqa: D103
participant_data = args.participant_data
coverage_mt_path = args.coverage_mt_path
vcf_col_name = args.vcf_col_name
artifact_prone_sites_path = args.artifact_prone_sites_path
output_bucket = args.output_bucket
file_name = args.file_name
minimum_homref_coverage = args.minimum_homref_coverage
chunk_size = args.chunk_size
output_path_mt = f"{output_bucket}/raw_combined.mt"
if not args.overwrite and hl.hadoop_exists(output_path_mt):
logger.warning(
"Overwrite is set to False but file already exists at %s, script will run but output will not be written",
output_path_mt,
)
logger.info("Collecting VCF paths for samples to subset...")
vcf_paths = collect_vcf_paths(
participant_data, vcf_col_name, args.participants_to_subset
)
logger.info("Combining VCFs...")
combined_mt = join_mitochondria_vcfs_into_mt(vcf_paths, args.temp_dir, chunk_size)
combined_mt = combined_mt.checkpoint(output_path_mt, overwrite=args.overwrite)
logger.info("Removing select sample-level filters...")
combined_mt = remove_genotype_filters(combined_mt)
logger.info("Determining homoplasmic reference sites...")
combined_mt = determine_hom_refs(
combined_mt, coverage_mt_path, minimum_homref_coverage
)
logger.info("Applying artifact_prone_site fiter...")
combined_mt = apply_mito_artifact_filter(combined_mt, artifact_prone_sites_path)
logger.info("Writing combined MT and VCF...")
# Set the file names for output files
out_vcf = f"{output_bucket}/{file_name}.vcf.bgz"
out_mt = f"{output_bucket}/{file_name}.mt"
combined_mt = combined_mt.checkpoint(out_mt, overwrite=args.overwrite)
# For the VCF output, join FT values by semicolon
combined_mt = combined_mt.annotate_entries(
FT=hl.str(";").join(hl.array(combined_mt.FT))
)
hl.export_vcf(combined_mt, out_vcf, metadata=META_DICT)
if __name__ == "__main__":
p = argparse.ArgumentParser(
description="This script combines individual mitochondria VCF files into one MatrixTable, determines homoplasmic reference sites, and applies an artifact_prone_site filter"
)
p.add_argument(
"-p",
"--participant-data",
help="Participant data (the downloaded data tab from Terra), should be a tab-delimited file with at minimum columns for 'entity:participant_id' (sample name with prohibited characters replaced with underscores), 's' (sample name), and VCF output (path to the Mutect2 VCF output, where the name of this column is supplied to the `vcf_col_name` parameter)",
required=True,
)
p.add_argument(
"-c",
"--coverage-mt-path",
help="Path to MatrixTable of sample-level coverage (per-sample and per-base, can be generated by running annotate_coverage.py)",
required=True,
)
p.add_argument(
"-v",
"--vcf-col-name",
help="Name of column in participant data file that contains the path to the VCF output by Mutect2",
required=True,
)
p.add_argument(
"-a",
"--artifact-prone-sites-path",
help="Path to BED file of artifact-prone sites to flag in the FILTER column",
required=True,
)
p.add_argument(
"-o",
"--output-bucket",
help="Path to bucket to which results should be written",
required=True,
)
p.add_argument(
"-t",
"--temp-dir",
help="Temporary directory to use for intermediate outputs",
required=True,
)
p.add_argument(
"-f",
"--file-name",
help="File name to use for output files (will be used the the .vcf.bgz and .mt outputs)",
required=True,
)
p.add_argument(
"-s",
"--participants-to-subset",
help="Path to txt file of participant_ids to which the data should be subset (file should contain header (named 'participant') and one line for each participant_id matching the 'entity:participant_id's supplied in Terra",
)
p.add_argument(
"--minimum-homref-coverage",
help="Minimum depth of coverage required to call a genotype homoplasmic reference rather than missing",
type=int,
default=100,
)
p.add_argument(
"--chunk-size",
help="Chunk size to use for combining VCFs (the number of individual VCFs that should be combined at a time)",
type=int,
default=100,
)
p.add_argument("--overwrite", help="Overwrites existing files", action="store_true")
args = p.parse_args()
main(args)
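# Hypothetical invocation (paths and bucket names are placeholders):
#   python combine_vcfs.py \
#       -p participants.tsv -v mutect2_vcf -c gs://bucket/coverage.mt \
#       -a gs://bucket/artifact_prone_sites.bed -o gs://bucket/output \
#       -t gs://bucket/tmp -f combined --chunk-size 100 --overwrite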
| 41.860566
| 363
| 0.667586
|
a9c34b3aab0944ea80b9086e566ab285537a5b0d
| 3,160
|
py
|
Python
|
scripts/cas_sql_2.py
|
snpham/jacaranda
|
2a69f699d0a6a936aa4ce80c7561198b7e87f287
|
[
"Apache-2.0"
] | null | null | null |
scripts/cas_sql_2.py
|
snpham/jacaranda
|
2a69f699d0a6a936aa4ce80c7561198b7e87f287
|
[
"Apache-2.0"
] | null | null | null |
scripts/cas_sql_2.py
|
snpham/jacaranda
|
2a69f699d0a6a936aa4ce80c7561198b7e87f287
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import aiohttp
from urllib.parse import urlencode
import numpy as np
from bs4 import BeautifulSoup
import urllib.request, urllib.parse, urllib.error
import re
def remove_sql_comments(sql):
"""Strip SQL comments starting with --"""
return ' \n'.join(map(lambda x: x.split('--')[0], sql.split('\n')))
if __name__ == '__main__':
pass
async def fetch(session, url):
async with session.get(url) as resp:
return await resp.text()
# Catch HTTP errors/exceptions here
async def fetch_concurrent(urls):
loop = asyncio.get_event_loop()
async with aiohttp.ClientSession() as session:
tasks = []
for u in urls:
tasks.append(loop.create_task(fetch(session, u)))
for result in asyncio.as_completed(tasks):
page = await result
#Do whatever you want with results
soup = BeautifulSoup(page, "html.parser")
# print(soup('textarea')[1].contents[0][:-1])
attributes = soup('textarea')[1].contents[0][:-1]
# saving
attributes = np.array(re.split('\n', attributes))[1:]
full_csv = 'inputs/SDSS/sdss_full_hyp.csv'
script = str(soup.find_all('script')[1])
link = re.findall(r'\(p[^()]*\)', script)[0]
print(link, len(attributes))
# print(soup.find_all('script')[1])
with open(full_csv, 'a') as f:
for row in attributes:
print(row, file=f)
NOBJECTS = 10000
# CASjobs
url = 'https://skyserver.sdss.org/dr16/en/tools/chart/f_sql.aspx'
# range_i = [0, 14, 14.5, 15]
# range1 = list(np.arange(range_i[-1]+0.1, 16, 0.1))
# ranges = np.hstack([range_i, range1])
# range2 = list(np.arange(ranges[-1]+0.01, 17.5, 0.01))
# ranges = np.hstack([ranges, range2])
# range3 = list(np.arange(ranges[-1]+0.001, 100, 0.001))
# ranges = np.hstack([ranges, range3])
ranges = list(np.arange(27.911+0.1, 100, 1))
chunks = [ranges[x:x+20] for x in range(0, len(ranges), 20)]
for chunk in chunks:
print(chunk)
urls = []
count = 0
for ii, range in enumerate(chunk, 0):
if ii == 0:
pass
else:
query_text = ('\n'.join((f"SELECT TOP {NOBJECTS}",
" p.objid, p.ra, p.dec,",
" p.u, p.g, p.r, p.i, p.z,",
" p.mCr4_u, p.mCr4_g, p.mCr4_r, p.mCr4_i, p.mCr4_z,",
" p.petroR50_u, p.petroR50_g, p.petroR50_r, p.petroR50_i, p.petroR50_z,",
" p.petroR90_u, p.petroR90_g, p.petroR90_r, p.petroR90_i, p.petroR90_z,",
" s.class, s.subclass, s.z, s.zerr",
" FROM PhotoObj AS p",
" JOIN SpecObj AS s ON s.bestobjid = p.objid",
" WHERE ",
f" (p.u BETWEEN {chunk[ii-1]} AND {chunk[ii]})",
"")))
query_text = remove_sql_comments(query_text)
params = urlencode(dict(cmd=query_text, format='html'))
urls.append(url + '?%s' % params)
# print(urls)
asyncio.run(fetch_concurrent(urls))
| 32.57732
| 89
| 0.549684
|
a8036184377c9693e088c4f96d52bce12dd0ec1f
| 545
|
py
|
Python
|
aleph/migrate/versions/850a674e3ccf_drop_unique_on_cache.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 1
|
2017-07-28T12:54:09.000Z
|
2017-07-28T12:54:09.000Z
|
aleph/migrate/versions/850a674e3ccf_drop_unique_on_cache.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 7
|
2017-08-16T12:49:23.000Z
|
2018-02-16T10:22:11.000Z
|
aleph/migrate/versions/850a674e3ccf_drop_unique_on_cache.py
|
gazeti/aleph
|
f6714c4be038471cfdc6408bfe88dc9e2ed28452
|
[
"MIT"
] | 6
|
2017-07-26T12:29:53.000Z
|
2017-08-18T09:35:50.000Z
|
"""Drop unique on cache.
Revision ID: 850a674e3ccf
Revises: dfd8b2480e1b
Create Date: 2016-05-20 19:05:42.367783
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '850a674e3ccf'
down_revision = 'dfd8b2480e1b'
def upgrade():
op.drop_constraint(u'cache_key_key', 'cache', type_='unique')
op.create_index(op.f('ix_cache_key'), 'cache', ['key'], unique=False)
def downgrade():
op.drop_index(op.f('ix_cache_key'), table_name='cache')
op.create_unique_constraint(u'cache_key_key', 'cache', ['key'])
| 23.695652
| 73
| 0.717431
|
d628eca9fe7211401d9ad0dd42dd82bd5e06b3c0
| 5,761
|
py
|
Python
|
tests/serverless/runner/test_runner.py
|
ismailyenigul/checkov
|
b65daa796e166568fdd02591ab5232e567f4cd36
|
[
"Apache-2.0"
] | 5
|
2021-07-29T18:08:40.000Z
|
2022-03-21T04:39:32.000Z
|
tests/serverless/runner/test_runner.py
|
ismailyenigul/checkov
|
b65daa796e166568fdd02591ab5232e567f4cd36
|
[
"Apache-2.0"
] | null | null | null |
tests/serverless/runner/test_runner.py
|
ismailyenigul/checkov
|
b65daa796e166568fdd02591ab5232e567f4cd36
|
[
"Apache-2.0"
] | 2
|
2021-08-23T13:25:36.000Z
|
2021-11-05T21:44:52.000Z
|
import dis
import inspect
import os
import unittest
from pathlib import Path
from checkov.runner_filter import RunnerFilter
from checkov.serverless.runner import Runner
class TestRunnerValid(unittest.TestCase):
def test_record_relative_path_with_relative_dir(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "resources")
# this is the relative path to the directory to scan (what would actually get passed to the -d arg)
dir_rel_path = os.path.relpath(scan_dir_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_49']
report = runner.run(root_folder=dir_rel_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='serverless', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{dir_rel_path}{record.file_path}')
def test_record_relative_path_with_abs_dir(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "resources")
dir_rel_path = os.path.relpath(scan_dir_path)
dir_abs_path = os.path.abspath(scan_dir_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_49']
report = runner.run(root_folder=dir_abs_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='serverless', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{dir_rel_path}{record.file_path}')
def test_record_relative_path_with_relative_file(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "serverless.yml")
# this is the relative path to the file to scan (what would actually get passed to the -f arg)
file_rel_path = os.path.relpath(scan_file_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_49']
report = runner.run(root_folder=None, external_checks_dir=None, files=[file_rel_path],
runner_filter=RunnerFilter(framework='serverless', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{file_rel_path}')
def test_record_relative_path_with_abs_file(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "serverless.yml")
file_rel_path = os.path.relpath(scan_file_path)
file_abs_path = os.path.abspath(scan_file_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_49']
report = runner.run(root_folder=None, external_checks_dir=None, files=[file_abs_path],
runner_filter=RunnerFilter(framework='serverless', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{file_rel_path}')
def test_wrong_check_imports(self):
wrong_imports = ["arm", "cloudformation", "dockerfile", "helm", "kubernetes", "terraform"]
check_imports = []
checks_path = Path(inspect.getfile(Runner)).parent.joinpath("checks")
for file in checks_path.rglob("*.py"):
with file.open() as f:
instructions = dis.get_instructions(f.read())
import_names = [instr.argval for instr in instructions if "IMPORT_NAME" == instr.opname]
for import_name in import_names:
wrong_import = next((import_name for x in wrong_imports if x in import_name), None)
if wrong_import:
check_imports.append({file.name: wrong_import})
assert len(check_imports) == 0, f"Wrong imports were added: {check_imports}"
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| 46.088
| 107
| 0.678702
|
adac2bc920d5210824c74a26677cf0bfc4379759
| 53,574
|
py
|
Python
|
src/sage/quadratic_forms/binary_qf.py
|
tobiasdiez/sagetrac-mirror
|
20ca179bed4ff605de4111362b02c8e2acca617e
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/quadratic_forms/binary_qf.py
|
tobiasdiez/sagetrac-mirror
|
20ca179bed4ff605de4111362b02c8e2acca617e
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/quadratic_forms/binary_qf.py
|
tobiasdiez/sagetrac-mirror
|
20ca179bed4ff605de4111362b02c8e2acca617e
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
"""
Binary Quadratic Forms with Integer Coefficients
This module provides a specialized class for working with a binary quadratic
form `a x^2 + b x y + c y^2`, stored as a triple of integers `(a, b, c)`.
EXAMPLES::
sage: Q = BinaryQF([1, 2, 3])
sage: Q
x^2 + 2*x*y + 3*y^2
sage: Q.discriminant()
-8
sage: Q.reduced_form()
x^2 + 2*y^2
sage: Q(1, 1)
6
TESTS::
sage: Q == loads(dumps(Q))
True
AUTHORS:
- Jon Hanke (2006-08-08):
- Appended to add the methods :func:`BinaryQF_reduced_representatives`,
:meth:`~BinaryQF.is_reduced`, and ``__add__`` on 8-3-2006 for Coding Sprint
#2.
- Added Documentation and :meth:`~BinaryQF.complex_point` method on 8-8-2006.
- Nick Alexander: add doctests and clean code for Doc Days 2
- William Stein (2009-08-05): composition; some ReSTification.
- William Stein (2009-09-18): make immutable.
- Justin C. Walker (2011-02-06):
- Add support for indefinite forms.
"""
# ****************************************************************************
# Copyright (C) 2006-2009 William Stein and Jon Hanke
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from functools import total_ordering
from sage.libs.pari.all import pari_gen
from sage.rings.all import ZZ, is_fundamental_discriminant
from sage.arith.all import gcd
from sage.structure.sage_object import SageObject
from sage.matrix.matrix_space import MatrixSpace
from sage.matrix.constructor import Matrix
from sage.misc.cachefunc import cached_method
@total_ordering
class BinaryQF(SageObject):
r"""
A binary quadratic form over `\ZZ`.
INPUT:
One of the following:
- ``a`` -- either a 3-tuple of integers, or a quadratic
homogeneous polynomial in two variables with integer
coefficients
- ``a``, ``b``, ``c`` -- three integers
OUTPUT:
the binary quadratic form a*x^2 + b*x*y + c*y^2.
EXAMPLES::
sage: b = BinaryQF([1, 2, 3])
sage: b.discriminant()
-8
sage: b1 = BinaryQF(1, 2, 3)
sage: b1 == b
True
sage: R.<x, y> = ZZ[]
sage: BinaryQF(x^2 + 2*x*y + 3*y^2) == b
True
sage: BinaryQF(1, 0, 1)
x^2 + y^2
"""
def __init__(self, a, b=None, c=None):
r"""
Create a binary quadratic form `ax^2 + bxy + cy^2`.
INPUT:
One of the following:
- ``a`` -- either a 3-tuple of integers, or a quadratic
homogeneous polynomial in two variables with integer
coefficients
- ``a``, ``b``, ``c`` -- three integers
EXAMPLES::
sage: Q = BinaryQF([1, 2, 3]); Q
x^2 + 2*x*y + 3*y^2
sage: Q = BinaryQF([1, 2])
Traceback (most recent call last):
...
TypeError: binary quadratic form must be given by a quadratic homogeneous bivariate integer polynomial or its coefficients
sage: R.<x, y> = ZZ[]
sage: f = x^2 + 2*x*y + 3*y^2
sage: BinaryQF(f)
x^2 + 2*x*y + 3*y^2
sage: BinaryQF(f + x)
Traceback (most recent call last):
...
TypeError: binary quadratic form must be given by a quadratic homogeneous bivariate integer polynomial or its coefficients
TESTS::
sage: BinaryQF(0)
0
"""
from sage.rings.polynomial.multi_polynomial_element import is_MPolynomial
if b is None and c is None:
if (isinstance(a, (list, tuple))
and len(a) == 3):
a, b, c = a
elif a == 0:
a = b = c = 0
elif (is_MPolynomial(a) and a.is_homogeneous() and a.base_ring() == ZZ
and a.degree() == 2 and a.parent().ngens() == 2):
x, y = a.parent().gens()
a, b, c = [a.monomial_coefficient(mon) for mon in [x**2, x*y, y**2]]
elif isinstance(a, pari_gen) and a.type() in ('t_QFI', 't_QFR'):
# a has 3 or 4 components
a, b, c = a[0], a[1], a[2]
try:
self._a = ZZ(a)
self._b = ZZ(b)
self._c = ZZ(c)
except TypeError:
raise TypeError('binary quadratic form must be given by a quadratic homogeneous bivariate integer polynomial or its coefficients')
self._poly = None
def _pari_init_(self):
"""
Convert this quadratic form to PARI.
EXAMPLES::
sage: f = BinaryQF([2, 3, 4]); f
2*x^2 + 3*x*y + 4*y^2
sage: f._pari_init_()
'Qfb(2,3,4)'
sage: pari(f)
Qfb(2, 3, 4)
sage: type(pari(f))
<... 'cypari2.gen.Gen'>
sage: gp(f)
Qfb(2, 3, 4)
sage: type(gp(f))
<class 'sage.interfaces.gp.GpElement'>
"""
return 'Qfb(%s,%s,%s)' % (self._a, self._b, self._c)
def __mul__(self, right):
"""
Gauss composition or right action by a 2x2 integer matrix.
The result need not be reduced.
EXAMPLES:
We explicitly compute in the group of classes of positive
definite binary quadratic forms of discriminant -23::
sage: R = BinaryQF_reduced_representatives(-23, primitive_only=False); R
[x^2 + x*y + 6*y^2, 2*x^2 - x*y + 3*y^2, 2*x^2 + x*y + 3*y^2]
sage: R[0] * R[0]
x^2 + x*y + 6*y^2
sage: R[1] * R[1]
4*x^2 + 3*x*y + 2*y^2
sage: (R[1] * R[1]).reduced_form()
2*x^2 + x*y + 3*y^2
sage: (R[1] * R[1] * R[1]).reduced_form()
x^2 + x*y + 6*y^2
sage: q1 = BinaryQF(1, 1, 4)
sage: M = Matrix(ZZ, [[1, 3], [0, 1]])
sage: q1*M
x^2 + 7*x*y + 16*y^2
sage: q1.matrix_action_right(M)
x^2 + 7*x*y + 16*y^2
sage: N = Matrix(ZZ, [[1, 0], [1, 0]])
sage: q1*(M*N) == q1.matrix_action_right(M).matrix_action_right(N)
True
"""
# Either a "right" action by
# ...or Gaussian composition
if isinstance(right, BinaryQF):
return BinaryQF(self.__pari__().qfbcompraw(right))
# ...or a 2x2 matrix...
if (isinstance(right.parent(), MatrixSpace)
and right.nrows() == right.ncols() == 2):
aa = right[0, 0]
bb = right[0, 1]
cc = right[1, 0]
dd = right[1, 1]
A = self.polynomial()(aa, cc)
C = self.polynomial()(bb, dd)
B = self.polynomial()(aa + bb, cc + dd) - A - C
qf = BinaryQF(A, B, C)
return qf
raise TypeError("right operand must be a binary quadratic form or 2x2 matrix")
def __getitem__(self, n):
"""
Return the `n`-th component of this quadratic form.
If this form is `a x^2 + b x y + c y^2`, the 0-th component is `a`,
the 1-st component is `b`, and the 2-nd component is `c`.
Indexing is like lists -- negative indices and slices are allowed.
EXAMPLES::
sage: Q = BinaryQF([2, 3, 4])
sage: Q[0]
2
sage: Q[2]
4
sage: Q[:2]
(2, 3)
sage: tuple(Q)
(2, 3, 4)
sage: list(Q)
[2, 3, 4]
"""
return (self._a, self._b, self._c)[n]
def __call__(self, *args):
r"""
Evaluate this quadratic form at a point.
INPUT:
- args -- x and y values, as a pair x, y or a list, tuple, or
vector
EXAMPLES::
sage: Q = BinaryQF([2, 3, 4])
sage: Q(1, 2)
24
TESTS::
sage: Q = BinaryQF([2, 3, 4])
sage: Q([1, 2])
24
sage: Q((1, 2))
24
sage: Q(vector([1, 2]))
24
"""
if len(args) == 1:
args = args[0]
x, y = args
return (self._a * x + self._b * y) * x + self._c * y**2
def __hash__(self):
r"""
TESTS::
sage: hash(BinaryQF([2, 2, 3]))
802
sage: hash(BinaryQF([2, 3, 2]))
562
sage: hash(BinaryQF([3, 2, 2]))
547
"""
return hash(self._a) ^ (hash(self._b) << 4) ^ (hash(self._c) << 8)
def __eq__(self, right):
"""
Return ``True`` if ``self`` and ``right`` are identical.
This means that they have the same coefficients.
EXAMPLES::
sage: P = BinaryQF([2, 2, 3])
sage: Q = BinaryQF([2, 2, 3])
sage: R = BinaryQF([1, 2, 3])
sage: P == Q # indirect doctest
True
sage: P == R # indirect doctest
False
TESTS::
sage: P == P
True
sage: Q == P
True
sage: R == P
False
sage: P == 2
False
"""
if not isinstance(right, BinaryQF):
return False
return (self._a, self._b, self._c) == (right._a, right._b, right._c)
def __ne__(self, right):
"""
Return ``True`` if ``self`` and ``right`` are not identical.
This means that they have different coefficients.
EXAMPLES::
sage: P = BinaryQF([2, 2, 3])
sage: Q = BinaryQF([2, 2, 3])
sage: R = BinaryQF([1, 2, 3])
sage: P != Q # indirect doctest
False
sage: P != R # indirect doctest
True
"""
return not (self == right)
def __lt__(self, right):
"""
Compare the coefficients of ``self`` and ``right``.
This is done lexicographically.
EXAMPLES::
sage: P = BinaryQF([2, 2, 3])
sage: Q = BinaryQF([1, 2, 3])
sage: P < Q
False
sage: Q < P
True
sage: Q <= P
True
"""
if not isinstance(right, BinaryQF):
return False
return (self._a, self._b, self._c) < (right._a, right._b, right._c)
def __add__(self, Q):
"""
Return the component-wise sum of two forms.
Given `a_1 x^2 + b_1 x y + c_1 y^2` and `a_2 x^2 + b_2 x y +
c_2 y^2`, this returns the form `(a_1 + a_2) x^2 + (b_1 + b_2)
x y + (c_1 + c_2) y^2.`
EXAMPLES::
sage: P = BinaryQF([2, 2, 3]); P
2*x^2 + 2*x*y + 3*y^2
sage: Q = BinaryQF([-1, 2, 2]); Q
-x^2 + 2*x*y + 2*y^2
sage: P + Q
x^2 + 4*x*y + 5*y^2
sage: P + Q == BinaryQF([1, 4, 5]) # indirect doctest
True
TESTS::
sage: Q + P == BinaryQF([1, 4, 5]) # indirect doctest
True
"""
return BinaryQF([self._a + Q._a, self._b + Q._b, self._c + Q._c])
def __sub__(self, Q):
"""
Return the component-wise difference of two forms.
Given two forms `a_1 x^2 + b_1 x y + c_1 y^2` and `a_2 x^2 +
b_2 x y + c_2 y^2`, this returns the form `(a_1 - a_2) x^2 +
(b_1 - b_2) x y + (c_1 - c_2) y^2.`
EXAMPLES::
sage: P = BinaryQF([2, 2, 3]); P
2*x^2 + 2*x*y + 3*y^2
sage: Q = BinaryQF([-1, 2, 2]); Q
-x^2 + 2*x*y + 2*y^2
sage: P - Q
3*x^2 + y^2
sage: P - Q == BinaryQF([3, 0, 1]) # indirect doctest
True
TESTS::
sage: Q - P == BinaryQF([3, 0, 1]) # indirect doctest
False
sage: Q - P != BinaryQF([3, 0, 1]) # indirect doctest
True
"""
return BinaryQF([self._a - Q._a, self._b - Q._b, self._c - Q._c])
def __neg__(self):
r"""
Return the negative of this binary quadratic form.
EXAMPLES::
sage: Q = BinaryQF([1,-2,3])
sage: -Q
-x^2 + 2*x*y - 3*y^2
sage: -Q == BinaryQF([0,0,0]) - Q
True
"""
return BinaryQF([-self._a, -self._b, -self._c])
def _repr_(self):
"""
Display the quadratic form.
EXAMPLES::
sage: Q = BinaryQF([1, 2, 3]); Q # indirect doctest
x^2 + 2*x*y + 3*y^2
sage: Q = BinaryQF([-1, 2, 3]); Q
-x^2 + 2*x*y + 3*y^2
sage: Q = BinaryQF([0, 0, 0]); Q
0
"""
return repr(self.polynomial())
def _latex_(self):
"""
Return latex representation of this binary quadratic form.
EXAMPLES::
sage: f = BinaryQF((778, 1115, 400)); f
778*x^2 + 1115*x*y + 400*y^2
sage: latex(f) # indirect doctest
778 x^{2} + 1115 x y + 400 y^{2}
"""
return self.polynomial()._latex_()
def content(self):
"""
Return the content of the form, i.e., the gcd of the coefficients.
EXAMPLES::
sage: Q = BinaryQF(22, 14, 10)
sage: Q.content()
2
sage: Q = BinaryQF(4, 4, -15)
sage: Q.content()
1
"""
return gcd([self._a, self._b, self._c])
def polynomial(self):
"""
Return ``self`` as a homogeneous 2-variable polynomial.
EXAMPLES::
sage: Q = BinaryQF([1, 2, 3])
sage: Q.polynomial()
x^2 + 2*x*y + 3*y^2
sage: Q = BinaryQF([-1, -2, 3])
sage: Q.polynomial()
-x^2 - 2*x*y + 3*y^2
sage: Q = BinaryQF([0, 0, 0])
sage: Q.polynomial()
0
"""
# Note: Caching in _poly seems to give a very slight
# improvement (~0.2 usec) in 'timeit()' runs. Not sure it
# is worth the instance variable.
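# Evaluating the form at the generators of ZZ['x, y'] produces the
# homogeneous polynomial a*x^2 + b*x*y + c*y^2.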
if self._poly is None:
self._poly = self(ZZ['x, y'].gens())
return self._poly
@cached_method
def discriminant(self):
"""
Return the discriminant of ``self``.
Given a form `ax^2 + bxy + cy^2`, this returns `b^2 - 4ac`.
EXAMPLES::
sage: Q = BinaryQF([1, 2, 3])
sage: Q.discriminant()
-8
"""
return self._b**2 - 4 * self._a * self._c
def determinant(self):
"""
Return the determinant of the matrix associated to ``self``.
The determinant is used by Gauss and by Conway-Sloane, for
whom an integral quadratic form has coefficients `(a, 2b, c)`
with `a`, `b`, `c` integers.
OUTPUT:
The determinant of the matrix::
[ a b/2]
[b/2 c]
as a rational
REMARK:
This is just `-D/4` where `D` is the discriminant. The return
type is rational even if `b` (and hence `D`) is even.
EXAMPLES::
sage: q = BinaryQF(1, -1, 67)
sage: q.determinant()
267/4
"""
return self._a*self._c - (self._b**2)/4
# for consistency with general quadratic form code
det = determinant
@cached_method
def has_fundamental_discriminant(self):
"""
Return if the discriminant `D` of this form is a fundamental
discriminant (i.e. `D` is the smallest element of its
square class with `D \equiv 0` or `1` modulo `4`).
EXAMPLES::
sage: Q = BinaryQF([1, 0, 1])
sage: Q.discriminant()
-4
sage: Q.has_fundamental_discriminant()
True
sage: Q = BinaryQF([2, 0, 2])
sage: Q.discriminant()
-16
sage: Q.has_fundamental_discriminant()
False
"""
return is_fundamental_discriminant(self.discriminant())
@cached_method
def is_primitive(self):
r"""
Check if the form `ax^2 + bxy + cy^2` satisfies
`\gcd(a, b, c) = 1`, i.e., is primitive.
EXAMPLES::
sage: Q = BinaryQF([6, 3, 9])
sage: Q.is_primitive()
False
sage: Q = BinaryQF([1, 1, 1])
sage: Q.is_primitive()
True
sage: Q = BinaryQF([2, 2, 2])
sage: Q.is_primitive()
False
sage: rqf = BinaryQF_reduced_representatives(-23*9, primitive_only=False)
sage: [qf.is_primitive() for qf in rqf]
[True, True, True, False, True, True, False, False, True]
sage: rqf
[x^2 + x*y + 52*y^2,
2*x^2 - x*y + 26*y^2,
2*x^2 + x*y + 26*y^2,
3*x^2 + 3*x*y + 18*y^2,
4*x^2 - x*y + 13*y^2,
4*x^2 + x*y + 13*y^2,
6*x^2 - 3*x*y + 9*y^2,
6*x^2 + 3*x*y + 9*y^2,
8*x^2 + 7*x*y + 8*y^2]
sage: [qf for qf in rqf if qf.is_primitive()]
[x^2 + x*y + 52*y^2,
2*x^2 - x*y + 26*y^2,
2*x^2 + x*y + 26*y^2,
4*x^2 - x*y + 13*y^2,
4*x^2 + x*y + 13*y^2,
8*x^2 + 7*x*y + 8*y^2]
"""
return gcd([self._a, self._b, self._c]) == 1
@cached_method
def is_zero(self):
"""
Return if ``self`` is identically zero.
EXAMPLES::
sage: Q = BinaryQF(195751, 37615, 1807)
sage: Q.is_zero()
False
sage: Q = BinaryQF(0, 0, 0)
sage: Q.is_zero()
True
"""
return self.content() == 0
@cached_method
def is_weakly_reduced(self):
r"""
Check if the form `ax^2 + bxy + cy^2` satisfies
`|b| \leq a \leq c`, i.e., is weakly reduced.
EXAMPLES::
sage: Q = BinaryQF([1, 2, 3])
sage: Q.is_weakly_reduced()
False
sage: Q = BinaryQF([2, 1, 3])
sage: Q.is_weakly_reduced()
True
sage: Q = BinaryQF([1, -1, 1])
sage: Q.is_weakly_reduced()
True
"""
if self.discriminant() >= 0:
raise ValueError("only defined for negative discriminant")
return (abs(self._b) <= self._a) and (self._a <= self._c)
@cached_method
def is_reducible(self):
r"""
Return if this form is reducible and cache the result.
A binary form `q` is called reducible if it is the product of
two linear forms `q = (a x + b y) (c x + d y)`, or
equivalently if its discriminant is a square.
EXAMPLES::
sage: q = BinaryQF([1, 0, -1])
sage: q.is_reducible()
True
"""
return self.discriminant().is_square()
def _reduce_indef(self, transformation=False):
"""
Reduce an indefinite, non-reduced form.
INPUT:
- ``transformation`` -- bool (default: ``False``); if ``True``,
return both the reduced form and a matrix transforming
``self`` into the reduced form.
TESTS::
sage: f = BinaryQF(-1, 0, 3)
sage: f._reduce_indef(transformation=False)
-x^2 + 2*x*y + 2*y^2
sage: red, trans = f._reduce_indef(transformation=True)
sage: red
-x^2 + 2*x*y + 2*y^2
sage: trans
[-1 1]
[ 0 -1]
sage: red == f*trans
True
sage: f = BinaryQF(0, 5, 24)
sage: red, trans = f._reduce_indef(transformation=True)
sage: red == f*trans
True
"""
if transformation:
U = Matrix(ZZ, 2, 2, [1, 0, 0, 1])
d = self.discriminant().sqrt(prec=53)
Q = self
while not Q.is_reduced():
a = Q._a
b = Q._b
c = Q._c
cabs = c.abs()
# rho(f) as defined in [BUVO2007]_ p. 112 equation (6.12)
if cabs != 0:
if cabs >= d:
s = c.sign() * ((cabs + b) / (2 * cabs)).floor()
else:
s = c.sign() * ((d + b) / (2 * cabs)).floor()
if transformation:
T = Matrix(ZZ, 2, 2, [0, -1, 1, s])
U = U * T
Q = BinaryQF(c, -b + 2*s*c, c*s*s - b*s + a)
else:
if b < 0:
Q = BinaryQF(a, -b, c)
if transformation:
T = Matrix(ZZ, 2, 2, [1, 0, 0, -1])
U = U * T
else:
q, r = a.quo_rem(b)
if 2*r > b:
q, r = a.quo_rem(-b)
q = -q
if transformation:
T = Matrix(ZZ, 2, 2, [1, 0, -q, 1])
U = U * T
Q = BinaryQF(r, b, c)
if transformation:
return Q, U
return Q
@cached_method
def reduced_form(self, transformation=False, algorithm="default"):
"""
Return a reduced form equivalent to ``self``.
INPUT:
- ``self`` -- binary quadratic form of non-square discriminant
- ``transformation`` -- boolean (default: False): if ``True``, return
both the reduced form and a matrix transforming ``self`` into the
reduced form. Currently only implemented for indefinite forms.
- ``algorithm`` -- string; the algorithm to use. Valid options are:
* ``'default'`` -- let Sage pick an algorithm (default).
* ``'pari'`` -- use PARI
* ``'sage'`` -- use Sage
.. SEEALSO::
:meth:`is_reduced`
EXAMPLES::
sage: a = BinaryQF([33, 11, 5])
sage: a.is_reduced()
False
sage: b = a.reduced_form(); b
5*x^2 - x*y + 27*y^2
sage: b.is_reduced()
True
sage: a = BinaryQF([15, 0, 15])
sage: a.is_reduced()
True
sage: b = a.reduced_form(); b
15*x^2 + 15*y^2
sage: b.is_reduced()
True
Examples of reducing indefinite forms::
sage: f = BinaryQF(1, 0, -3)
sage: f.is_reduced()
False
sage: g = f.reduced_form(); g
x^2 + 2*x*y - 2*y^2
sage: g.is_reduced()
True
sage: q = BinaryQF(1, 0, -1)
sage: q.reduced_form()
x^2 + 2*x*y
sage: BinaryQF(1, 9, 4).reduced_form(transformation=True)
(
[ 0 -1]
4*x^2 + 7*x*y - y^2, [ 1 2]
)
sage: BinaryQF(3, 7, -2).reduced_form(transformation=True)
(
[1 0]
3*x^2 + 7*x*y - 2*y^2, [0 1]
)
sage: BinaryQF(-6, 6, -1).reduced_form(transformation=True)
(
[ 0 -1]
-x^2 + 2*x*y + 2*y^2, [ 1 -4]
)
"""
if self.is_reduced():
if transformation:
return self, Matrix(ZZ, 2, 2, [1, 0, 0, 1])
else:
return self
if algorithm == "default":
if self.is_reducible() or (self.discriminant() > 0 and transformation):
algorithm = 'sage'
elif not transformation:
algorithm = 'pari'
else:
raise NotImplementedError('reduction of definite binary '
'quadratic forms with transformation=True is not '
'implemented')
if algorithm == 'sage':
if self.discriminant() <= 0:
raise NotImplementedError('reduction of definite binary '
'quadratic forms is not implemented in Sage')
return self._reduce_indef(transformation)
elif algorithm == 'pari':
if transformation:
raise NotImplementedError('transformation=True is not '
'supported using PARI')
elif self.is_reducible():
raise NotImplementedError('reducible forms are not '
'supported using PARI')
return BinaryQF(self.__pari__().qfbred())
else:
raise ValueError('unknown implementation for binary quadratic form '
'reduction: %s' % algorithm)
# Buchmann/Vollmer cycle algorithm
def _RhoTau(self):
"""
Apply Rho and Tau operators to this form, returning a new form `Q`.
EXAMPLES::
sage: f = BinaryQF(1, 8, -3)
sage: f._RhoTau()
3*x^2 + 4*x*y - 5*y^2
"""
d = self.discriminant().sqrt(prec=53)
a = self._a
b = self._b
c = self._c
cabs = c.abs()
sign = c.sign()
if cabs >= d:
s = sign * ((cabs+b) / (2*cabs)).floor()
else:
s = sign * ((d+b) / (2*cabs)).floor()
Q = BinaryQF(-c, -b + 2*s*c, -(a - b*s + c*s*s))
return Q
def _Rho(self):
"""
Apply the Rho operator to this form, returning a new form `Q`.
EXAMPLES::
sage: f = BinaryQF(1, 8, -3)
sage: f._Rho()
-3*x^2 + 4*x*y + 5*y^2
"""
d = self.discriminant().sqrt(prec=53)
a = self._a
b = self._b
c = self._c
cabs = c.abs()
sign = c.sign()
if cabs >= d:
s = sign * ((cabs+b) / (2*cabs)).floor()
else:
s = sign * ((d+b) / (2*cabs)).floor()
Q = BinaryQF(c, -b + 2*s*c, a - b*s + c*s*s)
return Q
def _Tau(self):
"""
Apply the Tau operator to this form, returning a new form `Q`.
EXAMPLES::
sage: f = BinaryQF(1, 8, -3)
sage: f._Tau()
-x^2 + 8*x*y + 3*y^2
"""
a = self._a
b = self._b
c = self._c
Q = BinaryQF(-a, b, -c)
return Q
def cycle(self, proper=False):
"""
Return the cycle of reduced forms to which ``self`` belongs.
This is based on Algorithm 6.1 of [BUVO2007]_.
INPUT:
- ``self`` -- reduced, indefinite form of non-square discriminant
- ``proper`` -- boolean (default: ``False``); if ``True``, return the
proper cycle
The proper cycle of a form `f` consists of all reduced forms that are
properly equivalent to `f`. This is useful when testing for proper
equivalence (or equivalence) between indefinite forms.
The cycle of `f` is a technical tool that is used when computing the proper
cycle. Our definition of the cycle is slightly different from the one
in [BUVO2007]_. In our definition, the cycle consists of all reduced
forms `g`, such that the `a`-coefficient of `g` has the same sign as the
`a`-coefficient of `f`, and `g` can be obtained from `f` by performing a
change of variables, and then multiplying by the determinant of the
change-of-variables matrix. It is important to note that `g` might not be
equivalent to `f` (because of multiplying by the determinant). However,
either `g` or `-g` must be equivalent to `f`. Also note that the cycle
does contain `f`. (Under the definition in [BUVO2007]_, the cycle might
not contain `f`, because all forms in the cycle are required to have
positive `a`-coefficient, even if the `a`-coefficient of `f` is negative.)
EXAMPLES::
sage: Q = BinaryQF(14, 17, -2)
sage: Q.cycle()
[14*x^2 + 17*x*y - 2*y^2,
2*x^2 + 19*x*y - 5*y^2,
5*x^2 + 11*x*y - 14*y^2]
sage: Q.cycle(proper=True)
[14*x^2 + 17*x*y - 2*y^2,
-2*x^2 + 19*x*y + 5*y^2,
5*x^2 + 11*x*y - 14*y^2,
-14*x^2 + 17*x*y + 2*y^2,
2*x^2 + 19*x*y - 5*y^2,
-5*x^2 + 11*x*y + 14*y^2]
sage: Q = BinaryQF(1, 8, -3)
sage: Q.cycle()
[x^2 + 8*x*y - 3*y^2,
3*x^2 + 4*x*y - 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
2*x^2 + 6*x*y - 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
3*x^2 + 8*x*y - y^2]
sage: Q.cycle(proper=True)
[x^2 + 8*x*y - 3*y^2,
-3*x^2 + 4*x*y + 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
-2*x^2 + 6*x*y + 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
-3*x^2 + 8*x*y + y^2]
sage: Q = BinaryQF(1, 7, -6)
sage: Q.cycle()
[x^2 + 7*x*y - 6*y^2,
6*x^2 + 5*x*y - 2*y^2,
2*x^2 + 7*x*y - 3*y^2,
3*x^2 + 5*x*y - 4*y^2,
4*x^2 + 3*x*y - 4*y^2,
4*x^2 + 5*x*y - 3*y^2,
3*x^2 + 7*x*y - 2*y^2,
2*x^2 + 5*x*y - 6*y^2,
6*x^2 + 7*x*y - y^2]
TESTS:
Check an example in :trac:`28989`::
sage: Q = BinaryQF(1, 1, -1)
sage: Q.cycle(proper=True)
[x^2 + x*y - y^2, -x^2 + x*y + y^2]
This is Example 6.10.6 of [BUVO2007]_::
sage: Q = BinaryQF(1, 7, -6)
sage: Q.cycle()
[x^2 + 7*x*y - 6*y^2,
6*x^2 + 5*x*y - 2*y^2,
2*x^2 + 7*x*y - 3*y^2,
3*x^2 + 5*x*y - 4*y^2,
4*x^2 + 3*x*y - 4*y^2,
4*x^2 + 5*x*y - 3*y^2,
3*x^2 + 7*x*y - 2*y^2,
2*x^2 + 5*x*y - 6*y^2,
6*x^2 + 7*x*y - y^2]
sage: Q.cycle(proper=True)
[x^2 + 7*x*y - 6*y^2,
-6*x^2 + 5*x*y + 2*y^2,
2*x^2 + 7*x*y - 3*y^2,
-3*x^2 + 5*x*y + 4*y^2,
4*x^2 + 3*x*y - 4*y^2,
-4*x^2 + 5*x*y + 3*y^2,
3*x^2 + 7*x*y - 2*y^2,
-2*x^2 + 5*x*y + 6*y^2,
6*x^2 + 7*x*y - y^2,
-x^2 + 7*x*y + 6*y^2,
6*x^2 + 5*x*y - 2*y^2,
-2*x^2 + 7*x*y + 3*y^2,
3*x^2 + 5*x*y - 4*y^2,
-4*x^2 + 3*x*y + 4*y^2,
4*x^2 + 5*x*y - 3*y^2,
-3*x^2 + 7*x*y + 2*y^2,
2*x^2 + 5*x*y - 6*y^2,
-6*x^2 + 7*x*y + y^2]
This is Example 6.10.7 of [BUVO2007]_::
sage: Q = BinaryQF(1, 8, -3)
sage: Q.cycle()
[x^2 + 8*x*y - 3*y^2,
3*x^2 + 4*x*y - 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
2*x^2 + 6*x*y - 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
3*x^2 + 8*x*y - y^2]
sage: Q.cycle(proper=True)
[x^2 + 8*x*y - 3*y^2,
-3*x^2 + 4*x*y + 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
-2*x^2 + 6*x*y + 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
-3*x^2 + 8*x*y + y^2]
sage: Q.cycle(proper=True) # should be the same as the previous one
[x^2 + 8*x*y - 3*y^2,
-3*x^2 + 4*x*y + 5*y^2,
5*x^2 + 6*x*y - 2*y^2,
-2*x^2 + 6*x*y + 5*y^2,
5*x^2 + 4*x*y - 3*y^2,
-3*x^2 + 8*x*y + y^2]
Try an example where a is negative::
sage: Q = BinaryQF(-1, 8, 3)
sage: Q.cycle(proper=True)
[-x^2 + 8*x*y + 3*y^2,
3*x^2 + 4*x*y - 5*y^2,
-5*x^2 + 6*x*y + 2*y^2,
2*x^2 + 6*x*y - 5*y^2,
-5*x^2 + 4*x*y + 3*y^2,
3*x^2 + 8*x*y - y^2]
"""
if not (self.is_indef() and self.is_reduced()):
raise ValueError("%s must be indefinite and reduced" % self)
if self.discriminant().is_square():
# Buchmann/Vollmer assume the discriminant to be non-square
raise NotImplementedError('computation of cycles is only '
'implemented for non-square discriminants')
if proper:
# Prop 6.10.5 in Buchmann Vollmer
C = list(self.cycle(proper=False)) # make a copy so we can modify it
if len(C) % 2:
C += C
for i in range(len(C)//2):
C[2*i+1] = C[2*i+1]._Tau()
return C
if not hasattr(self, '_cycle_list'):
C = [self]
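# Repeatedly apply the rho-tau step until the starting form reappears;
# the forms visited along the way make up the cycle.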
Q1 = self._RhoTau()
while not self == Q1:
C.append(Q1)
Q1 = Q1._RhoTau()
self._cycle_list = C
return self._cycle_list
def is_positive_definite(self):
"""
Return ``True`` if ``self`` is positive definite, i.e., has
negative discriminant with `a > 0`.
EXAMPLES::
sage: Q = BinaryQF(195751, 37615, 1807)
sage: Q.is_positive_definite()
True
sage: Q = BinaryQF(195751, 1212121, -1876411)
sage: Q.is_positive_definite()
False
"""
return self.discriminant() < 0 and self._a > 0
is_posdef = is_positive_definite
def is_negative_definite(self):
"""
Return ``True`` if ``self`` is negative definite, i.e., has
negative discriminant with `a < 0`.
EXAMPLES::
sage: Q = BinaryQF(-1, 3, -5)
sage: Q.is_positive_definite()
False
sage: Q.is_negative_definite()
True
"""
return self.discriminant() < 0 and self._a < 0
is_negdef = is_negative_definite
def is_indefinite(self):
"""
Return if ``self`` is indefinite, i.e., has positive discriminant.
EXAMPLES::
sage: Q = BinaryQF(1, 3, -5)
sage: Q.is_indef()
True
"""
return self.discriminant() > 0
is_indef = is_indefinite
def is_singular(self):
"""
Return if ``self`` is singular, i.e., has zero discriminant.
EXAMPLES::
sage: Q = BinaryQF(1, 3, -5)
sage: Q.is_singular()
False
sage: Q = BinaryQF(1, 2, 1)
sage: Q.is_singular()
True
"""
return self.discriminant().is_zero()
def is_nonsingular(self):
"""
Return if this form is nonsingular, i.e., has non-zero discriminant.
EXAMPLES::
sage: Q = BinaryQF(1, 3, -5)
sage: Q.is_nonsingular()
True
sage: Q = BinaryQF(1, 2, 1)
sage: Q.is_nonsingular()
False
"""
return not self.discriminant().is_zero()
def is_equivalent(self, other, proper=True):
"""
Return if ``self`` is equivalent to ``other``.
INPUT:
- ``proper`` -- bool (default: ``True``); if ``True`` use proper
equivalence
- ``other`` -- a binary quadratic form
EXAMPLES::
sage: Q3 = BinaryQF(4, 4, 15)
sage: Q2 = BinaryQF(4, -4, 15)
sage: Q2.is_equivalent(Q3)
True
sage: a = BinaryQF([33, 11, 5])
sage: b = a.reduced_form(); b
5*x^2 - x*y + 27*y^2
sage: a.is_equivalent(b)
True
sage: a.is_equivalent(BinaryQF((3, 4, 5)))
False
Some indefinite examples::
sage: Q1 = BinaryQF(9, 8, -7)
sage: Q2 = BinaryQF(9, -8, -7)
sage: Q1.is_equivalent(Q2, proper=True)
False
sage: Q1.is_equivalent(Q2, proper=False)
True
TESTS:
We check that :trac:`25888` is fixed::
sage: Q1 = BinaryQF(3, 4, -2)
sage: Q2 = BinaryQF(-2, 4, 3)
sage: Q1.is_equivalent(Q2) == Q2.is_equivalent(Q1)
True
sage: Q1.is_equivalent(Q2, proper=False) == Q2.is_equivalent(Q1, proper=False)
True
sage: Q1.is_equivalent(Q2, proper=True)
True
We check that the first part of :trac:`29028` is fixed::
sage: Q = BinaryQF(0, 2, 0)
sage: Q.discriminant()
4
sage: Q.is_equivalent(Q, proper=True)
True
sage: Q.is_equivalent(Q, proper=False)
True
A test for rational forms::
sage: Q1 = BinaryQF(0, 4, 2)
sage: Q2 = BinaryQF(2, 4, 0)
sage: Q1.is_equivalent(Q2, proper=False)
True
Test another part of :trac:`28989`::
sage: Q1, Q2 = BinaryQF(1, 1, -1), BinaryQF(-1, 1, 1)
sage: Q1.is_equivalent(Q2, proper=True)
True
"""
if type(other) != type(self):
raise TypeError("%s is not a BinaryQF" % other)
if self.discriminant() != other.discriminant():
return False
if self.is_indef():
# First, reduce self and other
selfred = self.reduced_form()
otherred = other.reduced_form()
if self.discriminant().is_square():
# make sure we terminate in a form
# with c = 0
while selfred[2] != 0:
selfred = selfred._Rho()
while otherred[2] != 0:
otherred = otherred._Rho()
b = selfred._b
a = selfred._a
ao = otherred._a
assert otherred._b == b
# p. 359 of Conway-Sloane [CS1999]_
# but `2b` in their notation is `b` in our notation
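# With c = 0 and b fixed, the determinant-1 substitution (x, y) -> (x, t*x + y)
# replaces a by a + t*b, so two such forms are properly equivalent exactly
# when their a-coefficients agree modulo b.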
is_properly_equiv = ((a-ao) % b == 0)
if proper:
return is_properly_equiv
else:
g = gcd(a, b)
return is_properly_equiv or ((gcd(ao,b) == g) and ((a*ao - g**2) % (b*g) == 0))
proper_cycle = otherred.cycle(proper=True)
is_prop = selfred in proper_cycle
if proper or is_prop:
return is_prop
# note that our definition of improper equivalence
# differs from that of Buchmann and Vollmer
# their action is det f * q(f(x, y))
# ours is q(f(x, y))
# an improper equivalence in our convention
selfred = BinaryQF(selfred._c, selfred._b, selfred._a)
assert selfred.is_reduced()
return selfred in proper_cycle
# Else we're dealing with definite forms.
if self.is_posdef() and not other.is_posdef():
return False
if self.is_negdef() and not other.is_negdef():
return False
Q1 = self.reduced_form()
Q2 = other.reduced_form()
if Q1 == Q2:
return True
if not proper:
Q1e = BinaryQF(self._c, self._b, self._a).reduced_form()
return Q1e == Q2
return False
@cached_method
def is_reduced(self):
r"""
Return if ``self`` is reduced.
Let `f = a x^2 + b xy + c y^2` be a binary quadratic form of
discriminant `D`.
- If `f` is positive definite (`D < 0` and `a > 0`), then `f`
is reduced if and only if `|b|\leq a \leq c`, and `b\geq 0`
if either `a = b` or `a = c`.
- If `f` is negative definite (`D < 0` and `a < 0`), then `f`
is reduced if and only if the positive definite form with
coefficients `(-a, b, -c)` is reduced.
- If `f` is indefinite (`D > 0`), then `f` is reduced if and
only if `|\sqrt{D} - 2|a|| < b < \sqrt{D}`
or `a = 0` and `-b < 2c \leq b`
or `c = 0` and `-b < 2a \leq b`
EXAMPLES::
sage: Q = BinaryQF([1, 2, 3])
sage: Q.is_reduced()
False
sage: Q = BinaryQF([2, 1, 3])
sage: Q.is_reduced()
True
sage: Q = BinaryQF([1, -1, 1])
sage: Q.is_reduced()
False
sage: Q = BinaryQF([1, 1, 1])
sage: Q.is_reduced()
True
Examples using indefinite forms::
sage: f = BinaryQF(-1, 2, 2)
sage: f.is_reduced()
True
sage: BinaryQF(1, 9, 4).is_reduced()
False
sage: BinaryQF(1, 5, -1).is_reduced()
True
"""
D = self.discriminant()
a = self._a
b = self._b
c = self._c
if D < 0 and a > 0:
return ((-a < b <= a < c)
or (ZZ(0) <= b <= a == c))
elif D < 0 and self._a < 0:
return ((a < b <= -a < -c)
or (ZZ(0) <= b <= -a == -c))
else:
d = D.sqrt(prec=53)
return (((d - 2*a.abs()).abs() < b < d)
or (0 == a and -b < 2*c <= b)
or (0 == c and -b < 2*a <= b))
def complex_point(self):
r"""
Return the point in the complex upper half-plane associated to ``self``.
This form, `ax^2 + b xy + cy^2`, must be definite with
negative discriminant `b^2 - 4 a c < 0`.
OUTPUT:
- the unique complex root of `a x^2 + b x + c` with positive
imaginary part
EXAMPLES::
sage: Q = BinaryQF([1, 0, 1])
sage: Q.complex_point()
1.00000000000000*I
"""
if self.discriminant() >= 0:
raise ValueError("only defined for negative discriminant")
Q1 = ZZ['x']([self._c, self._b, self._a])
return [z for z in Q1.complex_roots() if z.imag() > 0][0]
def matrix_action_left(self, M):
r"""
Return the binary quadratic form resulting from the left action
of the 2-by-2 matrix `M` on ``self``.
Here the action of the matrix `M = \begin{pmatrix} a & b \\ c & d
\end{pmatrix}` on the form `Q(x, y)` produces the form `Q(ax+cy,
bx+dy)`.
EXAMPLES::
sage: Q = BinaryQF([2, 1, 3]); Q
2*x^2 + x*y + 3*y^2
sage: M = matrix(ZZ, [[1, 2], [3, 5]])
sage: Q.matrix_action_left(M)
16*x^2 + 83*x*y + 108*y^2
"""
v, w = M.rows()
a1 = self(v)
c1 = self(w)
b1 = self(v + w) - a1 - c1
return BinaryQF([a1, b1, c1])
def matrix_action_right(self, M):
r"""
Return the binary quadratic form resulting from the right action
of the 2-by-2 matrix `M` on ``self``.
Here the action of the matrix `M = \begin{pmatrix} a & b \\ c & d
\end{pmatrix}` on the form `Q(x, y)` produces the form `Q(ax+by,
cx+dy)`.
EXAMPLES::
sage: Q = BinaryQF([2, 1, 3]); Q
2*x^2 + x*y + 3*y^2
sage: M = matrix(ZZ, [[1, 2], [3, 5]])
sage: Q.matrix_action_right(M)
32*x^2 + 109*x*y + 93*y^2
"""
v, w = M.columns()
a1 = self(v)
c1 = self(w)
b1 = self(v + w) - a1 - c1
return BinaryQF([a1, b1, c1])
def small_prime_value(self, Bmax=1000):
r"""
Return a prime represented by this (primitive positive definite) binary form.
INPUT:
- ``Bmax`` -- a positive bound on the representing integers.
OUTPUT:
A prime number represented by the form.
.. NOTE::
This is a very elementary implementation which just substitutes
values until a prime is found.
EXAMPLES::
sage: [Q.small_prime_value() for Q in BinaryQF_reduced_representatives(-23, primitive_only=True)]
[23, 2, 2]
sage: [Q.small_prime_value() for Q in BinaryQF_reduced_representatives(-47, primitive_only=True)]
[47, 2, 2, 3, 3]
"""
from sage.sets.all import Set
from sage.arith.srange import xsrange
B = 10
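# Search an expanding box of (x, y) values, enlarging B by 10 at a time,
# until a represented prime shows up or Bmax is exceeded.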
while True:
llist = list(Set([self(x, y) for x in xsrange(-B, B) for y in xsrange(B)]))
llist = sorted([l for l in llist if l.is_prime()])
if llist:
return llist[0]
if B >= Bmax:
raise ValueError("Unable to find a prime value of %s" % self)
B += 10
def solve_integer(self, n):
r"""
Solve `Q(x, y) = n` in integers `x` and `y` where `Q` is this
quadratic form.
INPUT:
- ``n`` -- a positive integer
OUTPUT:
A tuple `(x, y)` of integers satisfying `Q(x, y) = n`, or ``None``
if no solution exists.
ALGORITHM: :pari:`qfbsolve`
EXAMPLES::
sage: Q = BinaryQF([1, 0, 419])
sage: Q.solve_integer(773187972)
(4919, 1337)
::
sage: Qs = BinaryQF_reduced_representatives(-23, primitive_only=True)
sage: Qs
[x^2 + x*y + 6*y^2, 2*x^2 - x*y + 3*y^2, 2*x^2 + x*y + 3*y^2]
sage: [Q.solve_integer(3) for Q in Qs]
[None, (0, -1), (0, -1)]
sage: [Q.solve_integer(5) for Q in Qs]
[None, None, None]
sage: [Q.solve_integer(6) for Q in Qs]
[(1, -1), (1, -1), (-1, -1)]
TESTS:
The returned solutions are correct (random inputs)::
sage: Q = BinaryQF([randrange(-10^3, 10^3) for _ in 'abc'])
sage: n = randrange(-10^9, 10^9)
sage: xy = Q.solve_integer(n)
sage: xy is None or Q(*xy) == n
True
"""
n = ZZ(n)
if self.is_negative_definite(): # not supported by PARI
return (-self).solve_integer(-n)
flag = 2 # single solution, possibly imprimitive
sol = self.__pari__().qfbsolve(n, flag)
return tuple(map(ZZ, sol)) if sol else None
def BinaryQF_reduced_representatives(D, primitive_only=False, proper=True):
r"""
Return representatives for the classes of binary quadratic forms
of discriminant `D`.
INPUT:
- ``D`` -- (integer) a discriminant
- ``primitive_only`` -- (boolean; default: ``False``): if ``True``, only
return primitive forms.
- ``proper`` -- (boolean; default: ``True``)
OUTPUT:
(list) A lexicographically-ordered list of inequivalent reduced
representatives for the (im)proper equivalence classes of binary quadratic
forms of discriminant `D`. If ``primitive_only`` is ``True`` then
imprimitive forms (which only exist when `D` is not fundamental) are
omitted; otherwise they are included.
EXAMPLES::
sage: BinaryQF_reduced_representatives(-4)
[x^2 + y^2]
sage: BinaryQF_reduced_representatives(-163)
[x^2 + x*y + 41*y^2]
sage: BinaryQF_reduced_representatives(-12)
[x^2 + 3*y^2, 2*x^2 + 2*x*y + 2*y^2]
sage: BinaryQF_reduced_representatives(-16)
[x^2 + 4*y^2, 2*x^2 + 2*y^2]
sage: BinaryQF_reduced_representatives(-63)
[x^2 + x*y + 16*y^2, 2*x^2 - x*y + 8*y^2, 2*x^2 + x*y + 8*y^2, 3*x^2 + 3*x*y + 6*y^2, 4*x^2 + x*y + 4*y^2]
The number of inequivalent reduced binary forms with a fixed negative
fundamental discriminant D is the class number of the quadratic field
`\QQ(\sqrt{D})`::
sage: len(BinaryQF_reduced_representatives(-13*4))
2
sage: QuadraticField(-13*4, 'a').class_number()
2
sage: p = next_prime(2^20); p
1048583
sage: len(BinaryQF_reduced_representatives(-p))
689
sage: QuadraticField(-p, 'a').class_number()
689
sage: BinaryQF_reduced_representatives(-23*9)
[x^2 + x*y + 52*y^2,
2*x^2 - x*y + 26*y^2,
2*x^2 + x*y + 26*y^2,
3*x^2 + 3*x*y + 18*y^2,
4*x^2 - x*y + 13*y^2,
4*x^2 + x*y + 13*y^2,
6*x^2 - 3*x*y + 9*y^2,
6*x^2 + 3*x*y + 9*y^2,
8*x^2 + 7*x*y + 8*y^2]
sage: BinaryQF_reduced_representatives(-23*9, primitive_only=True)
[x^2 + x*y + 52*y^2,
2*x^2 - x*y + 26*y^2,
2*x^2 + x*y + 26*y^2,
4*x^2 - x*y + 13*y^2,
4*x^2 + x*y + 13*y^2,
8*x^2 + 7*x*y + 8*y^2]
TESTS::
sage: BinaryQF_reduced_representatives(73)
[4*x^2 + 3*x*y - 4*y^2]
sage: BinaryQF_reduced_representatives(76, primitive_only=True)
[-3*x^2 + 4*x*y + 5*y^2,
3*x^2 + 4*x*y - 5*y^2]
sage: BinaryQF_reduced_representatives(136)
[-5*x^2 + 4*x*y + 6*y^2,
-2*x^2 + 8*x*y + 9*y^2,
2*x^2 + 8*x*y - 9*y^2,
5*x^2 + 4*x*y - 6*y^2]
sage: BinaryQF_reduced_representatives(136, proper=False)
[-2*x^2 + 8*x*y + 9*y^2, 2*x^2 + 8*x*y - 9*y^2, 5*x^2 + 4*x*y - 6*y^2]
Check that the primitive_only keyword does something::
sage: BinaryQF_reduced_representatives(148, proper=False, primitive_only=False)
[x^2 + 12*x*y - y^2, 4*x^2 + 6*x*y - 7*y^2, 6*x^2 + 2*x*y - 6*y^2]
sage: BinaryQF_reduced_representatives(148, proper=False, primitive_only=True)
[x^2 + 12*x*y - y^2, 4*x^2 + 6*x*y - 7*y^2]
sage: BinaryQF_reduced_representatives(148, proper=True, primitive_only=True)
[-7*x^2 + 6*x*y + 4*y^2, x^2 + 12*x*y - y^2, 4*x^2 + 6*x*y - 7*y^2]
sage: BinaryQF_reduced_representatives(148, proper=True, primitive_only=False)
[-7*x^2 + 6*x*y + 4*y^2,
x^2 + 12*x*y - y^2,
4*x^2 + 6*x*y - 7*y^2,
6*x^2 + 2*x*y - 6*y^2]
Test another part of :trac:`29028`::
sage: BinaryQF_reduced_representatives(10^2, proper=False, primitive_only=False)
[-4*x^2 + 10*x*y,
-3*x^2 + 10*x*y,
-2*x^2 + 10*x*y,
-x^2 + 10*x*y,
10*x*y,
x^2 + 10*x*y,
2*x^2 + 10*x*y,
5*x^2 + 10*x*y]
sage: BinaryQF_reduced_representatives(10^2, proper=False, primitive_only=True)
[-3*x^2 + 10*x*y, -x^2 + 10*x*y, x^2 + 10*x*y]
sage: BinaryQF_reduced_representatives(10^2, proper=True, primitive_only=True)
[-3*x^2 + 10*x*y, -x^2 + 10*x*y, x^2 + 10*x*y, 3*x^2 + 10*x*y]
sage: BinaryQF_reduced_representatives(10^2, proper=True, primitive_only=False)
[-4*x^2 + 10*x*y,
-3*x^2 + 10*x*y,
-2*x^2 + 10*x*y,
-x^2 + 10*x*y,
10*x*y,
x^2 + 10*x*y,
2*x^2 + 10*x*y,
3*x^2 + 10*x*y,
4*x^2 + 10*x*y,
5*x^2 + 10*x*y]
"""
D = ZZ(D)
# For a fundamental discriminant all forms are primitive so we need not check:
if primitive_only:
primitive_only = not is_fundamental_discriminant(D)
form_list = []
from sage.arith.srange import xsrange
D4 = D % 4
if D4 == 2 or D4 == 3:
raise ValueError("%s is not a discriminant" % D)
if D > 0: # Indefinite
if D.is_square():
b = D.sqrt()
c = ZZ(0)
# -b/2 < a <= b/2
for a in xsrange((-b/2).floor() + 1, (b/2).floor() + 1):
if (not primitive_only) or (gcd([a,b,c]) == 1):
form_list.append(BinaryQF(a, b, c))
# We follow the description of Buchmann/Vollmer 6.7.1. They
# enumerate all reduced forms. We only want representatives.
else:
sqrt_d = D.sqrt(prec=53)
for b in xsrange(1, sqrt_d.floor() + 1):
if (D - b) % 2:
continue
A = (D - b**2) / 4
Low_a = ((sqrt_d - b) / 2).ceil()
High_a = (A.sqrt(prec=53)).floor()
for a in xsrange(Low_a, High_a + 1):
if a == 0:
continue
c = -A/a
if c in ZZ:
if (not primitive_only) or gcd([a, b, c]) == 1:
Q = BinaryQF(a, b, c)
Q1 = BinaryQF(-a, b, -c)
form_list.append(Q)
form_list.append(Q1)
if a.abs() != c.abs():
Q = BinaryQF(c, b, a)
Q1 = BinaryQF(-c, b, -a)
form_list.append(Q)
form_list.append(Q1)
else: # Definite
# Only iterate over positive a and over b of the same
# parity as D such that 4a^2 + D <= b^2 <= a^2
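# The bound on a follows from |b| <= a <= c and D = b^2 - 4*a*c < 0,
# which give -D = 4*a*c - b^2 >= 3*a^2, i.e. a <= sqrt(-D/3).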
for a in xsrange(1, 1+((-D)//3).isqrt()):
a4 = 4*a
s = D + a*a4
w = 1+(s-1).isqrt() if s > 0 else 0
if w%2 != D%2:
w += 1
for b in xsrange(w, a+1, 2):
t = b*b-D
if t % a4 == 0:
c = t // a4
if (not primitive_only) or gcd([a, b, c]) == 1:
if b > 0 and a > b and c > a:
form_list.append(BinaryQF([a, -b, c]))
form_list.append(BinaryQF([a, b, c]))
if not proper or D > 0:
# TODO:
# instead of filtering, enumerate only improper classes to start with
# filter for equivalence classes
form_list_new = []
for q in form_list:
if not any(q.is_equivalent(q1, proper=proper) for q1 in form_list_new):
form_list_new.append(q)
form_list = form_list_new
form_list.sort()
return form_list
| 31.274956
| 142
| 0.467839
|
16363f5681e4c40f69c01d1e8a911b7b087c1e55
| 9,438
|
py
|
Python
|
conf/base.py
|
code-review-doctor/lite-frontend-1
|
cb3b885bb389ea33ef003c916bea7b03a36d86bb
|
[
"MIT"
] | null | null | null |
conf/base.py
|
code-review-doctor/lite-frontend-1
|
cb3b885bb389ea33ef003c916bea7b03a36d86bb
|
[
"MIT"
] | null | null | null |
conf/base.py
|
code-review-doctor/lite-frontend-1
|
cb3b885bb389ea33ef003c916bea7b03a36d86bb
|
[
"MIT"
] | null | null | null |
import os
from environ import Env
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from django_log_formatter_ecs import ECSFormatter
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ENV_FILE = os.path.join(BASE_DIR, ".env")
if os.path.exists(ENV_FILE):
Env.read_env(ENV_FILE)
env = Env()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", False)
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=[])
# django-allow-cidr
ALLOWED_CIDR_NETS = ["10.0.0.0/8"]
WSGI_APPLICATION = "conf.wsgi.application"
INSTALLED_APPS = [
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.humanize",
"svg",
"lite_forms",
"health_check",
"health_check.cache",
"health_check.storage",
"core.api",
"core.forms",
"crispy_forms",
"crispy_forms_gds",
"core.feedback",
"formtools",
"core.cookies",
]
MIDDLEWARE = [
"allow_cidr.middleware.AllowCIDRMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.gzip.GZipMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"core.auth.middleware.AuthbrokerClientMiddleware",
"core.middleware.UploadFailedMiddleware",
"core.middleware.RequestsSessionMiddleware",
"core.middleware.NoCacheMiddleware",
"core.middleware.ValidateReturnToMiddleware",
"core.middleware.XRobotsTagMiddleware",
]
if not DEBUG:
MIDDLEWARE += ["core.middleware.AuthBrokerTokenIntrospectionMiddleware"]
FEATURE_CSP_MIDDLEWARE_ENABLED = env.bool("FEATURE_CSP_MIDDLEWARE_ENABLED", True)
if FEATURE_CSP_MIDDLEWARE_ENABLED:
MIDDLEWARE += [
"csp.middleware.CSPMiddleware",
]
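# The CSP_* settings further down configure the policy emitted by this middleware.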
SESSION_COOKIE_SECURE = env.bool("SESSION_COOKIE_SECURE", True)
SESSION_COOKIE_NAME = env.str("SESSION_COOKIE_NAME", default="exporter")
TOKEN_SESSION_KEY = env.str("TOKEN_SESSION_KEY")
# messages
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
CRISPY_ALLOWED_TEMPLATE_PACKS = ["gds"]
CRISPY_TEMPLATE_PACK = "gds"
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-gb"
TIME_ZONE = "Europe/London"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/assets/"
# Authbroker config
AUTHBROKER_URL = env.str("AUTHBROKER_URL")
AUTHBROKER_CLIENT_ID = env.str("AUTHBROKER_CLIENT_ID")
AUTHBROKER_CLIENT_SECRET = env.str("AUTHBROKER_CLIENT_SECRET")
HAWK_AUTHENTICATION_ENABLED = env.bool("HAWK_AUTHENTICATION_ENABLED", False)
HAWK_RECEIVER_NONCE_EXPIRY_SECONDS = 60
LOGIN_URL = reverse_lazy("auth:login")
DATA_DIR = os.path.dirname(BASE_DIR)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
# Cache static files
STATICFILES_STORAGE = env.str("STATICFILES_STORAGE", "whitenoise.storage.CompressedManifestStaticFilesStorage")
# File Upload
# https://github.com/uktrade/s3chunkuploader
S3_DOCUMENT_ROOT_DIRECTORY = ""
S3_APPEND_DATETIME_ON_UPLOAD = True
S3_PREFIX_QUERY_PARAM_NAME = ""
S3_DOWNLOAD_LINK_EXPIRY_SECONDS = 180
STREAMING_CHUNK_SIZE = 8192
S3_MIN_PART_SIZE = 5 * 1024 * 1024
MAX_UPLOAD_SIZE = 50 * 1024 * 1024
FILE_UPLOAD_HANDLERS = env.list("FILE_UPLOAD_HANDLERS", default=["core.file_handler.SafeS3FileUploadHandler"])
ACCEPTED_FILE_UPLOAD_MIME_TYPES = env.list(
"ACCEPTED_FILE_UPLOAD_MIME_TYPES",
default=(
# Default file types accepted for upload: pdf, doc, docx, rtf,
# xml, plain text, csv, jpeg, png and tiff
"application/pdf",
"application/msword",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/rtf",
"application/xml",
"text/xml",
"text/plain",
"text/csv",
"image/jpeg",
"image/png",
"image/tiff",
),
)
# AWS
VCAP_SERVICES = env.json("VCAP_SERVICES", {})
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY")
AWS_REGION = env.str("AWS_REGION")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {"format": "{asctime} {levelname} {message}", "style": "{"},
"ecs_formatter": {"()": ECSFormatter},
},
"handlers": {
"stdout": {"class": "logging.StreamHandler", "formatter": "simple"},
"ecs": {"class": "logging.StreamHandler", "formatter": "ecs_formatter"},
},
"root": {"handlers": ["stdout", "ecs"], "level": env.str("LOG_LEVEL", "info").upper()},
}
additional_logger_config = env.json("ADDITIONAL_LOGGER_CONFIG", default=None)
if additional_logger_config:
LOGGING["loggers"] = additional_logger_config
# Enable security features in hosted environments
SECURE_HSTS_ENABLED = env.bool("SECURE_HSTS_ENABLED", False)
SECURE_HSTS_SECONDS = 60 * 60 * 24 * 365 if SECURE_HSTS_ENABLED else None # 1 year
SECURE_BROWSER_XSS_FILTER = not DEBUG
SECURE_CONTENT_TYPE_NOSNIFF = not DEBUG
SESSION_COOKIE_SECURE = not DEBUG
SESSION_EXPIRE_SECONDS = env.int("SESSION_EXPIRE_SECONDS", default=60 * 60)
CSRF_COOKIE_SECURE = not DEBUG
CSRF_COOKIE_HTTPONLY = not DEBUG
X_FRAME_OPTIONS = "SAMEORIGIN"
# Content Security Policy
CSP_DEFAULT_SRC = env.tuple("CSP_DEFAULT_SRC", default=("'self'",))
CSP_STYLE_SRC = env.tuple("CSP_STYLE_SRC", default=("'self'",))
CSP_SCRIPT_SRC = env.tuple("CSP_SCRIPT_SRC", default=("'self'",))
CSP_FONT_SRC = env.tuple("CSP_FONT_SRC", default=("'self'",))
CSP_REPORT_ONLY = env.bool("CSP_REPORT_ONLY", False)
CSP_INCLUDE_NONCE_IN = env.tuple("CSP_INCLUDE_NONCE_IN", default=("script-src",))
if DEBUG:
import pkg_resources
try:
pkg_resources.get_distribution("django_extensions")
except pkg_resources.DistributionNotFound:
pass
else:
INSTALLED_APPS.append("django_extensions")
try:
pkg_resources.get_distribution("django_pdb")
except pkg_resources.DistributionNotFound:
pass
else:
INSTALLED_APPS.append("django_pdb")
POST_MORTEM = False
MIDDLEWARE.append("django_pdb.middleware.PdbMiddleware")
# Sentry
if env.str("SENTRY_DSN", ""):
sentry_sdk.init(
dsn=env.str("SENTRY_DSN"),
environment=env.str("SENTRY_ENVIRONMENT"),
integrations=[DjangoIntegration()],
send_default_pii=True,
)
LITE_API_URL = env.str("LITE_API_URL")
PERMISSIONS_FINDER_URL = env.str("PERMISSIONS_FINDER_URL")
if env.str("DIRECTORY_SSO_API_CLIENT_BASE_URL", ""):
DIRECTORY_SSO_API_CLIENT_API_KEY = env("DIRECTORY_SSO_API_CLIENT_API_KEY")
DIRECTORY_SSO_API_CLIENT_BASE_URL = env("DIRECTORY_SSO_API_CLIENT_BASE_URL")
DIRECTORY_SSO_API_CLIENT_DEFAULT_TIMEOUT = 30
DIRECTORY_SSO_API_CLIENT_SENDER_ID = "lite"
FEATURE_DEBUG_TOOLBAR_ON = env.bool("FEATURE_DEBUG_TOOLBAR_ON", False)
if FEATURE_DEBUG_TOOLBAR_ON:
INSTALLED_APPS += ["debug_toolbar", "requests_panel"]
DEBUG_TOOLBAR_PANELS = [
"requests_panel.panel.RequestsDebugPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.profiling.ProfilingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
]
INTERNAL_IPS = [
"127.0.0.1",
]
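# django-debug-toolbar must sit after any middleware that encodes the
# response body (such as GZipMiddleware), hence the insertion just below.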
index = MIDDLEWARE.index("django.middleware.gzip.GZipMiddleware")
MIDDLEWARE.insert(index + 1, "debug_toolbar.middleware.DebugToolbarMiddleware")
AUTHBROKER_TOKEN_INTROSPECTION_TTL = env.int("AUTHBROKER_TOKEN_INTROSPECTION_TTL", default=60 * 5)
# Gov.uk Notify
NOTIFY_KEY = env.str("NOTIFY_KEY", default="notify-test")
NOTIFY_FEEDBACK_TEMPLATE_ID = env.str("NOTIFY_FEEDBACK_TEMPLATE_ID")
NOTIFY_FEEDBACK_EMAIL = env.str("NOTIFY_FEEDBACK_EMAIL")
# GA/GTM KEY
GTM_ID = env.str("GTM_ID", default="")
| 31.46
| 111
| 0.735113
|
85c81fcd1d7bfe4e82af08eef9f6c10e871a2198
| 544
|
py
|
Python
|
jetson_nano/request_identify.py
|
hixio-mh/BatBot
|
b83bd2a9348baf708a8d009a4872d27f40177b61
|
[
"MIT"
] | 32
|
2019-12-02T08:26:31.000Z
|
2022-01-13T06:56:19.000Z
|
jetson_nano/request_identify.py
|
resslerruntime/BatBot
|
b83bd2a9348baf708a8d009a4872d27f40177b61
|
[
"MIT"
] | 15
|
2020-01-28T22:17:38.000Z
|
2022-03-12T00:02:01.000Z
|
jetson_nano/request_identify.py
|
resslerruntime/BatBot
|
b83bd2a9348baf708a8d009a4872d27f40177b61
|
[
"MIT"
] | 17
|
2019-11-26T14:46:05.000Z
|
2021-07-05T12:46:58.000Z
|
#!/usr/bin/python3
import socket
import os
import sys
try:
image_name = sys.argv[1]
except IndexError:
image_name = '/tmp/capture.jpg'
def client(message):
host = socket.gethostbyname("localhost") # resolve localhost to an IP address
port = 9310 # Make sure it's within the > 1024 and < 65535 range
s = socket.socket()
s.connect((host, port))
s.send(message.encode('utf-8'))
data = s.recv(1024).decode('utf-8')
print('I see: ' + data)
s.close()
if __name__ == '__main__':
client(image_name)
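# A minimal sketch of the listener this client expects. This is only an
# assumption drawn from the exchange above (it is not taken from the actual
# server code): something bound to port 9310 that receives an image path,
# identifies the image, and replies with a label. `identify_image` below is a
# hypothetical placeholder for that identification step.
#
# def serve():
#     s = socket.socket()
#     s.bind((socket.gethostbyname("localhost"), 9310))
#     s.listen(1)
#     while True:
#         conn, _ = s.accept()
#         path = conn.recv(1024).decode('utf-8')
#         conn.send(identify_image(path).encode('utf-8'))  # hypothetical
#         conn.close()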
| 18.758621
| 72
| 0.621324
|
2eca262cc586ce149145c73f24e884daae3ceecd
| 29,439
|
py
|
Python
|
mlps/core/data/cnvrtr/functions/UserAgentInfo.py
|
seculayer/automl-mlps
|
80569909ec1c25db1ceafbb85b27d069d1a66aa3
|
[
"Apache-2.0"
] | null | null | null |
mlps/core/data/cnvrtr/functions/UserAgentInfo.py
|
seculayer/automl-mlps
|
80569909ec1c25db1ceafbb85b27d069d1a66aa3
|
[
"Apache-2.0"
] | 2
|
2022-03-31T07:39:59.000Z
|
2022-03-31T07:40:18.000Z
|
mlps/core/data/cnvrtr/functions/UserAgentInfo.py
|
seculayer/AutoAPE-mlps
|
80569909ec1c25db1ceafbb85b27d069d1a66aa3
|
[
"Apache-2.0"
] | 1
|
2021-11-03T09:09:07.000Z
|
2021-11-03T09:09:07.000Z
|
# -*- coding: utf-8 -*-
# Author : Manki Baek
# e-mail : bmg8551@seculayer.co.kr
# Powered by Seculayer © 2017 AI-TF Team
from mlps.core.data.cnvrtr.object.ua import UserAgentParser
from mlps.core.data.cnvrtr.ConvertAbstract import ConvertAbstract
class UserAgentInfo(ConvertAbstract):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.num_feat = 1
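# A single output feature: the "os|browser|device" string built in apply().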
def apply(self, data, reuse=False):
try:
parsed_dict = UserAgentParser.Parse(data)
os_family = str(parsed_dict["os"]["family"])
user_agent_family = str(parsed_dict["user_agent"]["family"])
device_family = str(parsed_dict["device"]["family"])
device_family = device_family.split(" ")[0]
device_family = device_family.split("-")[0]
# device_brand = str(parsed_dict["device"]["brand"])
result = [os_family + "|" + user_agent_family + "|" + device_family]  # + "|" + device_brand]
return result
except Exception as e:
# self.LOGGER.error(e)
return ["Other|Other|Other"] * self.max_len
if __name__ == "__main__":
# file_name = "/home/seculayer/mkbaek/src/apeflow/tools/test/data/list_of_user_agent.txt" # 4769 row
#
# data_list = list()
# with open(file_name, "r") as f:
# for line in f:
# data_list.append(line[:-1])
data_list = [
"vb wininet",
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Linux; U; Android 2.3.5; es-es; SAMSUNG GT-I9001/I9001BUKP2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Opera/9.80 (Windows NT 6.2; Win64; x64) Presto/2.12 Version/12.16",
"Mozilla/5.0 (iPhone; CPU iPhone OS 613 like Mac OS X) AppleWebKit/536.26 (KHTML like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.95 Safari/537.11",
"Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.63 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.95 Safari/537.11",
"Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.31 (KHTML like Gecko) Chrome/26.0.1410.64 Safari/537.31",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/30.0.1599.101 Safari/537.36",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/27.0.1453.110 Safari/537.36",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:26.0) Gecko/20100101 Firefox/26.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/39.0.2171.95 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.63 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.63 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; WOW64; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/33.0.1750.154 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/38.0.2125.111 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.114 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1094) AppleWebKit/537.77.4 (KHTML like Gecko) Version/7.0.5 Safari/537.77.4",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1094) AppleWebKit/537.78.2 (KHTML like Gecko) Version/7.0.6 Safari/537.78.2",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.57 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.124 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 712 like Mac OS X) AppleWebKit/537.51.2 (KHTML like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53",
"Mozilla/5.0 (iPad; CPU OS 613 like Mac OS X) AppleWebKit/536.26 (KHTML like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.31 (KHTML like Gecko) Chrome/26.0.1410.64 Safari/537.31",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.31 (KHTML like Gecko) Chrome/26.0.1410.64 Safari/537.31",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 613 like Mac OS X) AppleWebKit/536.26 (KHTML like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1094) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.94 Safari/537.36",
"Mozilla/5.0 (X11; Ubuntu; Linux x8664; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0",
"Mozilla/5.0 (iPad; CPU OS 511 like Mac OS X) AppleWebKit/534.46 (KHTML like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3",
"Mozilla/5.0 (X11; Ubuntu; Linux x8664; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1094) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1094) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/32.0.1700.107 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0",
"Mozilla/5.0 (X11; Linux x8664) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (X11; Linux x8664) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.94 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/28.0.1500.95 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:26.0) Gecko/20100101 Firefox/26.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1010) AppleWebKit/600.1.8 (KHTML like Gecko) Version/8.0 Safari/600.1.8",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.95 Safari/537.11",
"Mozilla/5.0 (Windows NT 5.1; rv:12.0) Gecko/20100101 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/34.0.1847.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/30.0.1599.101 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.97 Safari/537.11",
"Mozilla/5.0 (Windows NT 5.1; rv:27.0) Gecko/20100101 Firefox/27.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/39.0.2171.95 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/27.0.1453.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101 Firefox/21.0",
"Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0",
"Mozilla/5.0 (Windows NT 5.1; rv:22.0) Gecko/20100101 Firefox/22.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
"Mozilla/5.0 (Windows NT 6.1; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (Windows NT 5.1; rv:19.0) Gecko/20100101 Firefox/19.0",
"Mozilla/5.0 (Windows NT 5.1; rv:25.0) Gecko/20100101 Firefox/25.0",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/30.0.1599.101 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/34.0.1847.131 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/28.0.1500.72 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/33.0.1750.154 Safari/537.36",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2)",
"Mozilla/5.0 (Windows NT 6.1; rv:30.0) Gecko/20100101 Firefox/30.0",
"Mozilla/5.0 (Windows NT 5.1; rv:24.0) Gecko/20100101 Firefox/24.0",
"Mozilla/5.0 (Windows NT 5.1; rv:23.0) Gecko/20100101 Firefox/23.0",
"Mozilla/5.0 (Windows NT 6.1; rv:27.0) Gecko/20100101 Firefox/27.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.114 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/29.0.1547.66 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; rv:24.0) Gecko/20100101 Firefox/24.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:36.0) Gecko/20100101 Firefox/36.0",
"Mozilla/5.0 (Windows NT 5.1; rv:20.0) Gecko/20100101 Firefox/20.0",
"Mozilla/5.0 (Windows NT 6.1; rv:25.0) Gecko/20100101 Firefox/25.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/29.0.1547.76 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; rv:29.0) Gecko/20100101 Firefox/29.0",
"Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.4 (KHTML like Gecko) Chrome/22.0.1229.94 Safari/537.4",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)",
"Mozilla/5.0 (Windows NT 5.1; rv:30.0) Gecko/20100101 Firefox/30.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 704 like Mac OS X) AppleWebKit/537.51.1 (KHTML like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53",
"Mozilla/5.0 (Windows NT 6.1; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.97 Safari/537.11",
"Mozilla/5.0 (Windows NT 5.1; rv:17.0) Gecko/20100101 Firefox/17.0",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:29.0) Gecko/20100101 Firefox/29.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 711 like Mac OS X) AppleWebKit/537.51.2 (KHTML like Gecko) Version/7.0 Mobile/11D201 Safari/9537.53",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/30.0.1599.69 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20100101 Firefox/21.0",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/29.0.1547.76 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.22 (KHTML like Gecko) Chrome/25.0.1364.172 Safari/537.22",
"Mozilla/5.0 (Windows NT 6.1; rv:28.0) Gecko/20100101 Firefox/28.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/29.0.1547.76 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; rv:22.0) Gecko/20100101 Firefox/22.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.57 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/27.0.1453.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:28.0) Gecko/20100101 Firefox/28.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/39.0.2171.71 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/28.0.1500.72 Safari/537.36",
"Mozilla/5.0 (compatible; proximic; +http://www.proximic.com/info/spider.php)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/33.0.1750.146 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; rv:23.0) Gecko/20100101 Firefox/23.0"
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Opera/9.80 (Windows NT 6.2; Win64; x64) Presto/2.12 Version/12.16",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.95 Safari/537.11",
"Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.63 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.95 Safari/537.11",
"Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.31 (KHTML like Gecko) Chrome/26.0.1410.64 Safari/537.31",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/30.0.1599.101 Safari/537.36",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/27.0.1453.110 Safari/537.36",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:26.0) Gecko/20100101 Firefox/26.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/39.0.2171.95 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.11 (KHTML like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.63 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.63 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; WOW64; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/33.0.1750.154 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/38.0.2125.111 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.114 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1094) AppleWebKit/537.77.4 (KHTML like Gecko) Version/7.0.5 Safari/537.77.4",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.120 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML like Gecko) Chrome/35.0.1916.153 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 1094) AppleWebKit/537.78.2 (KHTML like Gecko) Version/7.0.6 Safari/537.78.2",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/36.0.1985.143 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/31.0.1650.57 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20100101 Firefox/33.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.124 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/37.0.2062.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 712 like Mac OS X) AppleWebKit/537.51.2 (KHTML like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53",
"Mozilla/5.0 (iPad; CPU OS 613 like Mac OS X) AppleWebKit/536.26 (KHTML like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.31 (KHTML like Gecko) Chrome/26.0.1410.64 Safari/537.31",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.31 (KHTML like Gecko) Chrome/26.0.1410.64 Safari/537.31",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 613 like Mac OS X) AppleWebKit/536.26 (KHTML like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25",
"Mozilla/5.0 (Linux; U; Android 2.3.3; es-es; SAMSUNG GT-I9100/I9100BUKG2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.5; fr-fr; SAMSUNG GT-I9100/I9100BUKJ3 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.3; fr-fr; SAMSUNG GT-I9100/I9100BUKG2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.5; es-es; SAMSUNG GT-I9001/I9001BUKP2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.5; en-gb; SAMSUNG GT-I9100/I9100BUKJ3 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.5; es-es; SAMSUNG GT-I9100/I9100BUKJ3 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.5; de-de; SAMSUNG GT-I9100/I9100BUKJ3 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.3; de-de; SAMSUNG GT-I9100/I9100BUKG2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.3; en-gb; SAMSUNG GT-I9100/I9100BUKG2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.5; de-de; SAMSUNG GT-I9001/I9001BUKP2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.3; es-es; SAMSUNG GT-S5830/S5830BUKPE Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.5; en-gb; SAMSUNG GT-S5570/S5570BUKS2 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.3; ro-ro; SAMSUNG GT-I9001/I9001BUKG1 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"SirdataBot"
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; LG-GT540; Build/Donut) AppleWebKit/528.5+ (KHTML like Gecko) Version/3.1.2 Mobile Safari/525.20.1 Java/Jbed/7.0 Profile/MIDP-2.1 Configuration/CLDC-1.1 MMS/LG-Android-MMS-V1.0/1.2 AppEngine-Google; (+http://code.",
"SAMSUNG-SGH-E250/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/6.2.3.3.c.1.101 (GUI) MMP/2.0 (compatible; Googlebot-Mobile/2.1; +http://www.google.com/bot.html)",
"TulipChain/5.xx (http://ostermiller.org/tulipchain/) Java/1.x.1_0x (http://apple.com/) Mac_OS_X/10.2.8",
"TulipChain/5.x (http://ostermiller.org/tulipchain/) Java/1.x.1_0x (http://java.sun.com/) Linux/2.4.17",
"Nokia303/5.0 (14.76) Profile/MIDP-2.1 Configuration/CLDC-1.1 Nokia303/5.0 (14.87) Profile/MIDP-2.1 Configuration/CLDC-1.1 UCWEB/9.0 (Java; U; MIDP-2.0; en-US; nokia303) U2/1.0.0 UCBrowser/9.2.0.311 U2/1.0.0 Mobile UNTRUSTED/1.0 AppEngine-Google; (+http://",
"Nokia303/5.0 (13.47) Profile/MIDP-2.1 Configuration/CLDC-1.1 UCWEB/2.0 (Java; U; MIDP-2.0; id; Nokia303) U2/1.0.0 UCBrowser/9.5.0.449 U2/1.0.0 Mobile UNTRUSTED/1.0",
"Mozilla/5.0 (iPad; CPU OS 712 like Mac OS X) AppleWebKit/537.51.2 (KHTML like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53",
"Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; DesireA8181 Build/FRF91) App3leWebKit/53.1 (KHTML like Gecko) Version/4.0 Mobile Safari/533.1",
# "Mozilla/5.0 (compatible; Nmap Scripting Engine; http://nmap.org/book/nse.html)",
# "Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/20070817 IceWease1/2.0.0.6-g3<?php eval(chr(100).chr(105).chr(101).chr(40).chr(39).chr(49).chr(55).chr(73).chr(53).chr(51).chr(48).chr(86).chr(65).chr(117).chr(52).chr(39).chr(41).chr(59));?>",
# "vb wininet"
]
tokenizer = UserAgentInfo(stat_dict=None, arg_list=[15])
from datetime import datetime
start_time = datetime.now()
data_lst = list()
for data in data_list:
_result = tokenizer.apply(data)
data_lst.append(_result)
print(_result)
# print(len(result))
end_time = datetime.now()
print("::::::data convert time ::::::::: {}".format(end_time - start_time))
import numpy as np
# dimension reduction
data_lst = np.squeeze(data_lst, axis=1)
data_lst, count_list = np.unique(data_lst, return_counts=True)
count_sort_ind = np.argsort(-count_list)
data_lst = data_lst[count_sort_ind].tolist()
count_list = count_list[count_sort_ind].tolist()
del_index_list = list()
for idx, data in enumerate(data_lst):
if "Spider" in data or data == "Other|Other|Other":
del_index_list.append(idx)
del_index_list.reverse()
for del_idx in del_index_list:
    del data_lst[del_idx]
    del count_list[del_idx]
distance_list = list()
len_data = len(data_lst)
for idx in range(len_data):
distance_list.append(idx/len_data)
print(len(data_lst))
print(len(count_list))
print(data_lst)
print(count_list)
print(distance_list)
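# --- Added sketch (not part of the original script): the three parallel lists
# above can be folded into a single lookup table, e.g. for feeding back into the
# tokenizer as its stat_dict argument. The exact shape stat_dict expects is an
# assumption here, not a confirmed UserAgentInfo API.
ua_stats = {
    ua: {"count": cnt, "distance": dist}
    for ua, cnt, dist in zip(data_lst, count_list, distance_list)
}
print(len(ua_stats))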
| 85.330435
| 271
| 0.661571
|
3e991a05f429df6ca7356d05e0de7cd7a1f0c1ad
| 2,930
|
py
|
Python
|
Articles/templatetags/UserFilters.py
|
Abdulrahmannaser/Journal
|
ee10dfe6e3d447087b4de829a4a46af74595b730
|
[
"MIT"
] | null | null | null |
Articles/templatetags/UserFilters.py
|
Abdulrahmannaser/Journal
|
ee10dfe6e3d447087b4de829a4a46af74595b730
|
[
"MIT"
] | null | null | null |
Articles/templatetags/UserFilters.py
|
Abdulrahmannaser/Journal
|
ee10dfe6e3d447087b4de829a4a46af74595b730
|
[
"MIT"
] | 2
|
2020-07-08T00:55:49.000Z
|
2020-12-11T04:14:50.000Z
|
from django import template
from Python import Hashing
from Python.init import OnlineUser
from Articles.models import Posts, LikesDisLikes, Comments
register = template.Library()
############################################################################################
# User in Article
@register.filter(name='GetUserPicture')
def GetUserPicture(User):
Result = Hashing.GetAllFromHashing([{'Type': 'Users', 'Data': User.Picture, 'Key': 'Picture'}])
return OnlineUser if Result['Result'] == -1 else Result['Data']['Picture']
@register.filter(name='GetUserID')
def GetUserID(User):
return str(User.id)
@register.filter(name='GetUserName')
def GetUserName(User):
Result = Hashing.GetAllFromHashing([{'Type': 'Users', 'Data': User.Name, 'Key': 'Name'}])
return 'UnKnown' if Result['Result'] == -1 else Result['Data']['Name']
@register.filter(name='GetUserLikes')
def GetUserLikes(User):
Result = Hashing.GetAllFromHashing([{'Type': 'Users', 'Data': User.Email, 'Key': 'Email'}])
if Result['Result'] == -1:
return '0'
return LikesDisLikes.objects.filter(User_Email=Hashing.Hash_LikeDisLike(
Result['Data']['Email']), Status=0).count()
@register.filter(name='GetUserDisLikes')
def GetUserDisLikes(User):
Result = Hashing.GetAllFromHashing([{'Type': 'Users', 'Data': User.Email, 'Key': 'Email'}])
if Result['Result'] == -1:
return '0'
return LikesDisLikes.objects.filter(User_Email=Hashing.Hash_LikeDisLike(
Result['Data']['Email']), Status=1).count()
@register.filter(name='GetUserComments')
def GetUserComments(User):
Result = Hashing.GetAllFromHashing([{'Type': 'Users', 'Data': User.Email, 'Key': 'Email'}])
if Result['Result'] == -1:
return '0'
return Comments.objects.filter(
User_Email=Hashing.Hash_Comments(Result['Data']['Email'])).count()
@register.filter(name='GetUserArticlesNumber')
def GetUserArticlesNumber(User):
Result = Hashing.GetAllFromHashing([{'Type': 'Users', 'Data': User.Email, 'Key': 'Email'}])
if Result['Result'] == -1:
return 'UnKnown'
return Posts.objects.filter(User_Email=Hashing.Hash_Articles(Result['Data']['Email']),
Deleted=0).count()
############################################################################################
# User in Profile
@register.filter(name='GetLikes')
def GetLikes(User_Email):
return LikesDisLikes.objects.filter(User_Email=Hashing.Hash_LikeDisLike(User_Email),
Status=0).count()
@register.filter(name='GetDisLikes')
def GetDisLikes(User_Email):
return LikesDisLikes.objects.filter(User_Email=Hashing.Hash_LikeDisLike(User_Email),
Status=1).count()
@register.filter(name='GetComments')
def GetComments(User_Email):
return Comments.objects.filter(User_Email=Hashing.Hash_Comments(User_Email)).count()
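# Hedged usage note (added, not part of the original file): these filters are
# meant to be called from Django templates after loading the library, e.g.
#   {% load UserFilters %}
#   {{ article.User|GetUserName }}  or  {{ user_email|GetLikes }}
# The template variable names above are illustrative assumptions; only the
# filter names match the @register.filter registrations in this module.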
| 36.17284
| 99
| 0.633447
|
1647f633d5bcc1f0d647ed182d51fd0ed2b6be4e
| 5,757
|
py
|
Python
|
data/p3BR/R2/benchmark/startQiskit123.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit123.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit123.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=21
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
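# Added mini-check (illustrative, not part of the original benchmark): bitwise_dot
# returns the parity of the bitwise AND of two bit strings, while bitwise_xor
# returns the per-position XOR reversed (note the res[::-1] above).
assert bitwise_dot("011", "101") == "1"    # 0*1 + 1*0 + 1*1 = 1 (mod 2)
assert bitwise_xor("011", "101") == "011"  # per-bit xor is "110", reversed -> "011"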
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
    # invert the last qubit (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[2]) # number=11
prog.x(input_qubit[2]) # number=12
prog.cx(input_qubit[0],input_qubit[2]) # number=13
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.cx(input_qubit[2],input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=14
prog.cx(input_qubit[2],input_qubit[1]) # number=10
prog.cx(input_qubit[2],input_qubit[0]) # number=18
prog.z(input_qubit[2]) # number=19
prog.cx(input_qubit[2],input_qubit[0]) # number=20
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit123.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
    info = execute(circuit1, backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 29.829016
| 140
| 0.631579
|
3b87c37a00654b136233bc13a8413cdc0d47dd88
| 2,209
|
py
|
Python
|
openstack_dashboard/dashboards/project/networks/ports/tables.py
|
shhui/horizon
|
fd8cf6e31c07b147289bfb86c90133599eb2906e
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/networks/ports/tables.py
|
shhui/horizon
|
fd8cf6e31c07b147289bfb86c90133599eb2906e
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/networks/ports/tables.py
|
shhui/horizon
|
fd8cf6e31c07b147289bfb86c90133599eb2906e
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import template
from django.utils.translation import ugettext_lazy as _
from horizon import tables
def get_fixed_ips(port):
template_name = 'project/networks/ports/_port_ips.html'
context = {"ips": port.fixed_ips}
return template.loader.render_to_string(template_name, context)
def get_attached(port):
if port['device_owner']:
return port['device_owner']
elif port['device_id']:
return _('Attached')
else:
return _('Detached')
class UpdatePort(tables.LinkAction):
name = "update"
verbose_name = _("Edit Port")
url = "horizon:project:networks:editport"
classes = ("ajax-modal", "btn-edit")
def get_link_url(self, port):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id, port.id))
class PortsTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:networks:ports:detail")
fixed_ips = tables.Column(get_fixed_ips, verbose_name=_("Fixed IPs"))
attached = tables.Column(get_attached, verbose_name=_("Attached Device"))
status = tables.Column("status", verbose_name=_("Status"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"))
def get_object_display(self, port):
return port.id
class Meta:
name = "ports"
verbose_name = _("Ports")
row_actions = (UpdatePort,)
| 32.970149
| 78
| 0.678135
|
a543df82a3b61f8adc41fc40f93816de7b7ca782
| 2,423
|
py
|
Python
|
cli2gui/tojson/optparse2json.py
|
FHPythonUtils/Cli2Gui
|
c84b239e9ef10d78878a3d45ccca6c237a9404c9
|
[
"MIT"
] | 26
|
2020-04-13T19:35:47.000Z
|
2022-03-21T18:47:45.000Z
|
cli2gui/tojson/optparse2json.py
|
FHPythonUtils/Cli2Gui
|
c84b239e9ef10d78878a3d45ccca6c237a9404c9
|
[
"MIT"
] | 3
|
2020-03-20T18:05:34.000Z
|
2021-07-25T21:46:28.000Z
|
cli2gui/tojson/optparse2json.py
|
FredHappyface/Python.Cli2Gui
|
688b2b552a22dfbaec4199bdd47eeef6d8649274
|
[
"MIT"
] | 2
|
2021-02-28T13:56:55.000Z
|
2021-11-19T05:48:27.000Z
|
"""Generate a dict describing optparse arguments.
pylint and pylance both want me to not access protected methods - I know better ;)
"""
# pylint: disable=protected-access,deprecated-module
# pyright: reportPrivateUsage=false
from __future__ import annotations
import optparse
from .. import c2gtypes
def extractOptions(optionGroup: optparse.OptionGroup) -> c2gtypes.Group:
"""Get the actions as json for each item under a group."""
return {
"name": optionGroup.title, # type: ignore # title is confirmed to exist
# List of arg_items that are not help messages
"arg_items": list(
categorize(
				[action for action in optionGroup.option_list if action.action != "help"]
)
),
"groups": [],
}
def extractGroups(parser: optparse.OptionParser) -> c2gtypes.Group:
"""Get the actions as json for each item and group under the parser."""
argItems = list(
		categorize([action for action in parser.option_list if action.action != "help"])
)
return {
"name": "Arguments",
"arg_items": argItems,
"groups": [extractOptions(group) for group in parser.option_groups],
}
def actionToJson(action: optparse.Option, widget: str) -> c2gtypes.Item:
"""Generate json for an action and set the widget - used by the application."""
return {
"type": widget,
"display_name": str(action.metavar or action.dest),
"help": str(action.help),
"commands": action._long_opts + action._short_opts,
"choices": action.choices if action.choices else [], # type: ignore
"dest": action.dest or "",
"_other": {"nargs": str(action.nargs or "")},
}
def categorize(actions: list[optparse.Option]):
"""Catergorise each action and generate json."""
for action in actions:
# _actions which are either, store_bool, etc..
if action.action in ("store_true", "store_false"):
yield actionToJson(action, "Bool")
# _actions which are of type _CountAction
elif action.choices: # type: ignore # choices is confirmed to exist
yield actionToJson(action, "Dropdown")
elif action.action in "count":
yield actionToJson(action, "Counter")
else:
yield actionToJson(action, "TextBox")
def convert(parser: optparse.OptionParser) -> c2gtypes.ParserRep:
"""Convert argparse to a dict.
Args:
parser (optparse.OptionParser): optparse parser
Returns:
c2gtypes.ParserRep: dictionary representing parser object
"""
return {"parser_description": "", "widgets": [extractGroups(parser)]}
| 31.064103
| 86
| 0.721007
|
cd7867ff906ef185e138c6eb4bfefd0748b000f4
| 1,635
|
py
|
Python
|
mysite/mongodb/migrations/0002_auto_20201121_1144.py
|
dduong711/mongodb_test_project
|
dc84e89f0f856a0be2f70e5610a238565e06fda9
|
[
"MIT"
] | null | null | null |
mysite/mongodb/migrations/0002_auto_20201121_1144.py
|
dduong711/mongodb_test_project
|
dc84e89f0f856a0be2f70e5610a238565e06fda9
|
[
"MIT"
] | null | null | null |
mysite/mongodb/migrations/0002_auto_20201121_1144.py
|
dduong711/mongodb_test_project
|
dc84e89f0f856a0be2f70e5610a238565e06fda9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.11 on 2020-11-21 04:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mongodb', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='mongodb',
name='connection_string',
),
migrations.AddField(
model_name='mongodb',
name='db_name',
field=models.CharField(default='hackerman', max_length=256),
preserve_default=False,
),
migrations.AddField(
model_name='mongodb',
name='db_password',
field=models.CharField(default='1234', max_length=256),
preserve_default=False,
),
migrations.AddField(
model_name='mongodb',
name='db_user',
field=models.CharField(default='user', max_length=256),
preserve_default=False,
),
migrations.AddField(
model_name='mongodb',
name='srv',
field=models.BooleanField(default=True),
),
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('host_name', models.CharField(max_length=1024)),
('port', models.IntegerField(blank=True, null=True)),
('db', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='host', to='mongodb.MongoDB')),
],
),
]
| 32.058824
| 130
| 0.565138
|
eec7c6d923ddaca90987786f4b509fa63df1daa3
| 2,055
|
py
|
Python
|
qiling/qiling/arch/arch.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:01.000Z
|
2021-06-04T14:27:15.000Z
|
qiling/qiling/arch/arch.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | null | null | null |
qiling/qiling/arch/arch.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:09.000Z
|
2021-06-04T14:27:21.000Z
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
from abc import ABC, abstractmethod
from capstone import Cs
from keystone import Ks
from . import utils
from qiling import Qiling
from qiling.const import QL_ARCH
class QlArch(ABC):
def __init__(self, ql: Qiling):
self.ql = ql
    # init_uc - the initialized Unicorn engine instance
@property
def init_uc(self):
return self.ql.arch.get_init_uc()
# push value to stack
@abstractmethod
def stack_push(self, data: int) -> int:
pass
    # pop value from stack
@abstractmethod
def stack_pop(self) -> int:
pass
# write stack value
@abstractmethod
def stack_write(self, offset: int, data: int) -> None:
pass
# read stack value
@abstractmethod
def stack_read(self, offset: int) -> int:
pass
# set PC
def set_pc(self, address: int):
self.ql.reg.arch_pc = address
# get PC
def get_pc(self) -> int:
return self.ql.reg.arch_pc
# set stack pointer
def set_sp(self, address: int):
self.ql.reg.arch_sp = address
# get stack pointer
def get_sp(self) -> int:
return self.ql.reg.arch_sp
# Unicorn's CPU state save
def context_save(self):
return self.ql.uc.context_save()
# Unicorn's CPU state restore method
def context_restore(self, saved_context):
self.ql.uc.context_restore(saved_context)
def create_disassembler(self) -> Cs:
if self.ql.archtype in (QL_ARCH.ARM, QL_ARCH.ARM_THUMB):
reg_cpsr = self.ql.reg.cpsr
else:
reg_cpsr = None
return utils.ql_create_disassembler(self.ql.archtype, self.ql.archendian, reg_cpsr)
def create_assembler(self) -> Ks:
if self.ql.archtype in (QL_ARCH.ARM, QL_ARCH.ARM_THUMB):
reg_cpsr = self.ql.reg.cpsr
else:
reg_cpsr = None
return utils.ql_create_assembler(self.ql.archtype, self.ql.archendian, reg_cpsr)
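# Hedged sketch (added; not Qiling's actual per-arch code): one way a concrete
# architecture class could implement the abstract stack methods above, assuming
# a 32-bit little-endian target and the ql.mem read/write helpers. The real
# implementations live in the per-architecture modules.
import struct
class QlArchDemo32(QlArch):
    def stack_push(self, data: int) -> int:
        # grow the stack downwards by one machine word, then store the value
        self.ql.reg.arch_sp -= 4
        self.ql.mem.write(self.ql.reg.arch_sp, struct.pack("<I", data))
        return self.ql.reg.arch_sp
    def stack_pop(self) -> int:
        # read the word at the stack pointer, then shrink the stack
        value = struct.unpack("<I", bytes(self.ql.mem.read(self.ql.reg.arch_sp, 4)))[0]
        self.ql.reg.arch_sp += 4
        return value
    def stack_write(self, offset: int, data: int) -> None:
        self.ql.mem.write(self.ql.reg.arch_sp + offset, struct.pack("<I", data))
    def stack_read(self, offset: int) -> int:
        return struct.unpack("<I", bytes(self.ql.mem.read(self.ql.reg.arch_sp + offset, 4)))[0]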
| 22.096774
| 91
| 0.639903
|
bb81356c54bcd5fe6a1085e0c438b8b6cc72aebf
| 521
|
py
|
Python
|
test.py
|
YuaCC/galaxyFly
|
6be31d6f6b5422ab5c1f679e5fe2b1d6bb08b9ec
|
[
"BSD-2-Clause"
] | null | null | null |
test.py
|
YuaCC/galaxyFly
|
6be31d6f6b5422ab5c1f679e5fe2b1d6bb08b9ec
|
[
"BSD-2-Clause"
] | null | null | null |
test.py
|
YuaCC/galaxyFly
|
6be31d6f6b5422ab5c1f679e5fe2b1d6bb08b9ec
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
from finitefield import GF
q = 1024
field = GF(q)
elts_map = {}
for (i, v) in enumerate(field):
    elts_map[i] = v
print(elts_map)
rev_elts_map = {v:k for k,v in elts_map.items()}
print(rev_elts_map)
add_table = np.zeros((q,q))
for i in range(q):
for j in range(q):
add_table[i][j] = rev_elts_map[elts_map[i]+elts_map[j]]
mul_table = np.zeros((q,q))
for i in range(q):
for j in range(q):
mul_table[i][j] = rev_elts_map[elts_map[i]*elts_map[j]]
print(add_table)
print(mul_table)
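# Added follow-up sketch: with the precomputed tables, GF(1024) arithmetic can be
# done purely on integer indices 0..q-1, which is the usual point of building
# them. The helper names are made up; whether index 0 is the zero element depends
# on the iteration order of GF(q) above, so that is not assumed here.
def gf_add(i, j):
    return int(add_table[i][j])
def gf_mul(i, j):
    return int(mul_table[i][j])
print(gf_add(1, 2), gf_mul(1, 2))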
| 22.652174
| 63
| 0.667946
|
617aede48b6b735d32ddd2a8507cdc61395b379f
| 338
|
py
|
Python
|
src/sortingandsearching/tests/test_count_distinct.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/sortingandsearching/tests/test_count_distinct.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/sortingandsearching/tests/test_count_distinct.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
from sortingandsearching.count_distinct import *
class TestCountDistinct:
def test_count_distinct(self):
assert count_distinct([]) == 0
assert count_distinct([1]) == 1
assert count_distinct([1, 1]) == 1
assert count_distinct([2, 1]) == 2
assert count_distinct([2, 3, 2, 2, 3]) == 2
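# Hedged reference sketch (added for context; the real implementation lives in
# sortingandsearching/count_distinct.py): sort first, then count positions where
# the value changes, so duplicates collapse in O(n log n).
def _count_distinct_reference(arr):
    arr = sorted(arr)
    return sum(1 for i, v in enumerate(arr) if i == 0 or v != arr[i - 1])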
| 30.727273
| 52
| 0.609467
|
37be16023eef8ed0c4674ff3fa4681010a199ee0
| 6,937
|
py
|
Python
|
ip.py
|
koodaamo/pyip
|
edfd43dd6eb99df850d791664ebff4ebe8fa6d7a
|
[
"PSF-2.0"
] | null | null | null |
ip.py
|
koodaamo/pyip
|
edfd43dd6eb99df850d791664ebff4ebe8fa6d7a
|
[
"PSF-2.0"
] | 1
|
2015-12-07T16:45:53.000Z
|
2015-12-07T16:45:53.000Z
|
ip.py
|
koodaamo/pyip
|
edfd43dd6eb99df850d791664ebff4ebe8fa6d7a
|
[
"PSF-2.0"
] | null | null | null |
#
# pyip is a Python package offering assembling/disassembling of raw IP packets,
# including IP, UDP, and ICMP. It also includes two utilities built on raw IP:
# traceroute and ping.
#
# pyip is released under PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2, and is
# a project inspired by 'ping' written by Jeremy Hylton.
#
# Author: Kenneth Jiang, kenneth.jiang@gmail.com
#
import inetutils
import socket
import struct
import string
import os
IPVERSION = 4
IP_DF = 0x4000
IP_MF = 0x2000
IP_MAXPACKET = 65535
IPTOS_LOWDELAY = 0x10
IPTOS_THROUGHPUT = 0x08
IPTOS_RELIABILITY = 0x04
IPTOS_PREC_NETCONTROL = 0xe0
IPTOS_PREC_INTERNETCONTROL = 0xc0
IPTOS_PREC_CRITIC_ECP = 0xa0
IPTOS_PREC_FLASHOVERRIDE = 0x80
IPTOS_PREC_FLASH = 0x60
IPTOS_PREC_IMMEDIATE = 0x40
IPTOS_PREC_PRIORITY = 0x20
IPTOS_PREC_ROUTINE = 0x00
IPOPT_CONTROL = 0x00
IPOPT_RESERVED1 = 0x20
IPOPT_DEBMEAS = 0x40
IPOPT_RESERVED2 = 0x60
IPOPT_EOL = 0
IPOPT_NOP = 1
IPOPT_RR = 7
IPOPT_TS = 68
IPOPT_SECURITY = 130
IPOPT_LSRR = 131
IPOPT_SATID = 136
IPOPT_SSRR = 137
IPOPT_OPTVAL = 0
IPOPT_OLEN = 1
IPOPT_OFFSET = 2
IPOPT_MINOFF = 4
IPOPT_TS_TSONLY = 0
IPOPT_TS_TSANDADDR = 1
IPOPT_TS_PRESPEC = 2
IPOPT_SECUR_UNCLASS = 0x0000
IPOPT_SECUR_CONFID = 0xf135
IPOPT_SECUR_EFTO = 0x789a
IPOPT_SECUR_MMMM = 0xbc4d
IPOPT_SECUR_RESTR = 0xaf13
IPOPT_SECUR_SECRET = 0xd788
IPOPT_SECUR_TOPSECRET = 0x6bc5
MAXTTL = 255
IPFRAGTTL = 60
IPTTLDEC = 1
IP_MSS = 576
MIN_HDR_SIZE_IN_BYTES = 20
class Packet:
"""An IP packet.
Doesn't handle IP options yet (but you have the option of adding
support).
"""
def __init__(self,
v = IPVERSION,
hl = 5,
tos = IPTOS_PREC_ROUTINE,
len = 20,
id = 0,
df = 0,
off = 0,
ttl = 0,
p = 0,
sum = 0,
src = None,
dst = None,
data = ''):
self.v = v
        self.hl = hl # this implementation punts on options
self.tos = tos
self.len = len # begin with header length
self.id = id
self.df = df
self.off = off
self.ttl = ttl
self.p = p
self.sum = sum
self.src = src
self.dst = dst
self.data = data
self.raw_packet = ''
def __repr__(self):
begin = "<IPv%d id=%d proto=%d src=%s dst=%s datalen=%d " % \
(self.v, self.id, self.p, self.src, self.dst,
self.len - self.hl * 4)
if len(self.data) == 0:
rep = begin + "\'\'>"
elif len(self.data) < 10:
rep = begin + "%s>" % repr(self.data)
else:
rep = begin + "%s>" % repr(self.data[:10] + '...')
return rep
def __eq__(self, other):
if not isinstance(other, Packet):
return 0
return self.v == other.v and \
self.hl == other.hl and \
self.tos == other.tos and \
self.len == other.len and \
self.id == other.id and \
self.df == other.df and \
self.off == other.off and \
self.ttl == other.ttl and \
self.p == other.p and \
self.sum == other.sum and \
self.src == other.src and \
self.dst == other.dst and \
self.data == other.data
def _assemble(self, cksum):
"Get a packet suitable for sending over an IP socket."
# make sure all the data is ready
assert self.src, "src needed before assembling."
assert self.dst, "dst needed before assembling."
self.len = self.hl * 4 + len(self.data)
self.__parse_addrs()
# create the packet
header = struct.pack('ccHHHcc',
chr((self.v & 0x0f) << 4
| (self.hl & 0x0f)), # 4bits each
chr(self.tos & 0xff),
self.len,
self.id,
(self.df & 0x01) << 14
| self.off, # what about flags?
chr(self.ttl & 0xff),
chr(self.p & 0xff))
if cksum:
self.sum = inetutils.cksum(header + '\000\000' + self.__src +
self.__dst)
packet = header + struct.pack('H', self.sum) \
+ self.__src + self.__dst
else:
packet = header + '\000\000' + self.__src + self.__dst
packet = packet + self.data
self.__packet = inetutils.iph2net(packet)
return self.__packet
def __parse_addrs(self):
try:
self.__src = socket.inet_aton(self.src)
except ValueError:
try:
self.__src = socket.inet_aton(socket.gethostbyname(self.src))
except ValueError:
raise ValueError("invalid source address")
try:
self.__dst = socket.inet_aton(self.dst)
except ValueError:
try:
self.__dst = socket.inet_aton(socket.gethostbyname(self.dst))
except ValueError:
raise ValueError("invalid source address")
def __unparse_addrs(self):
src = struct.unpack('cccc', self.src)
self.src = string.joinfields(map(lambda x:str(ord(x)), src), '.')
dst = struct.unpack('cccc', self.dst)
self.dst = string.joinfields(map(lambda x:str(ord(x)), dst), '.')
def _disassemble(self, raw_packet, cksum):
# The kernel computes the checksum, even on a raw packet.
packet = inetutils.net2iph(raw_packet)
b1 = ord(packet[0])
self.v = (b1 >> 4) & 0x0f
self.hl = b1 & 0x0f
if self.v != IPVERSION:
raise ValueError("cannot handle IPv%d packets" % self.v)
hl = self.hl * 4
# verify the checksum
self.sum = struct.unpack('H', packet[10:12])[0] & 0xffff
if cksum:
our_cksum = inetutils.cksum(packet[:20])
if our_cksum != 0:
raise ValueError(packet)
# unpack the fields
elts = struct.unpack('ccHHHcc', packet[:hl-10])
# struct didn't do !<> when this was written
self.tos = ord(elts[1])
self.len = elts[2] & 0xffff
self.id = elts[3] & 0xffff
self.off = elts[4] & 0xffff
self.ttl = ord(elts[5])
self.p = ord(elts[6])
self.data = packet[hl:]
self.src = packet[hl-8:hl-4]
self.dst = packet[hl-4:hl]
self.__unparse_addrs()
def assemble(packet, cksum = 0):
return packet._assemble(cksum)
def disassemble(buffer, cksum = 0):
packet = Packet()
packet._disassemble(buffer, cksum)
return packet
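# Hedged usage sketch (added; this module is Python 2 era code and also needs the
# bundled inetutils helper, so run it under those assumptions):
if __name__ == '__main__':
    pkt = Packet(src='192.168.0.1', dst='192.168.0.2', ttl=64, p=17, data='payload')
    raw = assemble(pkt, cksum=1)        # wire-format header followed by the data
    back = disassemble(raw, cksum=0)    # parse it back into a Packet instance
    assert back.src == pkt.src and back.data == pkt.data
    print(repr(back))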
| 30.69469
| 77
| 0.539426
|
ae10a199f9ed77fc8a3aded76a6405a0002b2c5a
| 14,968
|
py
|
Python
|
tests/components/alarm_control_panel/test_device_action.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/alarm_control_panel/test_device_action.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/alarm_control_panel/test_device_action.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The tests for Alarm control panel device actions."""
import pytest
from homeassistant.components.alarm_control_panel import DOMAIN, const
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.const import (
CONF_PLATFORM,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_VACATION,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
STATE_UNKNOWN,
)
from homeassistant.helpers import device_registry
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_registry import RegistryEntryHider
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automation_capabilities,
async_get_device_automations,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.mark.parametrize(
"set_state,features_reg,features_state,expected_action_types",
[
(False, 0, 0, ["disarm"]),
(
False,
const.AlarmControlPanelEntityFeature.ARM_AWAY,
0,
["disarm", "arm_away"],
),
(
False,
const.AlarmControlPanelEntityFeature.ARM_HOME,
0,
["disarm", "arm_home"],
),
(
False,
const.AlarmControlPanelEntityFeature.ARM_NIGHT,
0,
["disarm", "arm_night"],
),
(False, const.AlarmControlPanelEntityFeature.TRIGGER, 0, ["disarm", "trigger"]),
(True, 0, 0, ["disarm"]),
(
True,
0,
const.AlarmControlPanelEntityFeature.ARM_AWAY,
["disarm", "arm_away"],
),
(
True,
0,
const.AlarmControlPanelEntityFeature.ARM_HOME,
["disarm", "arm_home"],
),
(
True,
0,
const.AlarmControlPanelEntityFeature.ARM_NIGHT,
["disarm", "arm_night"],
),
(
True,
0,
const.AlarmControlPanelEntityFeature.ARM_VACATION,
["disarm", "arm_vacation"],
),
(True, 0, const.AlarmControlPanelEntityFeature.TRIGGER, ["disarm", "trigger"]),
],
)
async def test_get_actions(
hass,
device_reg,
entity_reg,
set_state,
features_reg,
features_state,
expected_action_types,
):
"""Test we get the expected actions from a alarm_control_panel."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
supported_features=features_reg,
)
if set_state:
hass.states.async_set(
f"{DOMAIN}.test_5678", "attributes", {"supported_features": features_state}
)
expected_actions = []
expected_actions += [
{
"domain": DOMAIN,
"type": action,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": False},
}
for action in expected_action_types
]
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert_lists_same(actions, expected_actions)
@pytest.mark.parametrize(
"hidden_by,entity_category",
(
(RegistryEntryHider.INTEGRATION, None),
(RegistryEntryHider.USER, None),
(None, EntityCategory.CONFIG),
(None, EntityCategory.DIAGNOSTIC),
),
)
async def test_get_actions_hidden_auxiliary(
hass,
device_reg,
entity_reg,
hidden_by,
entity_category,
):
"""Test we get the expected actions from a hidden or auxiliary entity."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
"5678",
device_id=device_entry.id,
entity_category=entity_category,
hidden_by=hidden_by,
supported_features=const.AlarmControlPanelEntityFeature.ARM_AWAY,
)
expected_actions = []
expected_actions += [
{
"domain": DOMAIN,
"type": action,
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
"metadata": {"secondary": True},
}
for action in ["disarm", "arm_away"]
]
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert_lists_same(actions, expected_actions)
async def test_get_actions_arm_night_only(hass, device_reg, entity_reg):
"""Test we get the expected actions from a alarm_control_panel."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
hass.states.async_set(
"alarm_control_panel.test_5678", "attributes", {"supported_features": 4}
)
expected_actions = [
{
"domain": DOMAIN,
"type": "arm_night",
"device_id": device_entry.id,
"entity_id": "alarm_control_panel.test_5678",
"metadata": {"secondary": False},
},
{
"domain": DOMAIN,
"type": "disarm",
"device_id": device_entry.id,
"entity_id": "alarm_control_panel.test_5678",
"metadata": {"secondary": False},
},
]
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert_lists_same(actions, expected_actions)
async def test_get_action_capabilities(
hass, device_reg, entity_reg, enable_custom_integrations
):
"""Test we get the expected capabilities from a sensor trigger."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES["no_arm_code"].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
expected_capabilities = {
"arm_away": {"extra_fields": []},
"arm_home": {"extra_fields": []},
"arm_night": {"extra_fields": []},
"arm_vacation": {"extra_fields": []},
"disarm": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"trigger": {"extra_fields": []},
}
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert len(actions) == 6
assert {action["type"] for action in actions} == set(expected_capabilities)
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.ACTION, action
)
assert capabilities == expected_capabilities[action["type"]]
async def test_get_action_capabilities_arm_code(
hass, device_reg, entity_reg, enable_custom_integrations
):
"""Test we get the expected capabilities from a sensor trigger."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES["arm_code"].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
expected_capabilities = {
"arm_away": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"arm_home": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"arm_night": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"arm_vacation": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"disarm": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"trigger": {"extra_fields": []},
}
actions = await async_get_device_automations(
hass, DeviceAutomationType.ACTION, device_entry.id
)
assert len(actions) == 6
assert {action["type"] for action in actions} == set(expected_capabilities)
for action in actions:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.ACTION, action
)
assert capabilities == expected_capabilities[action["type"]]
async def test_action(hass, enable_custom_integrations):
"""Test for turn_on and turn_off actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event_arm_away",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "alarm_control_panel.alarm_no_arm_code",
"type": "arm_away",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_arm_home",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "alarm_control_panel.alarm_no_arm_code",
"type": "arm_home",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_arm_night",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "alarm_control_panel.alarm_no_arm_code",
"type": "arm_night",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_arm_vacation",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "alarm_control_panel.alarm_no_arm_code",
"type": "arm_vacation",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event_disarm"},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "alarm_control_panel.alarm_no_arm_code",
"type": "disarm",
"code": "1234",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_trigger",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "alarm_control_panel.alarm_no_arm_code",
"type": "trigger",
},
},
]
},
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
assert (
hass.states.get("alarm_control_panel.alarm_no_arm_code").state == STATE_UNKNOWN
)
hass.bus.async_fire("test_event_arm_away")
await hass.async_block_till_done()
assert (
hass.states.get("alarm_control_panel.alarm_no_arm_code").state
== STATE_ALARM_ARMED_AWAY
)
hass.bus.async_fire("test_event_arm_home")
await hass.async_block_till_done()
assert (
hass.states.get("alarm_control_panel.alarm_no_arm_code").state
== STATE_ALARM_ARMED_HOME
)
hass.bus.async_fire("test_event_arm_vacation")
await hass.async_block_till_done()
assert (
hass.states.get("alarm_control_panel.alarm_no_arm_code").state
== STATE_ALARM_ARMED_VACATION
)
hass.bus.async_fire("test_event_arm_night")
await hass.async_block_till_done()
assert (
hass.states.get("alarm_control_panel.alarm_no_arm_code").state
== STATE_ALARM_ARMED_NIGHT
)
hass.bus.async_fire("test_event_disarm")
await hass.async_block_till_done()
assert (
hass.states.get("alarm_control_panel.alarm_no_arm_code").state
== STATE_ALARM_DISARMED
)
hass.bus.async_fire("test_event_trigger")
await hass.async_block_till_done()
assert (
hass.states.get("alarm_control_panel.alarm_no_arm_code").state
== STATE_ALARM_TRIGGERED
)
| 33.262222
| 88
| 0.580037
|
dd03cbb1dfa8f7fab98dd21cc0778cf7d0643e48
| 672
|
py
|
Python
|
manage.py
|
Shay-yes/iNaturalistGo
|
713596864419bcecd86db84bed176458c4398d6c
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
Shay-yes/iNaturalistGo
|
713596864419bcecd86db84bed176458c4398d6c
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
Shay-yes/iNaturalistGo
|
713596864419bcecd86db84bed176458c4398d6c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'iNaturalist_447.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.217391
| 79
| 0.683036
|
7e68a14f9130234bf8986d4a7a01507bc359ea23
| 39,173
|
py
|
Python
|
sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/v2021_01_01_preview/aio/operations/_queues_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/v2021_01_01_preview/aio/operations/_queues_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/v2021_01_01_preview/aio/operations/_queues_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QueuesOperations:
"""QueuesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.servicebus.v2021_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_authorization_rules(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SBAuthorizationRuleListResult"]:
"""Gets all authorization rules for a queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SBAuthorizationRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2021_01_01_preview.models.SBAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBAuthorizationRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_authorization_rules.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SBAuthorizationRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_authorization_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules'} # type: ignore
async def create_or_update_authorization_rule(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
parameters: "_models.SBAuthorizationRule",
**kwargs: Any
) -> "_models.SBAuthorizationRule":
"""Creates an authorization rule for a queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param parameters: The shared access authorization rule.
:type parameters: ~azure.mgmt.servicebus.v2021_01_01_preview.models.SBAuthorizationRule
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.SBAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBAuthorizationRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_authorization_rule.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SBAuthorizationRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}'} # type: ignore
async def delete_authorization_rule(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
**kwargs: Any
) -> None:
"""Deletes a queue authorization rule.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.delete_authorization_rule.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}'} # type: ignore
async def get_authorization_rule(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
**kwargs: Any
) -> "_models.SBAuthorizationRule":
"""Gets an authorization rule for a queue by rule name.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.SBAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBAuthorizationRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get_authorization_rule.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_authorization_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}'} # type: ignore
async def list_keys(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
**kwargs: Any
) -> "_models.AccessKeys":
"""Primary and secondary connection strings to the queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessKeys, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.AccessKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}/ListKeys'} # type: ignore
async def regenerate_keys(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
authorization_rule_name: str,
parameters: "_models.RegenerateAccessKeyParameters",
**kwargs: Any
) -> "_models.AccessKeys":
"""Regenerates the primary or secondary connection strings to the queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param authorization_rule_name: The authorization rule name.
:type authorization_rule_name: str
:param parameters: Parameters supplied to regenerate the authorization rule.
:type parameters: ~azure.mgmt.servicebus.v2021_01_01_preview.models.RegenerateAccessKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessKeys, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.AccessKeys
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessKeys"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'authorizationRuleName': self._serialize.url("authorization_rule_name", authorization_rule_name, 'str', max_length=50, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RegenerateAccessKeyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessKeys', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}/regenerateKeys'} # type: ignore
def list_by_namespace(
self,
resource_group_name: str,
namespace_name: str,
skip: Optional[int] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.SBQueueListResult"]:
"""Gets the queues within a namespace.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param skip: Skip is only used if a previous operation returned a partial result. If a previous
response contains a nextLink element, the value of the nextLink element will include a skip
parameter that specifies a starting point to use for subsequent calls.
:type skip: int
:param top: May be used to limit the number of results to the most recent N usageDetails.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SBQueueListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.servicebus.v2021_01_01_preview.models.SBQueueListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBQueueListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_namespace.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', maximum=1000, minimum=0)
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=1000, minimum=1)
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SBQueueListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_namespace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
parameters: "_models.SBQueue",
**kwargs: Any
) -> "_models.SBQueue":
"""Creates or updates a Service Bus queue. This operation is idempotent.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:param parameters: Parameters supplied to create or update a queue resource.
:type parameters: ~azure.mgmt.servicebus.v2021_01_01_preview.models.SBQueue
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBQueue, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.SBQueue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBQueue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SBQueue')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBQueue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
**kwargs: Any
) -> None:
"""Deletes a queue from the specified namespace in a resource group.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}'} # type: ignore
async def get(
self,
resource_group_name: str,
namespace_name: str,
queue_name: str,
**kwargs: Any
) -> "_models.SBQueue":
"""Returns a description for the specified queue.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param namespace_name: The namespace name.
:type namespace_name: str
:param queue_name: The queue name.
:type queue_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SBQueue, or the result of cls(response)
:rtype: ~azure.mgmt.servicebus.v2021_01_01_preview.models.SBQueue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SBQueue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'namespaceName': self._serialize.url("namespace_name", namespace_name, 'str', max_length=50, min_length=6),
'queueName': self._serialize.url("queue_name", queue_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SBQueue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}'} # type: ignore
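# --- Editor's usage sketch (assumption, not part of the generated module) ---
# A minimal illustration of how the async operations above are typically reached
# through the versioned management client; the client/credential names follow the
# usual azure-identity wiring and all resource names below are placeholders.
async def _example_list_queue_authorization_rules():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.servicebus.v2021_01_01_preview.aio import ServiceBusManagementClient
    async with DefaultAzureCredential() as credential:
        async with ServiceBusManagementClient(credential, "<subscription-id>") as client:
            # list_authorization_rules returns an AsyncItemPaged that follows nextLink paging.
            async for rule in client.queues.list_authorization_rules(
                    "<resource-group>", "<namespace>", "<queue>"):
                print(rule.name)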
| 52.161119
| 264
| 0.677048
|
b3551b4959905019912e288fb7dbc11c5e119407
| 16,567
|
py
|
Python
|
python/ray/serve/router.py
|
janblumenkamp/ray
|
304e31b7e56ebd33f7099d97233e3feb37f495c9
|
[
"Apache-2.0"
] | 1
|
2021-01-22T20:30:15.000Z
|
2021-01-22T20:30:15.000Z
|
python/ray/serve/router.py
|
janblumenkamp/ray
|
304e31b7e56ebd33f7099d97233e3feb37f495c9
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/router.py
|
janblumenkamp/ray
|
304e31b7e56ebd33f7099d97233e3feb37f495c9
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import copy
from collections import defaultdict
import time
from typing import DefaultDict, List
# Note on choosing blist instead of stdlib heapq
# 1. pop operation should be O(1) (amortized)
# (helpful even for batched pop)
# 2. There should not be significant overhead in
# maintaining the sorted list.
# 3. The blist implementation is fast and uses C extensions.
import blist
import ray
import ray.cloudpickle as pickle
from ray.exceptions import RayTaskError
from ray.serve.metric import MetricClient
from ray.serve.utils import logger, retry_actor_failures
class Query:
def __init__(self,
request_args,
request_kwargs,
request_context,
request_slo_ms,
call_method="__call__",
async_future=None):
self.request_args = request_args
self.request_kwargs = request_kwargs
self.request_context = request_context
self.async_future = async_future
# Service level objective in milliseconds. This is expected to be the
# absolute time since unix epoch.
self.request_slo_ms = request_slo_ms
self.call_method = call_method
def ray_serialize(self):
        # NOTE: this method is needed because Query needs to be serialized and
        # sent to the replica worker. However, after we send the query to the
        # replica worker, the async_future is still needed to retrieve the
        # final result. Therefore we need a way to pass the information to the
        # replica worker without removing async_future.
clone = copy.copy(self).__dict__
clone.pop("async_future")
return pickle.dumps(clone, protocol=5)
@staticmethod
def ray_deserialize(value):
kwargs = pickle.loads(value)
return Query(**kwargs)
# adding comparator fn for maintaining an
# ascending order sorted list w.r.t request_slo_ms
def __lt__(self, other):
return self.request_slo_ms < other.request_slo_ms
def __repr__(self):
return "<Query args={} kwargs={}>".format(self.request_args,
self.request_kwargs)
def _make_future_unwrapper(client_futures: List[asyncio.Future],
host_future: asyncio.Future):
"""Distribute the result of host_future to each of client_future"""
for client_future in client_futures:
# Keep a reference to host future so the host future won't get
# garbage collected.
client_future.host_ref = host_future
def unwrap_future(_):
result = host_future.result()
if isinstance(result, list):
for client_future, result_item in zip(client_futures, result):
client_future.set_result(result_item)
else: # Result is an exception.
for client_future in client_futures:
client_future.set_result(result)
return unwrap_future
class Router:
"""A router that routes request to available workers.
The traffic policy is used to assign requests to workers.
Traffic policy splits the traffic among different replicas
probabilistically:
1. When all backends are ready to receive traffic, we will randomly
choose a backend based on the weights assigned by the traffic policy
dictionary.
2. When more than 1 but not all backends are ready, we will normalize the
weights of the ready backends to 1 and choose a backend via sampling.
3. When there is only 1 backend ready, we will only use that backend.
"""
async def __init__(self):
        # Note: several queues are used in the router.
        # - When a request comes in, it is placed inside its corresponding
        #   endpoint_queue.
        # - The endpoint_queue is dequeued during the flush operation, which
        #   moves the queries to the backend buffer_queue. Here we match a
        #   request for an endpoint to a backend given some policy.
        # - The worker_queue is used to collect idle actor handles. These
        #   handles are dequeued during the second stage of the flush
        #   operation, which assigns queries in buffer_queue to actor handles.
# -- Queues -- #
# endpoint_name -> request queue
        self.endpoint_queues: DefaultDict[str, asyncio.Queue] = defaultdict(
            asyncio.Queue)
        # backend_name -> worker request queue
        self.worker_queues: DefaultDict[str, asyncio.Queue] = defaultdict(
            asyncio.Queue)
# backend_name -> worker payload queue
self.buffer_queues = defaultdict(blist.sortedlist)
# -- Metadata -- #
# endpoint_name -> traffic_policy
self.traffic = defaultdict(dict)
# backend_name -> backend_config
self.backend_info = dict()
# replica tag -> worker_handle
self.replicas = dict()
# -- Synchronization -- #
        # This lock guarantees that only one flush operation can happen at a
        # time. Without the lock, multiple flush operations could pop from the
        # same buffer_queue and worker_queue and create a deadlock; for
        # example, one operation holding the only query while another holds
        # the only idle replica. Additionally, allowing only one flush
        # operation at a time simplifies the design of custom queuing and
        # batching policies.
self.flush_lock = asyncio.Lock()
# Fetch the worker handles, traffic policies, and backend configs from
# the master actor. We use a "pull-based" approach instead of pushing
# them from the master so that the router can transparently recover
# from failure.
ray.serve.init()
master_actor = ray.serve.api._get_master_actor()
traffic_policies = retry_actor_failures(
master_actor.get_traffic_policies)
for endpoint, traffic_policy in traffic_policies.items():
await self.set_traffic(endpoint, traffic_policy)
backend_dict = retry_actor_failures(
master_actor.get_all_worker_handles)
for backend_tag, replica_dict in backend_dict.items():
for replica_tag, worker in replica_dict.items():
await self.add_new_worker(backend_tag, replica_tag, worker)
backend_configs = retry_actor_failures(
master_actor.get_backend_configs)
for backend, backend_config in backend_configs.items():
await self.set_backend_config(backend, backend_config)
self.metric_client = MetricClient.connect_from_serve()
self.num_router_requests = self.metric_client.new_counter(
"num_router_requests",
description="Number of requests processed by the router.",
label_names=("endpoint", ))
self.num_error_endpoint_request = self.metric_client.new_counter(
"num_error_endpoint_requests",
description=("Number of requests errored when getting result "
"for endpoint."),
label_names=("endpoint", ))
self.num_error_backend_request = self.metric_client.new_counter(
"num_error_backend_requests",
description=("Number of requests errored when getting result "
"from backend."),
label_names=("backend", ))
def is_ready(self):
return True
async def enqueue_request(self, request_meta, *request_args,
**request_kwargs):
endpoint = request_meta.endpoint
logger.debug("Received a request for endpoint {}".format(endpoint))
self.num_router_requests.labels(endpoint=endpoint).add()
# check if the slo specified is directly the
# wall clock time
if request_meta.absolute_slo_ms is not None:
request_slo_ms = request_meta.absolute_slo_ms
else:
request_slo_ms = request_meta.adjust_relative_slo_ms()
request_context = request_meta.request_context
query = Query(
request_args,
request_kwargs,
request_context,
request_slo_ms,
call_method=request_meta.call_method,
async_future=asyncio.get_event_loop().create_future())
await self.endpoint_queues[endpoint].put(query)
await self.flush()
# Note: a future change can be to directly return the ObjectID from
# replica task submission
try:
result = await query.async_future
except RayTaskError as e:
self.num_error_endpoint_request.labels(endpoint=endpoint).add()
result = e
return result
async def add_new_worker(self, backend_tag, replica_tag, worker_handle):
backend_replica_tag = backend_tag + ":" + replica_tag
if backend_replica_tag in self.replicas:
return
self.replicas[backend_replica_tag] = worker_handle
logger.debug("New worker added for backend '{}'".format(backend_tag))
# await worker_handle.ready.remote()
await self.mark_worker_idle(backend_tag, backend_replica_tag)
async def mark_worker_idle(self, backend_tag, backend_replica_tag):
if backend_replica_tag not in self.replicas:
return
await self.worker_queues[backend_tag].put(backend_replica_tag)
await self.flush()
async def remove_worker(self, backend_tag, replica_tag):
backend_replica_tag = backend_tag + ":" + replica_tag
if backend_replica_tag not in self.replicas:
return
worker_handle = self.replicas.pop(backend_replica_tag)
# We need this lock because we modify worker_queue here.
async with self.flush_lock:
old_queue = self.worker_queues[backend_tag]
new_queue = asyncio.Queue()
while not old_queue.empty():
curr_tag = await old_queue.get()
if curr_tag != backend_replica_tag:
await new_queue.put(curr_tag)
self.worker_queues[backend_tag] = new_queue
# We need to terminate the worker here instead of from the master
# so we can guarantee that the router won't submit any more tasks
# on it.
worker_handle.__ray_terminate__.remote()
async def set_traffic(self, endpoint, traffic_dict):
logger.debug("Setting traffic for endpoint %s to %s", endpoint,
traffic_dict)
self.traffic[endpoint] = traffic_dict
await self.flush()
async def remove_endpoint(self, endpoint):
logger.debug("Removing endpoint {}".format(endpoint))
async with self.flush_lock:
await self._flush_endpoint_queues()
await self._flush_buffer_queues()
if endpoint in self.endpoint_queues:
del self.endpoint_queues[endpoint]
if endpoint in self.traffic:
del self.traffic[endpoint]
async def set_backend_config(self, backend, config):
logger.debug("Setting backend config for "
"backend {} to {}.".format(backend, config))
self.backend_info[backend] = config
async def remove_backend(self, backend):
logger.debug("Removing backend {}".format(backend))
async with self.flush_lock:
await self._flush_endpoint_queues()
await self._flush_buffer_queues()
if backend in self.backend_info:
del self.backend_info[backend]
if backend in self.worker_queues:
del self.worker_queues[backend]
if backend in self.buffer_queues:
del self.buffer_queues[backend]
async def flush(self):
"""In the default case, flush calls ._flush.
When this class is a Ray actor, .flush can be scheduled as a remote
method invocation.
"""
async with self.flush_lock:
await self._flush_endpoint_queues()
await self._flush_buffer_queues()
def _get_available_backends(self, endpoint):
backends_in_policy = set(self.traffic[endpoint].keys())
available_workers = {
backend
for backend, queues in self.worker_queues.items()
if queues.qsize() > 0
}
return list(backends_in_policy.intersection(available_workers))
async def _flush_endpoint_queues(self):
"""Selects the backend and puts the endpoint queue query to the buffer
Expected Implementation:
The implementer is expected to access and manipulate
self.endpoint_queues : dict[str,Deque]
self.buffer_queues : dict[str,sortedlist]
For registering the implemented policies register at policy.py
Expected Behavior:
the Deque of all endpoints in self.endpoint_queues linked with
atleast one backend must be empty irrespective of whatever
backend policy is implemented.
"""
raise NotImplementedError(
"This method should be implemented by child class.")
# Flushes the buffer queue and assigns work to workers.
async def _flush_buffer_queues(self):
for endpoint in self.traffic:
ready_backends = self._get_available_backends(endpoint)
for backend in ready_backends:
# no work available
if len(self.buffer_queues[backend]) == 0:
continue
buffer_queue = self.buffer_queues[backend]
worker_queue = self.worker_queues[backend]
logger.debug("Assigning queries for backend {} with buffer "
"queue size {} and worker queue size {}".format(
backend, len(buffer_queue),
worker_queue.qsize()))
max_batch_size = None
if backend in self.backend_info:
max_batch_size = self.backend_info[backend].max_batch_size
await self._assign_query_to_worker(
backend, buffer_queue, worker_queue, max_batch_size)
async def _do_query(self, backend, backend_replica_tag, req):
# If the worker died, this will be a RayActorError. Just return it and
# let the HTTP proxy handle the retry logic.
logger.debug("Sending query to replica:" + backend_replica_tag)
start = time.time()
worker = self.replicas[backend_replica_tag]
try:
result = await worker.handle_request.remote(req)
except RayTaskError as error:
self.num_error_backend_request.labels(backend=backend).add()
result = error
await self.mark_worker_idle(backend, backend_replica_tag)
logger.debug("Got result in {:.2f}s".format(time.time() - start))
return result
async def _assign_query_to_worker(self,
backend,
buffer_queue,
worker_queue,
max_batch_size=None):
while len(buffer_queue) and worker_queue.qsize():
backend_replica_tag = await worker_queue.get()
if max_batch_size is None: # No batching
request = buffer_queue.pop(0)
future = asyncio.get_event_loop().create_task(
self._do_query(backend, backend_replica_tag, request))
# chaining satisfies request.async_future with future result.
asyncio.futures._chain_future(future, request.async_future)
else:
real_batch_size = min(len(buffer_queue), max_batch_size)
requests = [
buffer_queue.pop(0) for _ in range(real_batch_size)
]
# split requests by method type
requests_group = defaultdict(list)
for request in requests:
requests_group[request.call_method].append(request)
for group in requests_group.values():
future = asyncio.get_event_loop().create_task(
self._do_query(backend, backend_replica_tag, group))
future.add_done_callback(
_make_future_unwrapper(
client_futures=[req.async_future for req in group],
host_future=future))
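# --- Editor's illustration (assumption, not part of ray.serve) ---
# A self-contained sketch of the two mechanisms described above: traffic weights are
# renormalized over the backends that currently have idle workers, and buffered
# queries are kept in ascending request_slo_ms order so the earliest deadline is
# popped first. The helper names below are placeholders, not Serve APIs.
if __name__ == "__main__":
    import random

    def _choose_backend(traffic_policy, ready_backends):
        # Keep only backends with idle workers, renormalize their weights, and sample.
        weights = {b: w for b, w in traffic_policy.items() if b in ready_backends}
        total = sum(weights.values())
        return random.choices(
            list(weights), weights=[w / total for w in weights.values()])[0]

    print(_choose_backend({"v1": 0.7, "v2": 0.2, "v3": 0.1}, {"v1", "v3"}))

    # blist.sortedlist plus Query.__lt__ keep the buffer ordered by request_slo_ms.
    buffer_queue = blist.sortedlist()
    for slo_ms in (250.0, 50.0, 125.0):
        buffer_queue.add(Query([], {}, None, slo_ms))
    print([q.request_slo_ms for q in buffer_queue])  # -> [50.0, 125.0, 250.0]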
| 41.521303
| 79
| 0.635842
|
87fdffa42ce10c1d1605ac64f2d2fa34eed8c00a
| 1,839
|
py
|
Python
|
http_server/cli.py
|
RomainDuclos/sage-engine
|
333997c658ea44e643bed636c5297e5e998ef97c
|
[
"MIT"
] | null | null | null |
http_server/cli.py
|
RomainDuclos/sage-engine
|
333997c658ea44e643bed636c5297e5e998ef97c
|
[
"MIT"
] | null | null | null |
http_server/cli.py
|
RomainDuclos/sage-engine
|
333997c658ea44e643bed636c5297e5e998ef97c
|
[
"MIT"
] | 3
|
2019-01-03T12:49:54.000Z
|
2019-01-18T16:32:53.000Z
|
# cli.py
# Author: Thomas MINIER - MIT License 2017-2018
import argparse
from os.path import isfile
from http_server.server import sage_app
from gunicorn.app.base import BaseApplication
from gunicorn.six import iteritems
class StandaloneApplication(BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
return self.application
def cli_sage():
parser = argparse.ArgumentParser(description='Launch the Sage server using a configuration file')
parser.add_argument('config', metavar='config', help='Path to the configuration file')
parser.add_argument('-p', '--port', metavar='P', type=int, help='The port to bind (default: 8000)', default=8000)
parser.add_argument('-w', '--workers', metavar='W', type=int, help='The number of server workers (default: 4)', default=4)
parser.add_argument('--log-level', metavar='LEVEL', dest='log_level', help='The granularity of log outputs (default: info)', default='info')
args = parser.parse_args()
# check if config file exists
if not isfile(args.config):
print("Error: Configuration file not found: '{}'".format(args.config))
print("Error: Sage server could not start, aborting...")
else:
options = {
'bind': '%s:%s' % ('0.0.0.0', args.port),
'workers': args.workers,
            'loglevel': args.log_level  # gunicorn's setting key is 'loglevel', not 'log-level'
}
StandaloneApplication(sage_app(args.config), options).run()
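# --- Editor's sketch (assumption, not part of the original CLI) ---
# StandaloneApplication only feeds an options dict into gunicorn's config, so the same
# wrapper can serve any WSGI callable; "_demo_app" and "_run_demo" below are
# hypothetical names used purely to illustrate the pattern.
def _demo_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from gunicorn']

def _run_demo(port=8000, workers=2):
    options = {'bind': '0.0.0.0:%s' % port, 'workers': workers, 'loglevel': 'info'}
    StandaloneApplication(_demo_app, options).run()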
| 41.795455
| 144
| 0.661229
|
03688bdd2856083a9c51d5dcb71146a14b89a344
| 270
|
py
|
Python
|
portal/apps/shoutbox/admin.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 8
|
2020-12-15T17:11:08.000Z
|
2021-12-13T22:08:33.000Z
|
portal/apps/shoutbox/admin.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 28
|
2020-12-15T17:34:03.000Z
|
2022-02-01T04:09:10.000Z
|
portal/apps/shoutbox/admin.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 7
|
2020-12-15T19:59:17.000Z
|
2021-11-24T16:47:06.000Z
|
# -*- coding: utf-8 -*-
from shoutbox.models import Shout
from django.contrib.admin import site, ModelAdmin
class ShoutModelAdmin(ModelAdmin):
list_display = ('message', 'user', 'post_date')
search_fields = ('message',)
site.register(Shout, ShoutModelAdmin)
| 22.5
| 51
| 0.722222
|
4a092d536c946a36da4ad76b90fa929be8ac2377
| 3,372
|
py
|
Python
|
apps/account/migrations/0001_initial.py
|
kagxin/django-template
|
3cdddf8ff3e1d95298ffe359f0a40e27220d795b
|
[
"MIT"
] | 16
|
2019-07-23T04:14:27.000Z
|
2022-02-15T10:46:06.000Z
|
apps/account/migrations/0001_initial.py
|
kagxin/django-template
|
3cdddf8ff3e1d95298ffe359f0a40e27220d795b
|
[
"MIT"
] | 1
|
2021-04-08T19:34:31.000Z
|
2021-04-08T19:34:31.000Z
|
apps/account/migrations/0001_initial.py
|
kagxin/django-template
|
3cdddf8ff3e1d95298ffe359f0a40e27220d795b
|
[
"MIT"
] | 5
|
2019-07-23T13:18:42.000Z
|
2021-01-28T06:37:47.000Z
|
# Generated by Django 2.2.2 on 2019-07-18 01:37
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(blank=True, max_length=30, null=True, verbose_name='姓名')),
('birthday', models.DateField(blank=True, null=True, verbose_name='出生年月')),
('gender', models.CharField(blank=True, choices=[('male', '男'), ('female', '女')], default='female', max_length=6, verbose_name='性别')),
('mobile', models.CharField(blank=True, max_length=11, null=True, verbose_name='电话')),
('image', models.ImageField(blank=True, null=True, upload_to='image/%Y/%m/%d')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 68.816327
| 329
| 0.654804
|
5ecb97949a29b1dec1f55e52d4bc186cc05e3729
| 7,845
|
py
|
Python
|
train.py
|
MingmingPeng/DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2
|
576f622495d6325165cb77dc7de96230a579ebe2
|
[
"MIT"
] | 305
|
2019-04-07T11:45:45.000Z
|
2022-03-18T00:34:35.000Z
|
train.py
|
Dipperss/DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2
|
f79f417d42079b597a1f5a030f4b6e3a5d88f0e1
|
[
"MIT"
] | 8
|
2019-04-11T07:15:59.000Z
|
2021-09-25T02:20:30.000Z
|
train.py
|
Dipperss/DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2
|
f79f417d42079b597a1f5a030f4b6e3a5d88f0e1
|
[
"MIT"
] | 58
|
2019-04-07T12:08:36.000Z
|
2021-11-25T21:06:39.000Z
|
import functools
import imlib as im
import pylib as py
import tensorflow as tf
import tensorflow.keras as keras
import tf2lib as tl
import tf2gan as gan
import tqdm
import data
import module
# ==============================================================================
# = param =
# ==============================================================================
# command line
py.arg('--dataset', default='fashion_mnist', choices=['cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom'])
py.arg('--batch_size', type=int, default=64)
py.arg('--epochs', type=int, default=25)
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # number of D updates per G update
py.arg('--z_dim', type=int, default=128)
py.arg('--adversarial_loss_mode', default='gan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
args = py.args()
# output_dir
if args.experiment_name == 'none':
args.experiment_name = '%s_%s' % (args.dataset, args.adversarial_loss_mode)
if args.gradient_penalty_mode != 'none':
args.experiment_name += '_%s' % args.gradient_penalty_mode
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)
# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)
# ==============================================================================
# = data =
# ==============================================================================
# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']: # 32x32
dataset, shape, len_dataset = data.make_32x32_dataset(args.dataset, args.batch_size)
n_G_upsamplings = n_D_downsamplings = 3
elif args.dataset == 'celeba': # 64x64
img_paths = py.glob('data/img_align_celeba', '*.jpg')
dataset, shape, len_dataset = data.make_celeba_dataset(img_paths, args.batch_size)
n_G_upsamplings = n_D_downsamplings = 4
elif args.dataset == 'anime': # 64x64
img_paths = py.glob('data/faces', '*.jpg')
dataset, shape, len_dataset = data.make_anime_dataset(img_paths, args.batch_size)
n_G_upsamplings = n_D_downsamplings = 4
elif args.dataset == 'custom':
# ======================================
# = custom =
# ======================================
img_paths = ... # image paths of custom dataset
dataset, shape, len_dataset = data.make_custom_dataset(img_paths, args.batch_size)
n_G_upsamplings = n_D_downsamplings = ... # 3 for 32x32 and 4 for 64x64
# ======================================
# = custom =
# ======================================
# ==============================================================================
# = model =
# ==============================================================================
# setup the normalization function for discriminator
if args.gradient_penalty_mode == 'none':
d_norm = 'batch_norm'
elif args.gradient_penalty_mode in ['dragan', 'wgan-gp']: # cannot use batch normalization with gradient penalty
# TODO(Lynn)
# Layer normalization is more stable than instance normalization here,
# but instance normalization works in other implementations.
# Please tell me if you find out the cause.
d_norm = 'layer_norm'
# networks
G = module.ConvGenerator(input_shape=(1, 1, args.z_dim), output_channels=shape[-1], n_upsamplings=n_G_upsamplings, name='G_%s' % args.dataset)
D = module.ConvDiscriminator(input_shape=shape, n_downsamplings=n_D_downsamplings, norm=d_norm, name='D_%s' % args.dataset)
G.summary()
D.summary()
# adversarial_loss_functions
d_loss_fn, g_loss_fn = gan.get_adversarial_losses_fn(args.adversarial_loss_mode)
G_optimizer = keras.optimizers.Adam(learning_rate=args.lr, beta_1=args.beta_1)
D_optimizer = keras.optimizers.Adam(learning_rate=args.lr, beta_1=args.beta_1)
# ==============================================================================
# = train step =
# ==============================================================================
@tf.function
def train_G():
with tf.GradientTape() as t:
z = tf.random.normal(shape=(args.batch_size, 1, 1, args.z_dim))
x_fake = G(z, training=True)
x_fake_d_logit = D(x_fake, training=True)
G_loss = g_loss_fn(x_fake_d_logit)
G_grad = t.gradient(G_loss, G.trainable_variables)
G_optimizer.apply_gradients(zip(G_grad, G.trainable_variables))
return {'g_loss': G_loss}
@tf.function
def train_D(x_real):
with tf.GradientTape() as t:
z = tf.random.normal(shape=(args.batch_size, 1, 1, args.z_dim))
x_fake = G(z, training=True)
x_real_d_logit = D(x_real, training=True)
x_fake_d_logit = D(x_fake, training=True)
x_real_d_loss, x_fake_d_loss = d_loss_fn(x_real_d_logit, x_fake_d_logit)
gp = gan.gradient_penalty(functools.partial(D, training=True), x_real, x_fake, mode=args.gradient_penalty_mode)
D_loss = (x_real_d_loss + x_fake_d_loss) + gp * args.gradient_penalty_weight
D_grad = t.gradient(D_loss, D.trainable_variables)
D_optimizer.apply_gradients(zip(D_grad, D.trainable_variables))
return {'d_loss': x_real_d_loss + x_fake_d_loss, 'gp': gp}
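# --- Editor's sketch (assumption): what a 'wgan-gp' style penalty typically computes.
# The actual implementation used above lives in tf2gan's gan.gradient_penalty; this
# standalone version only makes the formula concrete and is never called here.
def _wgan_gp_sketch(discriminator, x_real, x_fake):
    # Sample points uniformly on the lines between real and fake samples.
    eps = tf.random.uniform([tf.shape(x_real)[0], 1, 1, 1], 0.0, 1.0)
    x_hat = eps * x_real + (1.0 - eps) * x_fake
    with tf.GradientTape() as tape:
        tape.watch(x_hat)
        d_hat = discriminator(x_hat, training=True)
    grad = tape.gradient(d_hat, x_hat)
    norm = tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3]) + 1e-12)
    # Penalize gradient norms that deviate from 1 (the weight is applied by the caller).
    return tf.reduce_mean(tf.square(norm - 1.0))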
@tf.function
def sample(z):
return G(z, training=False)
# ==============================================================================
# = run =
# ==============================================================================
# epoch counter
ep_cnt = tf.Variable(initial_value=0, trainable=False, dtype=tf.int64)
# checkpoint
checkpoint = tl.Checkpoint(dict(G=G,
D=D,
G_optimizer=G_optimizer,
D_optimizer=D_optimizer,
ep_cnt=ep_cnt),
py.join(output_dir, 'checkpoints'),
max_to_keep=5)
try: # restore checkpoint including the epoch counter
checkpoint.restore().assert_existing_objects_matched()
except Exception as e:
print(e)
# summary
train_summary_writer = tf.summary.create_file_writer(py.join(output_dir, 'summaries', 'train'))
# sample
sample_dir = py.join(output_dir, 'samples_training')
py.mkdir(sample_dir)
# main loop
z = tf.random.normal((100, 1, 1, args.z_dim)) # a fixed noise for sampling
with train_summary_writer.as_default():
for ep in tqdm.trange(args.epochs, desc='Epoch Loop'):
if ep < ep_cnt:
continue
# update epoch counter
ep_cnt.assign_add(1)
# train for an epoch
for x_real in tqdm.tqdm(dataset, desc='Inner Epoch Loop', total=len_dataset):
D_loss_dict = train_D(x_real)
tl.summary(D_loss_dict, step=D_optimizer.iterations, name='D_losses')
if D_optimizer.iterations.numpy() % args.n_d == 0:
G_loss_dict = train_G()
tl.summary(G_loss_dict, step=G_optimizer.iterations, name='G_losses')
# sample
if G_optimizer.iterations.numpy() % 100 == 0:
x_fake = sample(z)
img = im.immerge(x_fake, n_rows=10).squeeze()
im.imwrite(img, py.join(sample_dir, 'iter-%09d.jpg' % G_optimizer.iterations.numpy()))
# save checkpoint
checkpoint.save(ep)
| 39.225
| 142
| 0.55972
|
719fc72ba3ca3ca8db3da722c9ad57e709fd5d4c
| 388
|
py
|
Python
|
circle/migrations/0003_alter_person_ph_no.py
|
Acids-Bases/Marketplace
|
31f42a077279d891cdb6bb86abb2b8c6e841a889
|
[
"MIT"
] | null | null | null |
circle/migrations/0003_alter_person_ph_no.py
|
Acids-Bases/Marketplace
|
31f42a077279d891cdb6bb86abb2b8c6e841a889
|
[
"MIT"
] | null | null | null |
circle/migrations/0003_alter_person_ph_no.py
|
Acids-Bases/Marketplace
|
31f42a077279d891cdb6bb86abb2b8c6e841a889
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-05 18:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('circle', '0002_auto_20210705_1845'),
]
operations = [
migrations.AlterField(
model_name='person',
name='ph_no',
field=models.BigIntegerField(unique=True),
),
]
| 20.421053
| 54
| 0.600515
|
f24053acddc42352a14eecbf17b1b7c71b392bb9
| 3,871
|
py
|
Python
|
aiida/backends/djsite/settings.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2016-09-12T10:51:00.000Z
|
2016-09-12T10:51:00.000Z
|
aiida/backends/djsite/settings.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 17
|
2020-03-11T17:04:05.000Z
|
2020-05-01T09:34:45.000Z
|
aiida/backends/djsite/settings.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=import-error, no-name-in-module
""" Django settings for the AiiDA project. """
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import UUID
from aiida.common import exceptions
from aiida.common.timezone import get_current_timezone
from aiida.manage.configuration import get_profile, settings
try:
PROFILE = get_profile()
except exceptions.MissingConfigurationError as exception:
raise exceptions.MissingConfigurationError(f'the configuration could not be loaded: {exception}')
if PROFILE is None:
raise exceptions.ProfileConfigurationError('no profile has been loaded')
if PROFILE.database_backend != 'django':
raise exceptions.ProfileConfigurationError(
f'incommensurate database backend `{PROFILE.database_backend}` for profile `{PROFILE.name}`'
)
PROFILE_CONF = PROFILE.dictionary
DATABASES = {
'default': {
'ENGINE': f'django.db.backends.{PROFILE.database_engine}',
'NAME': PROFILE.database_name,
'PORT': PROFILE.database_port,
'HOST': PROFILE.database_hostname,
'USER': PROFILE.database_username,
'PASSWORD': PROFILE.database_password,
}
}
# CUSTOM USER CLASS
AUTH_USER_MODEL = 'db.DbUser'
# No secret key defined since we do not use Django to serve HTTP pages
SECRET_KEY = 'placeholder' # noqa
# Automatic logging configuration for Django is disabled here
# and done for all backends in aiida/__init__.py
LOGGING_CONFIG = None
# Keep DEBUG = False! Otherwise every query is stored in memory
DEBUG = False
ADMINS = []
ALLOWED_HOSTS = []
MANAGERS = ADMINS
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Local time zone for this installation. Always choose the system timezone.
# Note: This causes django to set the 'TZ' environment variable, which is read by tzlocal from then onwards.
# See https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-TIME_ZONE
TIME_ZONE = get_current_timezone().zone
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
# For AiiDA, leave it as True, otherwise setting properties with dates will not work.
USE_TZ = settings.USE_TZ
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
'debug':
DEBUG,
},
},
]
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'aiida.backends.djsite.db',
'aldjemy',
]
ALDJEMY_DATA_TYPES = {
'UUIDField': lambda field: UUID(),
'JSONField': lambda field: JSONB(),
}
| 33.95614
| 108
| 0.658228
|
9f78afc3cfc0ca8fe068d14610c871bf2e7bef8f
| 11,793
|
py
|
Python
|
continual_transformers/co_si_mha.py
|
LukasHedegaard/continual-transformers
|
5f1dc7738be18ef4ce95b3eabbef9a5d62d0a537
|
[
"Apache-2.0"
] | 3
|
2022-01-16T08:05:12.000Z
|
2022-03-08T19:38:09.000Z
|
continual_transformers/co_si_mha.py
|
LukasHedegaard/continual-transformers
|
5f1dc7738be18ef4ce95b3eabbef9a5d62d0a537
|
[
"Apache-2.0"
] | null | null | null |
continual_transformers/co_si_mha.py
|
LukasHedegaard/continual-transformers
|
5f1dc7738be18ef4ce95b3eabbef9a5d62d0a537
|
[
"Apache-2.0"
] | 1
|
2022-01-16T08:05:18.000Z
|
2022-01-16T08:05:18.000Z
|
import math
from functools import partial
from logging import getLogger
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from continual.module import CallMode
from torch import Tensor
from .co_mha_base import CoMultiheadAttentionBase, MaybeTensor
logger = getLogger(__name__)
State = Tuple[
Tensor, # Q_mem, (B, Nt-1, E)
Tensor, # K_T_mem, (B, E, Ns)
Tensor, # V_mem, (B, Ns, E)
]
def _scaled_dot_product_attention_default_state(
batch_size: int,
sequence_len: int,
embed_dim: int,
num_heads: int,
query_index=-1,
init_fn=torch.zeros,
dtype=None,
device=None,
):
init_fn = partial(init_fn, dtype=dtype, device=device)
E = embed_dim // num_heads
B = batch_size * num_heads
N = sequence_len
Nq = sequence_len - query_index - 1 if query_index >= 0 else -query_index - 1
Q_mem = init_fn((B, Nq, E))
K_T_mem = init_fn((B, E, N))
V_mem = init_fn((B, N, E))
return (Q_mem, K_T_mem, V_mem)
def _scaled_dot_product_attention_step(
prev_state: State,
q_step: Tensor, # step input (B, E)
k_step: Tensor, # step input (B, E)
v_step: Tensor, # step input (B, E)
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
) -> Tuple[Tensor, State]:
"""
    Computes the Continual Single-output Scaled Dot-Product Attention on query, key and value tensors.
Returns attended values and updated states.
Args:
q_step, k_step, v_step: query, key and value tensors for a step. See Shape section for shape details.
attn_mask: optional tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
- q_step: :math:`(B, E)` where B is batch size and E is embedding dimension.
- k_step: :math:`(B, E)` where B is batch size and E is embedding dimension.
- v_step: :math:`(B, E)` where B is batch size and E is embedding dimension.
        - Output: attention values have shape :math:`(B, 1, E)`; the updated state is returned as the second element.
"""
if attn_mask is not None:
logger.warning("attn_mask is not supported yet and will be skipped")
if dropout_p != 0.0:
logger.warning("dropout_p is not supported yet and will be skipped")
(
Q_mem, # (B, Nq, E)
K_T_mem, # (B, E, Ns)
V_mem, # (B, Ns, E)
) = prev_state
B, E = q_step.shape
q_step = q_step / math.sqrt(E)
q_sel = (Q_mem[:, 0] if Q_mem.shape[1] > 0 else q_step).unsqueeze(1)
# Update states
# Note: We're allowing the K and V mem to have one more entry than
    # strictly necessary to simplify computations.
K_T_new = torch.roll(K_T_mem, shifts=-1, dims=(2,))
K_T_new[:, :, -1] = k_step
V_new = torch.roll(V_mem, shifts=-1, dims=(1,))
V_new[:, -1] = v_step
attn = torch.bmm(q_sel, K_T_new)
attn_sm = F.softmax(attn, dim=-1)
if dropout_p > 0.0:
attn_sm = F.dropout(attn_sm, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn_sm, V_new)
if Q_mem.shape[1] > 0:
Q_new = torch.roll(Q_mem, shifts=-1, dims=(1,))
Q_new[:, -1] = q_step
else:
Q_new = Q_mem
new_states = (Q_new, K_T_new, V_new)
return output, new_states
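# --- Editor's sketch (not part of the original module): minimal single-step usage ---
# The sizes below are illustrative assumptions; the state layout matches
# `_scaled_dot_product_attention_default_state` above, where B = batch_size * num_heads
# and E = embed_dim // num_heads.
def _example_single_step():  # pragma: no cover - illustrative only
    batch_size, sequence_len, embed_dim, num_heads = 2, 4, 8, 2
    state = _scaled_dot_product_attention_default_state(
        batch_size, sequence_len, embed_dim, num_heads
    )
    head_dim = embed_dim // num_heads
    q = torch.randn(batch_size * num_heads, head_dim)
    k = torch.randn(batch_size * num_heads, head_dim)
    v = torch.randn(batch_size * num_heads, head_dim)
    output, state = _scaled_dot_product_attention_step(state, q, k, v)
    return output.shape  # expected: (batch_size * num_heads, 1, head_dim)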
class CoSiMultiheadAttention(CoMultiheadAttentionBase):
"""
Continual Single-output MultiHeadAttention.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
sequence_len: Length of token sequence
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
"""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
sequence_len=None,
query_index=-1,
forward_returns_attn_mask=True,
embed_dim_second=False,
) -> None:
CoMultiheadAttentionBase.__init__(
self,
embed_dim,
num_heads,
dropout,
bias,
add_bias_kv,
add_zero_attn,
kdim,
vdim,
batch_first,
device,
dtype,
sequence_len,
partial(
_scaled_dot_product_attention_default_state,
sequence_len=sequence_len,
embed_dim=embed_dim,
num_heads=num_heads,
query_index=query_index,
),
_scaled_dot_product_attention_step,
forward_returns_attn_mask,
embed_dim_second,
)
assert query_index < sequence_len
self.query_index = query_index
def get_state(self) -> Optional[State]:
"""Get model state
Returns:
Optional[State]: A State tuple if the model has been initialised and otherwise None.
"""
if (
getattr(self, "Q_mem", None) is not None
and getattr(self, "K_T_mem", None) is not None
and getattr(self, "V_mem", None) is not None
and getattr(self, "stride_index", None) is not None
):
return (
self.Q_mem,
self.K_T_mem,
self.V_mem,
self.stride_index,
)
def set_state(self, state: State):
"""Set model state
Args:
            state (State): State tuple to set as the new internal state
"""
(
self.Q_mem,
self.K_T_mem,
self.V_mem,
self.stride_index,
) = state
def clean_state(self):
"""Clean model state"""
if hasattr(self, "Q_mem"):
del self.Q_mem
if hasattr(self, "K_T_mem"):
del self.K_T_mem
if hasattr(self, "V_mem"):
del self.V_mem
if hasattr(self, "stride_index"):
del self.stride_index
@property
def delay(self) -> int:
return (
self.sequence_len - self.query_index - 1
if self.query_index >= 0
else -self.query_index - 1
)
def forward(
self,
query: Tensor,
key: Tensor = None,
value: Tensor = None,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
if key is None:
key = query
if value is None:
value = query
# Select a single query entry
if self.batch_first:
if self.embed_dim_second:
query = query[:, :, self.query_index].unsqueeze(2)
else:
query = query[:, self.query_index].unsqueeze(1)
else:
query = query[self.query_index].unsqueeze(0)
o = CoMultiheadAttentionBase.forward(
self, query, key, value, key_padding_mask, need_weights, attn_mask
)
return o
def forward_step(
self,
query: Tensor,
key: Tensor = None,
value: Tensor = None,
update_state=True,
*args,
**kwargs,
) -> MaybeTensor:
"""
Args:
query, key, value: step_inputs for mapping a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
Shapes for inputs:
            - query: :math:`(N, E)` where N is the batch size and E is the embedding dimension.
            - key: :math:`(N, E)` where N is the batch size and E is the embedding dimension.
            - value: :math:`(N, E)` where N is the batch size and E is the embedding dimension.
Shapes for outputs:
- attn_output: :math:`(N, E)` where N is the batch size and E is the embedding dimension.
"""
o = CoMultiheadAttentionBase.forward_step(
self, query, key, value, update_state, *args, **kwargs
)
return o.squeeze(1 if self.batch_first else 0) if isinstance(o, Tensor) else o
def forward_steps(
self,
query: Tensor,
key: Tensor = None,
value: Tensor = None,
update_state=True,
*args,
**kwargs,
) -> MaybeTensor:
"""Forward computation for multiple steps with state initialisation
Args:
query (Tensor): query.
key (Tensor): key.
value (Tensor): value.
update_state (bool): Whether internal state should be updated during this operation.
Returns:
Tensor: Stepwise layer outputs
"""
o = CoMultiheadAttentionBase.forward_steps(
self, query, key, value, update_state, *args, **kwargs
)
if isinstance(o, Tensor):
o = o.squeeze(2)
if self.embed_dim_second:
o = o.transpose(1, 2) # N T E -> N E T
return o
def flops(self, include_muls=True, include_adds=False, include_exps=False):
f = 0
# Linear projection
steps_taken = {
CallMode.FORWARD: self.sequence_len,
CallMode.FORWARD_STEP: 1,
}[self.call_mode]
f += (
steps_taken
* self.embed_dim
* self.embed_dim
* 3 # Assuming equal len for Q, K, and V
)
if include_adds:
f += 3 * steps_taken * self.embed_dim * (self.embed_dim - 1)
if self.in_proj_bias is not None:
f += 3 * steps_taken * self.embed_dim
if include_adds:
f += 3 * steps_taken * self.embed_dim
# Multi-head Scaled Dot-Product Attention
f += self.num_heads * {
CallMode.FORWARD: scaled_dot_prod_attn_flops,
CallMode.FORWARD_STEP: scaled_dot_prod_attn_step_flops,
}[self.call_mode](
self.sequence_len,
self.embed_dim // self.num_heads,
include_muls,
include_adds,
include_exps,
)
# Linear projection
f += 1 * self.embed_dim * (self.embed_dim + 1)
return f
def scaled_dot_prod_attn_flops(
sequence_len, embed_dim, include_muls=True, include_adds=False, include_exps=False
):
n = sequence_len
d = embed_dim
flops = 0
if include_muls:
flops += 2 * n * d + 2 * d
if include_adds:
flops += 2 * n * d - d - 1
if include_exps:
flops += n
return flops
def scaled_dot_prod_attn_step_flops(
sequence_len, embed_dim, include_muls=True, include_adds=False, include_exps=False
):
return scaled_dot_prod_attn_flops(
sequence_len, embed_dim, include_muls, include_adds, include_exps
)
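# --- Editor's sketch (not part of the original module): instantiating the layer above ---
# Sizes are illustrative; with the default batch_first=False the input layout is
# (sequence_len, batch, embed_dim), and `forward` is annotated to return
# (attn_output, attn_weights).
def _example_co_si_mha():  # pragma: no cover - illustrative only
    layer = CoSiMultiheadAttention(embed_dim=8, num_heads=2, sequence_len=4)
    x = torch.randn(4, 2, 8)  # (sequence_len, batch, embed_dim)
    attn_output, attn_weights = layer.forward(x)
    return attn_output.shape  # roughly (1, batch, embed_dim) for the single query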
| 30.316195
| 109
| 0.579327
|
d795b8a0c4f0419c3fe8f45fcc9bc617872afd95
| 1,903
|
py
|
Python
|
setup.py
|
allisson/asyncpg-utils
|
616d5f47d7e01108cb9272b16e4d2b511bfdae08
|
[
"MIT"
] | 15
|
2018-01-02T10:32:43.000Z
|
2020-12-10T22:24:12.000Z
|
setup.py
|
allisson/asyncpg-utils
|
616d5f47d7e01108cb9272b16e4d2b511bfdae08
|
[
"MIT"
] | null | null | null |
setup.py
|
allisson/asyncpg-utils
|
616d5f47d7e01108cb9272b16e4d2b511bfdae08
|
[
"MIT"
] | null | null | null |
import codecs
import os
import re
from setuptools import setup, find_packages, Command
here = os.path.abspath(os.path.dirname(__file__))
version = '0.0.0'
changes = os.path.join(here, 'CHANGES.rst')
match = r'^#*\s*(?P<version>[0-9]+\.[0-9]+(\.[0-9]+)?)$'
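# Editor's note (illustrative): with the pattern above, a CHANGES.rst heading such as
# "## 0.2.1" or a bare "0.2" line matches and yields version = '0.2.1' / '0.2'.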
with codecs.open(changes, encoding='utf-8') as changes:
for line in changes:
res = re.match(match, line)
if res:
version = res.group('version')
break
# Get the long description
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Get the changelog
with codecs.open(os.path.join(here, 'CHANGES.rst'), encoding='utf-8') as f:
changelog = f.read()
# Get requirements
with codecs.open(os.path.join(here, 'requirements.txt')) as f:
install_requirements = [line.strip() for line in f.readlines()]
tests_requirements = [
'pytest',
'pytest-cov',
]
class VersionCommand(Command):
description = 'print library version'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(version)
setup(
name='asyncpg-utils',
version=version,
description='Utilities for Asyncpg',
long_description=long_description,
url='https://github.com/allisson/asyncpg-utils',
author='Allisson Azevedo',
author_email='allisson@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
],
packages=find_packages(exclude=['docs', 'tests*']),
setup_requires=['pytest-runner'],
install_requires=install_requirements,
tests_require=tests_requirements,
cmdclass={
'version': VersionCommand,
},
)
| 25.716216
| 75
| 0.646348
|
0f5b1589271ef95cc2e7fc3cbe860b2e02e74c47
| 494
|
py
|
Python
|
MainDisplay.py
|
civilwargeeky/MusicDownloader
|
89f4adcbc32a7e8c918e0c7bb16d7a708b842016
|
[
"MIT"
] | 1
|
2017-05-17T14:21:12.000Z
|
2017-05-17T14:21:12.000Z
|
MainDisplay.py
|
civilwargeeky/MusicDownloader
|
89f4adcbc32a7e8c918e0c7bb16d7a708b842016
|
[
"MIT"
] | null | null | null |
MainDisplay.py
|
civilwargeeky/MusicDownloader
|
89f4adcbc32a7e8c918e0c7bb16d7a708b842016
|
[
"MIT"
] | null | null | null |
#Main Display Class
#Handles complicated user interface
import tkinter as tk
from tkinter import ttk
import MusicDisplay as Disp
SCRIPTS_FOLDER = "Scripts"
def blankSpace():
return Disp.VarLabel("", width = 5)
def main():
root = Disp.MainBox()#.size(200,200)
Disp.VarButton("Playlist 1").grid(columnspan=5)
blankSpace().grid(row = 1, column = 0)
Disp.VarButton("First thing").grid(row = 1, column = 1, columnspan = 4)
root.mainloop()
if __name__ == "__main__":
main()
| 21.478261
| 73
| 0.694332
|
f44fa0f51abb791eb637a4459acc7b3bac0f088a
| 8,602
|
py
|
Python
|
trinity/cli_parser.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
trinity/cli_parser.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
trinity/cli_parser.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
import argparse
import json
import logging
from pathlib import Path
from typing import (
Any,
)
from eth_utils import (
DEBUG2_LEVEL_NUM,
ValidationError,
)
from p2p.kademlia import Node
from p2p.validation import validate_enode_uri
from trinity import __version__
from trinity._utils.eip1085 import validate_raw_eip1085_genesis_config
from trinity.constants import (
MAINNET_NETWORK_ID,
ROPSTEN_NETWORK_ID,
)
class ValidateAndStoreEnodes(argparse.Action):
def __call__(self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
value: Any,
option_string: str = None) -> None:
if value is None:
return
validate_enode_uri(value)
enode = Node.from_uri(value)
if getattr(namespace, self.dest) is None:
setattr(namespace, self.dest, [])
enode_list = getattr(namespace, self.dest)
enode_list.append(enode)
LOG_LEVEL_CHOICES = {
# numeric versions
'8': DEBUG2_LEVEL_NUM,
'10': logging.DEBUG,
'20': logging.INFO,
'30': logging.WARNING,
'40': logging.ERROR,
'50': logging.CRITICAL,
# string versions
'DEBUG2': DEBUG2_LEVEL_NUM,
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARN': logging.WARNING,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
def log_level_formatted_string() -> str:
numeric_levels = [k for k in LOG_LEVEL_CHOICES.keys() if k.isdigit()]
literal_levels = [k for k in LOG_LEVEL_CHOICES.keys() if not k.isdigit()]
return (
"LEVEL must be one of: "
f"\n {'/'.join(numeric_levels)} (numeric); "
f"\n {'/'.join(literal_levels).lower()} (lowercase); "
f"\n {'/'.join(literal_levels).upper()} (uppercase)."
)
class ValidateAndStoreLogLevel(argparse.Action):
def __call__(self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
value: Any,
option_string: str = None) -> None:
if value is None:
return
raw_value = value.upper()
# this is a global log level.
if raw_value in LOG_LEVEL_CHOICES:
path = None
log_level = LOG_LEVEL_CHOICES[raw_value]
else:
path, _, raw_log_level = value.partition('=')
if not path or not raw_log_level:
raise argparse.ArgumentError(
self,
f"Invalid logging config: '{value}'. Log level may be specified "
"as a global logging level using the syntax `--log-level "
"<LEVEL>`; or, to specify the logging level for an "
"individual logger, '--log-level "
"<LOGGER-NAME>=<LEVEL>'" + '\n' +
log_level_formatted_string()
)
try:
log_level = LOG_LEVEL_CHOICES[raw_log_level.upper()]
except KeyError:
                raise argparse.ArgumentError(
                    self,
                    f"Invalid logging level. Got '{raw_log_level}'. " + log_level_formatted_string()
                )
if getattr(namespace, self.dest) is None:
setattr(namespace, self.dest, {})
log_levels = getattr(namespace, self.dest)
if path in log_levels:
if path is None:
raise argparse.ArgumentError(
self,
f"Global logging has already been configured to '{log_level}'. The "
"global logging level may only be specified once."
)
else:
raise argparse.ArgumentError(
self,
f"The logging level for '{path}' was provided more than once. "
"Please ensure the each name is provided only once"
)
log_levels[path] = log_level
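# --- Editor's sketch (not part of the original module): how the action above combines levels ---
# `parser` is defined later in this module; the logger name 'p2p' is an arbitrary example.
def _example_log_level_cli():  # pragma: no cover - illustrative only
    args = parser.parse_args(['--log-level', 'INFO', '--log-level', 'p2p=DEBUG'])
    # A global level is stored under the key None, per-logger overrides under their names.
    return args.log_levels  # {None: logging.INFO, 'p2p': logging.DEBUG}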
parser = argparse.ArgumentParser(description='Trinity')
#
# subparser for sub commands
#
# Components may add subcommands with a `func` attribute
# to gain control over the main Trinity process
subparser = parser.add_subparsers(dest='subcommand')
#
# Argument Groups
#
trinity_parser = parser.add_argument_group('core')
logging_parser = parser.add_argument_group('logging')
network_parser = parser.add_argument_group('network')
chain_parser = parser.add_argument_group('chain')
debug_parser = parser.add_argument_group('debug')
#
# Trinity Globals
#
trinity_parser.add_argument('--version', action='version', version=__version__)
trinity_parser.add_argument(
'--trinity-root-dir',
help=(
"The filesystem path to the base directory that trinity will store it's "
"information. Default: $XDG_DATA_HOME/.local/share/trinity"
),
)
trinity_parser.add_argument(
'--port',
type=int,
required=False,
default=30303,
help=(
"Port on which trinity should listen for incoming p2p/discovery connections. Default: 30303"
),
)
trinity_parser.add_argument(
'--trinity-tmp-root-dir',
action="store_true",
required=False,
default=False,
help=(
"If this flag is set, trinity will launch with a temporary root"
" directory as provided by the ``tempfile`` library."
),
)
#
# Logging configuration
#
logging_parser.add_argument(
'-l',
'--log-level',
action=ValidateAndStoreLogLevel,
dest="log_levels",
metavar="LEVEL",
help=(
"Configure the logging level. " + log_level_formatted_string()
),
)
logging_parser.add_argument(
'--stderr-log-level',
dest="stderr_log_level",
help=(
"Configure the logging level for the stderr logging."
),
)
logging_parser.add_argument(
'--file-log-level',
dest="file_log_level",
help=(
"Configure the logging level for file-based logging."
),
)
#
# Main parser for running trinity as a node.
#
networkid_parser = network_parser.add_mutually_exclusive_group()
networkid_parser.add_argument(
'--network-id',
type=int,
help="Network identifier (1=Mainnet, 3=Ropsten)",
default=MAINNET_NETWORK_ID,
)
networkid_parser.add_argument(
'--ropsten',
action='store_const',
const=ROPSTEN_NETWORK_ID,
dest='network_id',
help=(
"Ropsten network: pre configured proof-of-work test network. Shortcut "
"for `--networkid=3`"
),
)
network_parser.add_argument(
'--preferred-node',
action=ValidateAndStoreEnodes,
dest="preferred_nodes",
help=(
"An enode address which will be 'preferred' above nodes found using the "
"discovery protocol"
),
)
network_parser.add_argument(
'--max-peers',
help=(
"Maximum number of network peers"
),
type=int,
)
#
# Chain configuration
#
class EIP1085GenesisLoader(argparse.Action):
def __call__(self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
value: Any,
option_string: str = None) -> None:
genesis_file_path = Path(value)
if not genesis_file_path.exists():
raise argparse.ArgumentError(
self,
f"No genesis file found at: `{value}`"
)
try:
genesis_config = json.load(genesis_file_path.open())
except json.JSONDecodeError:
raise argparse.ArgumentError(
self,
f"The genesis file at `{value}` is not valid json"
)
try:
validate_raw_eip1085_genesis_config(genesis_config)
except ValidationError as err:
raise argparse.ArgumentError(
self,
f"The genesis file at `{value}` does not pass EIP1085 validation: {err}"
)
setattr(namespace, self.dest, genesis_config)
chain_parser.add_argument(
'--genesis',
help=(
"File containing a custom genesis configuration file per EIP1085"
),
action=EIP1085GenesisLoader,
)
chain_parser.add_argument(
'--data-dir',
help=(
"The directory where chain data is stored"
),
)
chain_parser.add_argument(
'--nodekey',
help=(
"Hexadecimal encoded private key to use for the nodekey"
" or the filesystem path to the file which contains the nodekey"
)
)
#
# Debug configuration
#
debug_parser.add_argument(
'--profile',
action='store_true',
help=(
"Enables profiling via cProfile."
),
)
| 27.050314
| 100
| 0.610439
|
d2380de89e86295d9a3a6bed09d5d07c064f081b
| 17,411
|
py
|
Python
|
src/av2/geometry/camera/pinhole_camera.py
|
johnwlambert/argoverse2-api
|
caf514b972460e103dc69bbb561152b161aced81
|
[
"MIT"
] | null | null | null |
src/av2/geometry/camera/pinhole_camera.py
|
johnwlambert/argoverse2-api
|
caf514b972460e103dc69bbb561152b161aced81
|
[
"MIT"
] | null | null | null |
src/av2/geometry/camera/pinhole_camera.py
|
johnwlambert/argoverse2-api
|
caf514b972460e103dc69bbb561152b161aced81
|
[
"MIT"
] | null | null | null |
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Implements a pinhole camera interface."""
from __future__ import annotations
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Tuple, Union
import numpy as np
import av2.geometry.geometry as geometry_utils
import av2.utils.io as io_utils
from av2.geometry.se3 import SE3
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayInt
@dataclass(frozen=True)
class Intrinsics:
"""Models a camera intrinsic matrix.
Args:
fx_px: Horizontal focal length in pixels.
fy_px: Vertical focal length in pixels.
cx_px: Horizontal focal center in pixels.
cy_px: Vertical focal center in pixels.
width_px: Width of image in pixels.
height_px: Height of image in pixels.
"""
fx_px: float
fy_px: float
cx_px: float
cy_px: float
width_px: int
height_px: int
@cached_property
def K(self) -> NDArrayFloat:
"""Camera intrinsic matrix."""
K: NDArrayFloat = np.eye(3, dtype=float)
K[0, 0] = self.fx_px
K[1, 1] = self.fy_px
K[0, 2] = self.cx_px
K[1, 2] = self.cy_px
return K
@dataclass(frozen=True)
class PinholeCamera:
"""Parameterizes a pinhole camera with zero skew.
Args:
ego_SE3_cam: pose of camera in the egovehicle frame (inverse of extrinsics matrix).
intrinsics: `Intrinsics` object containing intrinsic parameters and image dimensions.
cam_name: sensor name that camera parameters correspond to.
"""
ego_SE3_cam: SE3
intrinsics: Intrinsics
cam_name: str
@property
def width_px(self) -> int:
"""Return the width of the image in pixels."""
return self.intrinsics.width_px
@property
def height_px(self) -> int:
"""Return the height of the image in pixels."""
return self.intrinsics.height_px
@cached_property
def extrinsics(self) -> NDArrayFloat:
"""Return the camera extrinsics."""
return self.ego_SE3_cam.inverse().transform_matrix
@classmethod
def from_feather(cls, log_dir: Path, cam_name: str) -> PinholeCamera:
"""Create a pinhole camera model from a feather file.
Note: Data is laid out with sensor names along row dimension, and columns are sensor attribute data.
Args:
log_dir: path to a log directory containing feather files w/ calibration info.
cam_name: name of the camera.
Returns:
A new PinholeCamera object, containing camera extrinsics and intrinsics.
"""
intrinsics_path = log_dir / "calibration" / "intrinsics.feather"
intrinsics_df = io_utils.read_feather(intrinsics_path).set_index("sensor_name")
params = intrinsics_df.loc[cam_name]
intrinsics = Intrinsics(
fx_px=params["fx_px"],
fy_px=params["fy_px"],
cx_px=params["cx_px"],
cy_px=params["cy_px"],
width_px=int(params["width_px"]),
height_px=int(params["height_px"]),
)
sensor_name_to_pose = io_utils.read_ego_SE3_sensor(log_dir)
return cls(
ego_SE3_cam=sensor_name_to_pose[cam_name],
intrinsics=intrinsics,
cam_name=cam_name,
)
def cull_to_view_frustum(self, uv: NDArrayFloat, points_cam: NDArrayFloat) -> NDArrayBool:
"""Cull 3d points to camera view frustum.
Given a set of coordinates in the image plane and corresponding points
in the camera coordinate reference frame, determine those points
that have a valid projection into the image. 3d points with valid
projections have x coordinates in the range [0,width_px-1], y-coordinates
in the range [0,height_px-1], and a positive z-coordinate (lying in
front of the camera frustum).
Ref: https://en.wikipedia.org/wiki/Hidden-surface_determination#Viewing-frustum_culling
Args:
uv: Numpy array of shape (N,2) representing image plane coordinates in [0,W-1] x [0,H-1]
where (H,W) are the image height and width.
points_cam: Numpy array of shape (N,3) representing corresponding 3d points in the camera coordinate frame.
Returns:
Numpy boolean array of shape (N,) indicating which points fall within the camera view frustum.
"""
is_valid_x = np.logical_and(0 <= uv[:, 0], uv[:, 0] < self.width_px - 1)
is_valid_y = np.logical_and(0 <= uv[:, 1], uv[:, 1] < self.height_px - 1)
is_valid_z = points_cam[:, 2] > 0
is_valid_points: NDArrayBool = np.logical_and.reduce([is_valid_x, is_valid_y, is_valid_z])
return is_valid_points
def project_ego_to_img(
self, points_ego: NDArrayFloat, remove_nan: bool = False
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project a collection of 3d points (provided in the egovehicle frame) to the image plane.
Args:
points_ego: numpy array of shape (N,3) representing points in the egovehicle frame.
remove_nan: whether to remove coordinates that project to invalid (NaN) values.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
            points_cam: camera frame coordinates, as Numpy array of shape (N,3).
is_valid_points: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
"""
# convert cartesian to homogeneous coordinates.
points_ego_hom = geometry_utils.cart_to_hom(points_ego)
points_cam: NDArrayFloat = self.extrinsics @ points_ego_hom.T
# remove bottom row of all 1s.
uv = self.intrinsics.K @ points_cam[:3, :]
uv = uv.T
points_cam = points_cam.T
if remove_nan:
uv, points_cam = remove_nan_values(uv, points_cam)
uv = uv[:, :2] / uv[:, 2].reshape(-1, 1)
is_valid_points = self.cull_to_view_frustum(uv, points_cam)
return uv, points_cam, is_valid_points
def project_cam_to_img(
self, points_cam: NDArrayFloat, remove_nan: bool = False
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project a collection of 3d points in the camera reference frame to the image plane.
Args:
            points_cam: numpy array of shape (N,3) representing points in the camera coordinate frame.
remove_nan: whether to remove coordinates that project to invalid (NaN) values.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
            points_cam: camera frame coordinates, as Numpy array of shape (N,3).
is_valid_points: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
"""
uv = self.intrinsics.K @ points_cam[:3, :]
uv = uv.T
points_cam = points_cam.T
if remove_nan:
uv, points_cam = remove_nan_values(uv, points_cam)
uv = uv[:, :2] / uv[:, 2].reshape(-1, 1)
is_valid_points = self.cull_to_view_frustum(uv, points_cam)
return uv, points_cam, is_valid_points
def project_ego_to_img_motion_compensated(
self,
points_lidar_time: NDArrayFloat,
city_SE3_ego_cam_t: SE3,
city_SE3_ego_lidar_t: SE3,
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project points in the ego frame to the image with motion compensation.
Because of the high frame rate, motion compensation's role between the
sensors is not very significant, moving points only by millimeters
to centimeters. If the vehicle is moving at 25 miles per hour, equivalent
to 11 meters/sec, then in 17 milliseconds (the max time between a lidar sweep
and camera image capture) we should be able to move up to 187 millimeters.
This can be verified in practice as the mean_change:
mean_change = np.amax(points_h_cam_time.T[:,:3] - points_h_lidar_time ,axis=0)
Adjust LiDAR points for egovehicle motion. This function accepts the
egovehicle's pose in the city map both at camera time and also at
the LiDAR time.
We perform the following transformation:
pt_egovehicle_cam_t = egovehicle_cam_t_SE3_city * city_SE3_egovehicle_lidar_t * pt_egovehicle_lidar_t
Note that both "cam_time_points_h" and "lidar_time_points_h" are 3D points in the
vehicle coordinate frame, but captured at different times. These LiDAR points
always live in the vehicle frame, but just in different timestamps. If we take
a lidar point in the egovehicle frame, captured at lidar time, and bring it into
the map at this lidar timestamp, then we know the transformation from map to
egovehicle reference frame at the time when the camera image was captured.
Thus, we move from egovehicle @ lidar time, to the map (which is time agnostic),
then we move from map to egovehicle @ camera time. Now we suddenly have lidar points
living in the egovehicle frame @ camera time.
Args:
points_lidar_time: Numpy array of shape (N,3)
city_SE3_ego_cam_t: egovehicle pose when camera image was recorded.
city_SE3_ego_lidar_t: egovehicle pose when LiDAR sweep was recorded.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
points_cam: Numpy array of shape (N,3) representing coordinates of points within the camera frame.
is_valid_points_cam: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
Raises:
ValueError: If `city_SE3_ego_cam_t` or `city_SE3_ego_lidar_t` is `None`.
"""
if city_SE3_ego_cam_t is None:
raise ValueError("city_SE3_ego_cam_t cannot be `None`!")
if city_SE3_ego_lidar_t is None:
raise ValueError("city_SE3_ego_lidar_t cannot be `None`!")
ego_cam_t_SE3_ego_lidar_t = city_SE3_ego_cam_t.inverse().compose(city_SE3_ego_lidar_t)
points_cam_time = ego_cam_t_SE3_ego_lidar_t.transform_point_cloud(points_lidar_time)
return self.project_ego_to_img(points_cam_time)
@cached_property
def right_clipping_plane(self) -> NDArrayFloat:
"""Form the right clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = -self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def left_clipping_plane(self) -> NDArrayFloat:
"""Form the left clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def top_clipping_plane(self) -> NDArrayFloat:
"""Top clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, self.intrinsics.fx_px, self.height_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def bottom_clipping_plane(self) -> NDArrayFloat:
"""Bottom clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, -self.intrinsics.fx_px, self.height_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
def near_clipping_plane(self, near_clip_m: float) -> NDArrayFloat:
"""Near clipping plane for a camera view frustum.
Args:
near_clip_m: Near clipping plane distance in meters.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, 0.0, 1.0, -near_clip_m
coeffs: NDArrayFloat = np.array([a, b, c, d])
return coeffs
def frustum_planes(self, near_clip_dist: float = 0.5) -> NDArrayFloat:
"""Compute the planes enclosing the field of view (view frustum).
Reference (1): https://en.wikipedia.org/wiki/Viewing_frustum
Reference (2): https://en.wikipedia.org/wiki/Plane_(geometry)
Solve for the coefficients of all frustum planes:
ax + by + cz = d
Args:
near_clip_dist: Distance of the near clipping plane from the origin.
Returns:
            (5, 4) matrix where each row corresponds to the coefficients of a plane.
"""
left_plane = self.left_clipping_plane
right_plane = self.right_clipping_plane
top_plane = self.top_clipping_plane
bottom_plane = self.bottom_clipping_plane
near_plane = self.near_clipping_plane(near_clip_dist)
planes: NDArrayFloat = np.stack([left_plane, right_plane, near_plane, bottom_plane, top_plane])
return planes
@cached_property
def egovehicle_yaw_cam_rad(self) -> float:
"""Compute the camera's yaw, in the egovehicle frame.
        The rotation R maps the x axis to the first column of R; similarly,
        the y and z axes are mapped to the second and third columns.
Returns:
Counter-clockwise angle from x=0 (in radians) of camera center ray, in the egovehicle frame.
"""
egovehicle_SE3_camera = self.ego_SE3_cam
        # The third column of this rotation matrix is the new basis vector for the z axis (pointing out of the camera);
        # take its x and y components (the z component is near zero, since the camera is close to horizontal).
new_z_axis = egovehicle_SE3_camera.rotation[:, 2]
dx, dy, dz = new_z_axis
egovehicle_yaw_cam_rad = np.arctan2(dy, dx)
return float(egovehicle_yaw_cam_rad)
@cached_property
def fov_theta_rad(self) -> float:
"""Compute the field of view of a camera frustum to use for view frustum culling during rendering.
Returns:
Angular extent of camera's field of view (measured in radians).
"""
fov_theta_rad = 2 * np.arctan(0.5 * self.width_px / self.intrinsics.fx_px)
return float(fov_theta_rad)
def compute_pixel_ray_directions(self, uv: Union[NDArrayFloat, NDArrayInt]) -> NDArrayFloat:
"""Given (u,v) coordinates and intrinsics, generate pixel rays in the camera coordinate frame.
Assume +z points out of the camera, +y is downwards, and +x is across the imager.
Args:
uv: Numpy array of shape (N,2) with (u,v) coordinates
Returns:
Array of shape (N,3) with ray directions to each pixel, provided in the camera frame.
Raises:
ValueError: If input (u,v) coordinates are not (N,2) in shape.
RuntimeError: If generated ray directions are not (N,3) in shape.
"""
fx, fy = self.intrinsics.fx_px, self.intrinsics.fy_px
img_h, img_w = self.height_px, self.width_px
if not np.isclose(fx, fy, atol=1e-3):
raise ValueError(f"Focal lengths in the x and y directions must match: {fx} != {fy}")
if uv.shape[1] != 2:
raise ValueError("Input (u,v) coordinates must be (N,2) in shape.")
# Approximation for principal point
px = img_w / 2
py = img_h / 2
u = uv[:, 0]
v = uv[:, 1]
num_rays = uv.shape[0]
ray_dirs = np.zeros((num_rays, 3))
# x center offset from center
ray_dirs[:, 0] = u - px
# y center offset from center
ray_dirs[:, 1] = v - py
ray_dirs[:, 2] = fx
# elementwise multiplication of scalars requires last dim to match
ray_dirs = ray_dirs / np.linalg.norm(ray_dirs, axis=1, keepdims=True) # type: ignore
if ray_dirs.shape[1] != 3:
raise RuntimeError("Ray directions must be (N,3)")
return ray_dirs
def remove_nan_values(uv: NDArrayFloat, points_cam: NDArrayFloat) -> Tuple[NDArrayFloat, NDArrayFloat]:
"""Remove NaN values from camera coordinates and image plane coordinates (accepts corrupt array).
Args:
uv: image plane coordinates, as Numpy array of shape (N,2).
points_cam: Numpy array of shape (N,3) representing coordinates of points within the camera frame.
Returns:
uv_valid: subset of image plane coordinates, which contain no NaN coordinates.
is_valid_points_cam: subset of 3d points within the camera frame, which contain no NaN coordinates.
"""
is_u_valid = np.logical_not(np.isnan(uv[:, 0]))
is_v_valid = np.logical_not(np.isnan(uv[:, 1]))
is_uv_valid = np.logical_and(is_u_valid, is_v_valid)
uv_valid = uv[is_uv_valid]
is_valid_points_cam = points_cam[is_uv_valid]
return uv_valid, is_valid_points_cam
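# --- Editor's sketch (not part of the original module): constructing and using a camera ---
# The intrinsics below are arbitrary, and the SE3(rotation=..., translation=...) call assumes
# the constructor signature of av2.geometry.se3.SE3; an identity pose keeps the example simple.
def _example_project_points() -> Tuple[NDArrayFloat, NDArrayBool]:  # pragma: no cover - illustrative only
    intrinsics = Intrinsics(fx_px=1000.0, fy_px=1000.0, cx_px=960.0, cy_px=600.0, width_px=1920, height_px=1200)
    cam = PinholeCamera(
        ego_SE3_cam=SE3(rotation=np.eye(3), translation=np.zeros(3)),
        intrinsics=intrinsics,
        cam_name="example_cam",
    )
    points_ego: NDArrayFloat = np.array([[0.0, 0.0, 5.0], [1.0, 0.5, 10.0]])
    uv, _, is_valid = cam.project_ego_to_img(points_ego)
    return uv, is_valid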
| 40.679907
| 119
| 0.652921
|
e85d2a6e2ccc40de636aa0a1cf15e00f33ce1066
| 1,733
|
py
|
Python
|
aiokraken/rest/schemas/tests/test_kordertype.py
|
asmodehn/aiokraken
|
b260bd41d5aa091e6a4f1818328426fbe6f625c0
|
[
"MIT"
] | null | null | null |
aiokraken/rest/schemas/tests/test_kordertype.py
|
asmodehn/aiokraken
|
b260bd41d5aa091e6a4f1818328426fbe6f625c0
|
[
"MIT"
] | 82
|
2019-08-30T09:37:49.000Z
|
2022-03-29T14:53:22.000Z
|
aiokraken/rest/schemas/tests/test_kordertype.py
|
asmodehn/aiokraken
|
b260bd41d5aa091e6a4f1818328426fbe6f625c0
|
[
"MIT"
] | null | null | null |
import time
import unittest
from decimal import Decimal
from parameterized import parameterized
import json
import marshmallow
import decimal
from aiokraken.rest.schemas.kordertype import KOrderTypeModel, KOrderTypeField, KOrderTypeStrategy, KOrderTypeStringStrategy
from aiokraken.rest.exceptions import AIOKrakenException
from hypothesis import given
"""
Test module.
This is intended for extensive testing, using parameterized, hypothesis or similar generation methods
For simple usecase examples, we should rely on doctests.
"""
class TestOrderTypeModel(unittest.TestCase):
def test_unknown(self):
with self.assertRaises(ValueError):
KOrderTypeModel('unknown')
@given(KOrderTypeStrategy())
def test_enum(self, model):
assert model.value in [
'market',
'stop market',
'touched market',
'limit',
'stop-loss',
'take-profit',
'stop-loss-profit',
'stop-loss-profit-limit',
'stop-loss-limit',
'take-profit-limit',
'trailing-stop',
'trailing-stop-limit',
'stop-loss-and-limit',
'settle-position',
], model.value
class TestOrderTypeField(unittest.TestCase):
def setUp(self) -> None:
self.field = KOrderTypeField()
@given(KOrderTypeStringStrategy())
def test_deserialize(self, ordertypestr):
p = self.field.deserialize(ordertypestr)
assert isinstance(p, KOrderTypeModel)
@given(KOrderTypeStrategy())
def test_serialize(self, ordertypemodel):
ot = self.field.serialize('ordertype', {'ordertype': ordertypemodel})
assert ot == ordertypemodel.value, ot
| 28.409836
| 124
| 0.667051
|
5ccdad081cac3f7b96a10d3fda83f58044df4714
| 2,848
|
py
|
Python
|
librectify/interface.py
|
RomanJuranek/photo-rectify
|
aa6182376c5f4fcdc0976926e5ecf57e97edd20b
|
[
"BSD-3-Clause"
] | null | null | null |
librectify/interface.py
|
RomanJuranek/photo-rectify
|
aa6182376c5f4fcdc0976926e5ecf57e97edd20b
|
[
"BSD-3-Clause"
] | null | null | null |
librectify/interface.py
|
RomanJuranek/photo-rectify
|
aa6182376c5f4fcdc0976926e5ecf57e97edd20b
|
[
"BSD-3-Clause"
] | null | null | null |
import ctypes
from logging import error
import platform
import numpy as np
from pathlib import Path
from skimage.transform import rescale
from skimage.filters import gaussian
dll_name = {
"Windows": "librectify.dll",
"Linux": "librectify.so",
}
dll_path = Path(__file__).parent / dll_name[platform.system()]
dll = ctypes.cdll.LoadLibrary(dll_path.as_posix())
find_lines = dll.find_line_segment_groups
class LineSegment(ctypes.Structure):
_fields_ = [
("x1", ctypes.c_float),
("y1", ctypes.c_float),
("x2", ctypes.c_float),
("y2", ctypes.c_float),
("weight", ctypes.c_float),
("err", ctypes.c_float),
("group_id", ctypes.c_int)]
c_int32_p = ctypes.POINTER(ctypes.c_int32)
c_float_p = ctypes.POINTER(ctypes.c_float)
linesegments_p = ctypes.POINTER(LineSegment)
find_lines.restype = linesegments_p
find_lines.argtypes = [
c_float_p,
ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_float, ctypes.c_bool, # min_length, refine
ctypes.c_int, # num threads
c_int32_p
]
dll.release_line_segments.argtypes = [
ctypes.POINTER(linesegments_p)
]
def detect_line_segments(image:np.ndarray, max_size=1200, smooth=0):
if not isinstance(image, np.ndarray):
raise ValueError("Image must be a numpy array")
ndim = image.ndim
if ndim not in [2,3]:
raise ValueError("Image must be 2 or 3 dimensioanl")
if ndim == 3:
image = np.mean(image, axis=-1)
#print(image.shape, image.max(), image.dtype)
scale = max_size / float(max(image.shape))
#print(scale)
if scale < 1:
image = rescale(image, scale, order=1, anti_aliasing=False, preserve_range=True)
#print(image.shape, image.max(), image.dtype)
if smooth > 0:
image = gaussian(image, smooth)
#print(image.shape, image.max(), image.dtype)
image = np.ascontiguousarray(image, np.float32)
#print(image.shape, image.max(), image.dtype)
buffer = image.ctypes.data_as(c_float_p)
h,w = image.shape
stride = image.strides[0] // 4
# print(h,w,stride)
# run lib code
n_lines = np.empty((1,),"i")
lines_p = find_lines(buffer, w, h, stride, 10.0, False, 1, n_lines.ctypes.data_as(c_int32_p))
# get results as list of tuples
n = n_lines[0]
#print(n_lines)
lines = np.empty((n,4), np.float32)
weights = np.empty(n, np.float32)
errors = np.empty(n, np.float32)
groups = np.empty(n, np.int32)
for i in range(n):
l = lines_p[i]
lines[i,:] = (l.x1, l.y1, l.x2, l.y2)
weights[i] = l.weight
errors[i] = l.err
groups[i] = l.group_id
if scale < 1:
lines /= scale
#print(lines)
dll.release_line_segments(ctypes.pointer(lines_p))
return lines, dict(weight=weights, error=errors, group=groups)
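# --- Editor's sketch (not part of the original module): end-to-end call ---
# The image path is a placeholder; skimage.io.imread is assumed to be available since
# scikit-image is already a dependency above.
def _example_detect(path="photo.jpg"):  # pragma: no cover - illustrative only
    from skimage.io import imread
    image = imread(path)
    lines, meta = detect_line_segments(image, max_size=1200, smooth=1)
    # `lines` is an (n, 4) array of x1, y1, x2, y2 in original image coordinates;
    # `meta` carries per-segment weight, error and group id arrays.
    return lines.shape, set(meta["group"].tolist())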
| 25.890909
| 97
| 0.648174
|
3b89962f8dd651699edf807e4b88ea79a8d85804
| 3,055
|
py
|
Python
|
tests/common.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | null | null | null |
tests/common.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | null | null | null |
tests/common.py
|
gillistephan/aas-core-codegen
|
5b89ea2ee35aecaca9a1bed7ac81d420cc560f29
|
[
"MIT"
] | null | null | null |
"""Provide common functionality across different tests."""
from typing import List, Tuple, Optional, Union, Sequence
import asttokens
from icontract import ensure
from aas_core_codegen import parse, intermediate
from aas_core_codegen.common import Error
# pylint: disable=missing-function-docstring
def most_underlying_messages(error_or_errors: Union[Error, Sequence[Error]]) -> str:
"""Find the "leaf" errors and render them as a new-line separated list."""
if isinstance(error_or_errors, Error):
errors = [error_or_errors] # type: Sequence[Error]
else:
errors = error_or_errors
most_underlying_errors = [] # type: List[Error]
for error in errors:
if error.underlying is None or len(error.underlying) == 0:
most_underlying_errors.append(error)
continue
stack = error.underlying # type: List[Error]
while len(stack) > 0:
top_error = stack.pop()
if top_error.underlying is not None:
stack.extend(top_error.underlying)
if top_error.underlying is None or len(top_error.underlying) == 0:
most_underlying_errors.append(top_error)
return "\n".join(
most_underlying_error.message
for most_underlying_error in most_underlying_errors
)
@ensure(lambda result: (result[0] is not None) ^ (result[1] is not None))
def parse_atok(
atok: asttokens.ASTTokens,
) -> Tuple[Optional[parse.SymbolTable], Optional[Error]]:
"""Parse the ``atok``, an abstract syntax tree of a meta-model."""
import_errors = parse.check_expected_imports(atok=atok)
if len(import_errors) > 0:
import_errors_str = "\n".join(
f"* {import_error}" for import_error in import_errors
)
raise AssertionError(
f"Unexpected imports in the source code:\n{import_errors_str}"
)
symbol_table, error = parse.atok_to_symbol_table(atok=atok)
return symbol_table, error
@ensure(lambda result: (result[0] is not None) ^ (result[1] is not None))
def parse_source(source: str) -> Tuple[Optional[parse.SymbolTable], Optional[Error]]:
"""Parse the given source text into a symbol table."""
atok, parse_exception = parse.source_to_atok(source=source)
if parse_exception:
raise parse_exception # pylint: disable=raising-bad-type
assert atok is not None
return parse_atok(atok=atok)
@ensure(lambda result: (result[0] is not None) ^ (result[1] is not None))
def translate_source_to_intermediate(
source: str,
) -> Tuple[Optional[intermediate.SymbolTable], Optional[Error]]:
atok, parse_exception = parse.source_to_atok(source=source)
if parse_exception:
raise parse_exception # pylint: disable=raising-bad-type
assert atok is not None
parsed_symbol_table, error = parse_atok(atok=atok)
assert error is None, f"{most_underlying_messages(error)}"
assert parsed_symbol_table is not None
return intermediate.translate(parsed_symbol_table=parsed_symbol_table, atok=atok)
| 33.571429
| 85
| 0.697545
|
eaf00114a342431447f34e95137e20f3c4c76c2b
| 304
|
py
|
Python
|
book_2/sql/04_sql.py
|
D-Mbithi/Real-Python-Course-Solutions
|
6e743af5f9f40260df8d42b667b3535caed9db3b
|
[
"MIT"
] | 1
|
2019-10-24T17:56:23.000Z
|
2019-10-24T17:56:23.000Z
|
book_2/sql/04_sql.py
|
D-Mbithi/Real-Python-Course-Solutions
|
6e743af5f9f40260df8d42b667b3535caed9db3b
|
[
"MIT"
] | null | null | null |
book_2/sql/04_sql.py
|
D-Mbithi/Real-Python-Course-Solutions
|
6e743af5f9f40260df8d42b667b3535caed9db3b
|
[
"MIT"
] | null | null | null |
import sqlite3
with sqlite3.connect('new.db') as connection:
cursor = connection.cursor()
cursor.execute(
"INSERT INTO population VALUES('Boston', 'MA', 6000000)"
)
cursor.execute(
"INSERT INTO population VALUES('Chicago', 'IL', 2700000)"
)
| 23.384615
| 69
| 0.588816
|
89cc0ff098968a2a7973b60ccb0404db64025faa
| 609
|
py
|
Python
|
submissions/arc052/b.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/arc052/b.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/arc052/b.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from math import pi
n, q = map(int, readline().split())
xrh = [tuple(map(int, readline().split())) for _ in range(n)]
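# Editor's note (illustrative): each (x, r, h) describes an upright cone whose base of
# radius r sits at height x and whose apex is at x + h; by similar triangles the radius at
# height z is r * (x + h - z) / h, and each query sums cone volumes (pi/3 * r^2 * height)
# clipped to the slab [a, b).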
for _ in range(q):
a, b = map(int, readline().split())
ans = 0
for x, r, h in xrh:
if a >= x + h or b <= x:
continue
ans += pow(r, 2) * h if a <= x else pow(r * (x + h - a) / h, 2) * (x + h - a)
if b < x + h:
ans -= pow(r * (x + h - b) / h, 2) * (x + h - b)
print(ans * pi / 3)
| 29
| 85
| 0.520525
|
e753e953a1a0688f133c9c7aa0137b40fb7449fa
| 6,926
|
py
|
Python
|
tests/unit/utils/verify_test.py
|
epoelke/salt
|
80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2
|
[
"Apache-2.0"
] | 1
|
2016-12-20T20:11:21.000Z
|
2016-12-20T20:11:21.000Z
|
tests/unit/utils/verify_test.py
|
epoelke/salt
|
80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/utils/verify_test.py
|
epoelke/salt
|
80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2
|
[
"Apache-2.0"
] | null | null | null |
'''
Test the verification routines
'''
# Import Python libs
import getpass
import os
import sys
import stat
import shutil
import resource
import tempfile
import socket
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath, TestsLoggingHandler
ensure_in_syspath('../../')
# Import salt libs
import salt.utils
import integration
from salt.utils.verify import (
check_user,
verify_env,
verify_socket,
zmq_version,
check_max_open_files
)
class TestVerify(TestCase):
'''
Verify module tests
'''
def test_zmq_verify(self):
self.assertTrue(zmq_version())
def test_zmq_verify_insufficient(self):
import zmq
zmq.__version__ = '2.1.0'
self.assertFalse(zmq_version())
def test_user(self):
self.assertTrue(check_user(getpass.getuser()))
def test_no_user(self):
# Catch sys.stderr here since no logging is configured and
# check_user WILL write to sys.stderr
class FakeWriter(object):
def __init__(self):
self.output = ""
def write(self, data):
self.output += data
stderr = sys.stderr
writer = FakeWriter()
sys.stderr = writer
# Now run the test
self.assertFalse(check_user('nouser'))
# Restore sys.stderr
sys.stderr = stderr
if writer.output != 'CRITICAL: User not found: "nouser"\n':
# If there's a different error catch, write it to sys.stderr
sys.stderr.write(writer.output)
@skipIf(sys.platform.startswith('win'), 'No verify_env Windows')
def test_verify_env(self):
root_dir = tempfile.mkdtemp(dir=integration.SYS_TMP_DIR)
var_dir = os.path.join(root_dir, 'var', 'log', 'salt')
verify_env([var_dir], getpass.getuser())
self.assertTrue(os.path.exists(var_dir))
dir_stat = os.stat(var_dir)
self.assertEqual(dir_stat.st_uid, os.getuid())
self.assertEqual(dir_stat.st_gid, os.getgid())
self.assertEqual(dir_stat.st_mode & stat.S_IRWXU, stat.S_IRWXU)
self.assertEqual(dir_stat.st_mode & stat.S_IRWXG, 40)
self.assertEqual(dir_stat.st_mode & stat.S_IRWXO, 5)
@integration.requires_network(only_local_network=True)
def test_verify_socket(self):
self.assertTrue(verify_socket('', 18000, 18001))
if socket.has_ipv6:
# Only run if Python is built with IPv6 support; otherwise
# this will just fail.
self.assertTrue(verify_socket('::', 18000, 18001))
@skipIf(os.environ.get('TRAVIS_PYTHON_VERSION', None) is not None,
'Travis environment does not like too many open files')
def test_max_open_files(self):
with TestsLoggingHandler() as handler:
logmsg_dbg = (
'DEBUG:This salt-master instance has accepted {0} minion keys.'
)
logmsg_chk = (
'{0}:The number of accepted minion keys({1}) should be lower '
'than 1/4 of the max open files soft setting({2}). According '
'to the system\'s hard limit, there\'s still a margin of {3} '
'to raise the salt\'s max_open_files setting. Please consider '
'raising this value.'
)
logmsg_crash = (
'{0}:The number of accepted minion keys({1}) should be lower '
'than 1/4 of the max open files soft setting({2}). '
'salt-master will crash pretty soon! According to the '
'system\'s hard limit, there\'s still a margin of {3} to '
'raise the salt\'s max_open_files setting. Please consider '
'raising this value.'
)
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
tempdir = tempfile.mkdtemp(prefix='fake-keys')
keys_dir = os.path.join(tempdir, 'minions')
os.makedirs(keys_dir)
mof_test = 256
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_test, mof_h))
try:
prev = 0
for newmax, level in ((24, None), (66, 'INFO'),
(127, 'WARNING'), (196, 'CRITICAL')):
for n in range(prev, newmax):
kpath = os.path.join(keys_dir, str(n))
with salt.utils.fopen(kpath, 'w') as fp_:
fp_.write(str(n))
opts = {
'max_open_files': newmax,
'pki_dir': tempdir
}
check_max_open_files(opts)
if level is None:
# No log message is triggered, only the DEBUG one which
# tells us how many minion keys were accepted.
self.assertEqual(
[logmsg_dbg.format(newmax)], handler.messages
)
else:
self.assertIn(
logmsg_dbg.format(newmax), handler.messages
)
self.assertIn(
logmsg_chk.format(
level,
newmax,
mof_test,
mof_h - newmax,
),
handler.messages
)
handler.clear()
prev = newmax
newmax = mof_test
for n in range(prev, newmax):
kpath = os.path.join(keys_dir, str(n))
with salt.utils.fopen(kpath, 'w') as fp_:
fp_.write(str(n))
opts = {
'max_open_files': newmax,
'pki_dir': tempdir
}
check_max_open_files(opts)
self.assertIn(logmsg_dbg.format(newmax), handler.messages)
self.assertIn(
logmsg_crash.format(
'CRITICAL',
newmax,
mof_test,
mof_h - newmax,
),
handler.messages
)
handler.clear()
except IOError as err:
if err.errno == 24:
# Too many open files
self.skipTest('We\'ve hit the max open files setting')
raise
finally:
shutil.rmtree(tempdir)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_s, mof_h))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestVerify, needs_daemon=False)
| 35.15736
| 79
| 0.523101
|
a4eac77590a1f18384e4186f6b0a7a0863f86f1c
| 1,256
|
py
|
Python
|
config.py
|
jquku/Matrix-Chatbot
|
762b0ee3af0779284b144f5642f0c0877ed5ffcb
|
[
"MIT"
] | 1
|
2021-10-02T08:21:34.000Z
|
2021-10-02T08:21:34.000Z
|
config.py
|
jquku/Matrix-Chatbot
|
762b0ee3af0779284b144f5642f0c0877ed5ffcb
|
[
"MIT"
] | null | null | null |
config.py
|
jquku/Matrix-Chatbot
|
762b0ee3af0779284b144f5642f0c0877ed5ffcb
|
[
"MIT"
] | null | null | null |
import os
import yaml
import sys
from typing import List, Any
class Config(object):
def __init__(self, filepath):
# Load in the config file at the given filepath
with open(filepath) as file_stream:
self.config = yaml.safe_load(file_stream.read())
#account setup
self.user_id = self.get_config(["matrix", "user_id"], required=True)
self.user_password = self.get_config(["matrix", "user_password"], required=True)
self.homeserver_url = self.get_config(["matrix", "homeserver_url"], required=True)
#database setup
self.name = self.get_config(["database", "name"], required=True)
self.user = self.get_config(["database", "user"], required=True)
self.password = self.get_config(["database", "password"], required=True)
self.host = self.get_config(["database", "host"], required=True)
self.port = self.get_config(["database", "port"], required=True)
def get_config(
self,
path: List[str],
default: Any = None,
required: bool = True,
) -> Any:
        # Walk the nested config dict along the given path
config = self.config
for name in path:
config = config.get(name)
return config
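# --- Editor's note (illustrative): a config file matching the accessors above would look like ---
#
# matrix:
#   user_id: "@bot:example.org"        # placeholder values
#   user_password: "secret"
#   homeserver_url: "https://matrix.example.org"
# database:
#   name: "chatbot"
#   user: "bot"
#   password: "secret"
#   host: "localhost"
#   port: 5432
#
# Usage sketch: Config("config.yaml").homeserver_url -> "https://matrix.example.org"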
| 32.205128
| 90
| 0.613057
|
ffbc1307523101a9aa0fbefab75ff6e8f5b66b78
| 285
|
py
|
Python
|
run.py
|
MythosRaconteur/app-gen-flask-server
|
c211f97633d5756d19484a96b8249e644d6dbfa7
|
[
"MIT"
] | 25
|
2021-07-21T23:25:53.000Z
|
2022-03-30T10:47:31.000Z
|
run.py
|
MythosRaconteur/app-gen-flask-server
|
c211f97633d5756d19484a96b8249e644d6dbfa7
|
[
"MIT"
] | 4
|
2021-07-20T15:17:24.000Z
|
2021-07-20T15:17:51.000Z
|
run.py
|
MythosRaconteur/app-gen-flask-server
|
c211f97633d5756d19484a96b8249e644d6dbfa7
|
[
"MIT"
] | 10
|
2021-07-28T07:03:54.000Z
|
2022-03-13T18:10:25.000Z
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from api import app, db
@app.shell_context_processor
def make_shell_context():
return {"app": app,
"db": db
}
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0")
| 15
| 39
| 0.575439
|
b3282dededd0993d05f7bc08c81151435517605f
| 1,297
|
py
|
Python
|
tests/test_utils.py
|
hyper-neutrino/Vyxal
|
004e953ef6b4397e7062e71c4f6e095b4077ac27
|
[
"MIT"
] | 3
|
2021-04-21T08:09:51.000Z
|
2021-11-11T07:15:43.000Z
|
tests/test_utils.py
|
AMiller42/Vyxal
|
004e953ef6b4397e7062e71c4f6e095b4077ac27
|
[
"MIT"
] | null | null | null |
tests/test_utils.py
|
AMiller42/Vyxal
|
004e953ef6b4397e7062e71c4f6e095b4077ac27
|
[
"MIT"
] | 1
|
2021-04-21T20:37:56.000Z
|
2021-04-21T20:37:56.000Z
|
import os
import sys
import builtins
from multiprocessing import Manager
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) + "/.."
sys.path.insert(1, THIS_FOLDER)
from vyxal import interpreter as interp
header = "stack = []\nregister = 0\nprinted = False\n"
manager = Manager()
def run_code(code, flags="", input_list=[], output_variable=manager.dict()):
reset_globals()
# context_level = 0
interp.execute(code, flags, "\n".join(map(str, input_list)), output_variable)
return interp.stack
def reset_globals():
interp.keg_mode = False
interp.raw_strings = False
interp.online_version = False
interp.input_level = 0
interp.number_iterable = list
interp.MAP_START = 1
interp.MAP_OFFSET = 1
interp._join = False
interp._vertical_join = False
interp.use_encoding = False
interp.stack = []
def reshape(arr, shape):
if len(shape) == 1:
return arr
rest = shape[1:]
size = len(arr) // shape[0]
return [reshape(arr[i * size : (i + 1) * size], rest) for i in range(shape[0])]
def to_list(vector):
typ = interp.vy_type(vector)
if typ in (list, interp.Generator):
return list(
map(to_list, vector._dereference() if typ is interp.Generator else vector)
)
return vector
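# Hedged example (not part of the upstream helpers): exercises the pure-Python
# reshape() utility above; the input list and target shape are illustrative.
def test_reshape_example():
    assert reshape(list(range(6)), [2, 3]) == [[0, 1, 2], [3, 4, 5]]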
| 25.431373
| 86
| 0.666924
|
1bf5aa7d469775def21bac505ca404fb805560dc
| 5,071
|
py
|
Python
|
platform/gsutil/third_party/apitools/apitools/base/py/credentials_lib_test.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 2
|
2020-10-12T05:21:32.000Z
|
2021-07-07T09:23:52.000Z
|
platform/gsutil/third_party/apitools/apitools/base/py/credentials_lib_test.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2016-11-17T20:35:03.000Z
|
2016-11-17T20:35:03.000Z
|
platform/gsutil/third_party/apitools/apitools/base/py/credentials_lib_test.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 2
|
2020-02-20T13:02:46.000Z
|
2021-07-07T09:23:53.000Z
|
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
import unittest2
from apitools.base.py import credentials_lib
from apitools.base.py import util
class CredentialsLibTest(unittest2.TestCase):
def _GetServiceCreds(self, service_account_name=None, scopes=None):
kwargs = {}
if service_account_name is not None:
kwargs['service_account_name'] = service_account_name
service_account_name = service_account_name or 'default'
def MockMetadataCalls(request_url):
default_scopes = scopes or ['scope1']
if request_url.endswith('scopes'):
return six.StringIO(''.join(default_scopes))
elif request_url.endswith('service-accounts'):
return six.StringIO(service_account_name)
elif request_url.endswith(
'/service-accounts/%s/token' % service_account_name):
return six.StringIO('{"access_token": "token"}')
self.fail('Unexpected HTTP request to %s' % request_url)
with mock.patch.object(credentials_lib, '_GceMetadataRequest',
side_effect=MockMetadataCalls,
autospec=True) as opener_mock:
with mock.patch.object(util, 'DetectGce',
autospec=True) as mock_detect:
mock_detect.return_value = True
credentials = credentials_lib.GceAssertionCredentials(
scopes, **kwargs)
self.assertIsNone(credentials._refresh(None))
self.assertEqual(3, opener_mock.call_count)
return credentials
def testGceServiceAccounts(self):
scopes = ['scope1']
self._GetServiceCreds()
self._GetServiceCreds(scopes=scopes)
self._GetServiceCreds(service_account_name='my_service_account',
scopes=scopes)
def testGetServiceAccount(self):
# We'd also like to test the metadata calls, which requires
# having some knowledge about how HTTP calls are made (so that
# we can mock them). It's unfortunate, but there's no way
# around it.
creds = self._GetServiceCreds()
opener = mock.MagicMock()
opener.open = mock.MagicMock()
opener.open.return_value = six.StringIO('default/\nanother')
with mock.patch.object(six.moves.urllib.request, 'build_opener',
return_value=opener,
autospec=True) as build_opener:
creds.GetServiceAccount('default')
self.assertEqual(1, build_opener.call_count)
self.assertEqual(1, opener.open.call_count)
req = opener.open.call_args[0][0]
self.assertTrue(req.get_full_url().startswith(
'http://metadata.google.internal/'))
# The urllib module does weird things with header case.
self.assertEqual('Google', req.get_header('Metadata-flavor'))
def testGetAdcNone(self):
# Tests that we correctly return None when ADC aren't present in
# the well-known file.
creds = credentials_lib._GetApplicationDefaultCredentials(
client_info={'scope': ''})
self.assertIsNone(creds)
class TestGetRunFlowFlags(unittest2.TestCase):
def setUp(self):
self._flags_actual = credentials_lib.FLAGS
def tearDown(self):
credentials_lib.FLAGS = self._flags_actual
def test_with_gflags(self):
HOST = 'myhostname'
PORT = '144169'
class MockFlags(object):
auth_host_name = HOST
auth_host_port = PORT
auth_local_webserver = False
credentials_lib.FLAGS = MockFlags
flags = credentials_lib._GetRunFlowFlags([
'--auth_host_name=%s' % HOST,
'--auth_host_port=%s' % PORT,
'--noauth_local_webserver',
])
self.assertEqual(flags.auth_host_name, HOST)
self.assertEqual(flags.auth_host_port, PORT)
self.assertEqual(flags.logging_level, 'ERROR')
self.assertEqual(flags.noauth_local_webserver, True)
def test_without_gflags(self):
credentials_lib.FLAGS = None
flags = credentials_lib._GetRunFlowFlags([])
self.assertEqual(flags.auth_host_name, 'localhost')
self.assertEqual(flags.auth_host_port, [8080, 8090])
self.assertEqual(flags.logging_level, 'ERROR')
self.assertEqual(flags.noauth_local_webserver, False)
| 40.246032
| 74
| 0.644449
|
4bf5099fc8a0e9774b855067070451416d3f6de4
| 3,182
|
py
|
Python
|
tensorflow/lite/python/optimize/calibrator.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/lite/python/optimize/calibrator.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/lite/python/optimize/calibrator.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for post training quantization with calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.lazy_loader import LazyLoader
# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
_calibration_wrapper = LazyLoader(
"_calibration_wrapper", globals(),
"tensorflow.lite.python.optimize."
"tensorflow_lite_wrap_calibration_wrapper")
class Calibrator(object):
"""Calibrates a floating point model and then quantizes it.
This is an internal class, not a public interface.
"""
def __init__(self, model_content):
"""Constructor.
Args:
model_content: Content of a TF-Lite Flatbuffer file.
Raises:
ValueError: If the calibrator was unable to open the model.
"""
if not model_content:
raise ValueError("`model_content` must be specified.")
try:
self._calibrator = (_calibration_wrapper.CalibrationWrapper
.CreateWrapperCPPFromBuffer(model_content))
except Exception as e:
raise ValueError("Failed to parse the model: %s." % e)
if not self._calibrator:
raise ValueError("Failed to parse the model.")
def calibrate_and_quantize(self, dataset_gen, input_type, output_type,
allow_float):
"""Calibrates the model with specified generator and then quantizes it.
    Args:
      dataset_gen: A generator that generates calibration samples.
      input_type: A tf.dtype representing the desired real-value input type.
      output_type: A tf.dtype representing the desired real-value output type.
      allow_float: A boolean. False if the resulting model cannot perform float
                   computation, useful when targeting an integer-only backend.
                   If False, an error will be thrown if an operation cannot be
                   quantized; otherwise the model will fall back to float ops.
    Returns:
      A quantized model.
"""
self._calibrator.Prepare()
for calibration_sample in dataset_gen():
self._calibrator.FeedTensor(calibration_sample)
return self._calibrator.QuantizeModel(
np.dtype(input_type.as_numpy_dtype()).num,
np.dtype(output_type.as_numpy_dtype()).num, allow_float)
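# Hedged usage sketch (not part of the upstream module): shows how a caller
# might drive Calibrator with a representative dataset generator. The model
# bytes argument, tensor shape and dtype choices are illustrative assumptions.
def _example_calibration(tflite_model_bytes):
  import tensorflow as tf  # assumed available alongside this module
  def representative_dataset():
    for _ in range(10):
      yield [np.random.random_sample((1, 224, 224, 3)).astype(np.float32)]
  calibrator = Calibrator(tflite_model_bytes)
  return calibrator.calibrate_and_quantize(
      representative_dataset, tf.float32, tf.float32, allow_float=True)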
| 40.278481
| 81
| 0.688246
|
8e80bd8d974d47c258398a4884f8d54839c8890f
| 1,208
|
py
|
Python
|
Translator/views.py
|
hbFree/Irman-Translator
|
1ffb850c1a6614f5601e0d4c91f0d9702d096a30
|
[
"MIT"
] | 6
|
2020-02-15T09:42:21.000Z
|
2020-03-03T08:52:57.000Z
|
Translator/views.py
|
hbFree/Irman-Translator
|
1ffb850c1a6614f5601e0d4c91f0d9702d096a30
|
[
"MIT"
] | null | null | null |
Translator/views.py
|
hbFree/Irman-Translator
|
1ffb850c1a6614f5601e0d4c91f0d9702d096a30
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.template.defaulttags import register
from xml.dom import minidom
import os.path
PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))
class LanguagesXml:
root = minidom.parse(PROJECT_PATH + '/static/lng.xml')
    def get(self, lng, attr):
        # Look up the attribute for the requested language, falling back to
        # the 'en' entry when the requested language tag is missing.
        cur = self.root.getElementsByTagName(lng)
        if cur:
            att = cur[0].getElementsByTagName(attr)
        else:
            att = self.root.getElementsByTagName('en')[0].getElementsByTagName(attr)
        if att:
            return att[0].firstChild.data
        return 'error: 0x01 ntf!'
LNG = LanguagesXml()
@register.simple_tag
def get_item(obj, lng, attr):
return obj.get(lng, attr)
def index(req):
context = {
'GET_LIST': req.GET,
'LNG': LNG,
}
lg = 'en'
if req.GET:
if 'lng' in req.GET:
lg = req.GET['lng']
context['cur_lng'] = lg
return render(req, 'Translator/index.html', context)
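# Hedged sketch (not part of the upstream views): illustrates the lookup
# pattern used by LanguagesXml.get with an inline document; the tag layout is
# an assumption about static/lng.xml, not a copy of it.
def _example_lookup():
    doc = minidom.parseString(
        '<languages><en><title>Translator</title></en>'
        '<fr><title>Traducteur</title></fr></languages>')
    fr = doc.getElementsByTagName('fr')
    return fr[0].getElementsByTagName('title')[0].firstChild.data  # 'Traducteur'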
| 24.16
| 84
| 0.57947
|
96d18cf3576b9f19c31da1e37a22353beee0814a
| 4,998
|
py
|
Python
|
tartiflette/types/helpers/definition.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 530
|
2019-06-04T11:45:36.000Z
|
2022-03-31T09:29:56.000Z
|
tartiflette/types/helpers/definition.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 242
|
2019-06-04T11:53:08.000Z
|
2022-03-28T07:06:27.000Z
|
tartiflette/types/helpers/definition.py
|
matt-koevort/tartiflette
|
5777866b133d846ce4f8aa03f735fa81832896cd
|
[
"MIT"
] | 36
|
2019-06-21T06:40:27.000Z
|
2021-11-04T13:11:16.000Z
|
from tartiflette.types.enum import GraphQLEnumType
from tartiflette.types.input_object import GraphQLInputObjectType
from tartiflette.types.interface import GraphQLInterfaceType
from tartiflette.types.list import GraphQLList
from tartiflette.types.non_null import GraphQLNonNull
from tartiflette.types.object import GraphQLObjectType
from tartiflette.types.scalar import GraphQLScalarType
from tartiflette.types.union import GraphQLUnionType
__all__ = (
"get_wrapped_type",
"is_scalar_type",
"is_enum_type",
"is_input_object_type",
"is_list_type",
"is_non_null_type",
"is_wrapping_type",
"is_input_type",
"is_abstract_type",
"is_leaf_type",
"is_object_type",
)
def get_wrapped_type(graphql_type: "GraphQLType") -> "GraphQLType":
"""
    Unwraps the GraphQL type and returns the inner type.
:param graphql_type: schema type to unwrap
:type graphql_type: GraphQLType
:return: the unwrapped inner schema type
:rtype: GraphQLType
"""
inner_type = graphql_type
while inner_type.is_wrapping_type:
inner_type = inner_type.wrapped_type
return inner_type
def is_scalar_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is a scalar type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is a scalar type.
:rtype: bool
"""
return isinstance(graphql_type, GraphQLScalarType)
def is_enum_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is an enum type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is an enum type.
:rtype: bool
"""
return isinstance(graphql_type, GraphQLEnumType)
def is_input_object_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is an input object type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is an input object type.
:rtype: bool
"""
return isinstance(graphql_type, GraphQLInputObjectType)
def is_list_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is a list type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is a list type.
:rtype: bool
"""
return isinstance(graphql_type, GraphQLList)
def is_non_null_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is a non null type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is a non null type.
:rtype: bool
"""
return isinstance(graphql_type, GraphQLNonNull)
def is_wrapping_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is either a list or non null
type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is either a list or non null
type.
:rtype: bool
"""
return isinstance(graphql_type, (GraphQLList, GraphQLNonNull))
def is_input_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is an input type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is an input type.
:rtype: bool
"""
return isinstance(
graphql_type,
(GraphQLScalarType, GraphQLEnumType, GraphQLInputObjectType),
) or (
graphql_type.is_wrapping_type
and is_input_type(graphql_type.wrapped_type)
)
def is_abstract_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is an abstract type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is an abstract type.
:rtype: bool
"""
return isinstance(graphql_type, (GraphQLInterfaceType, GraphQLUnionType))
def is_leaf_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is a leaf type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is a leaf type.
:rtype: bool
"""
return isinstance(graphql_type, (GraphQLScalarType, GraphQLEnumType))
def is_object_type(graphql_type: "GraphQLType") -> bool:
"""
Determines whether or not the "GraphQLType" is an object type.
:param graphql_type: schema type to test
:type graphql_type: GraphQLType
:return: whether or not the "GraphQLType" is an object type.
:rtype: bool
"""
return isinstance(graphql_type, GraphQLObjectType)
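# Hedged sketch (not part of the upstream module): shows how the predicates
# above combine; ``wrapped_type`` is assumed to be an already-built type such
# as a GraphQLNonNull wrapping a GraphQLList of a scalar.
def _example_classify(wrapped_type: "GraphQLType") -> str:
    inner = get_wrapped_type(wrapped_type)
    if is_leaf_type(inner):
        return "leaf"
    if is_abstract_type(inner):
        return "abstract"
    return "object" if is_object_type(inner) else "other"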
| 32.245161
| 77
| 0.715686
|
dfdbc8d079e6cd64c22db72f76419deffef492e4
| 20,635
|
py
|
Python
|
matlab2cpp/node/backend.py
|
emc2norway/m2cpp
|
81943057c184c539b409282cbbd47bbf933db04f
|
[
"BSD-3-Clause"
] | 28
|
2017-04-25T10:06:38.000Z
|
2022-02-09T07:25:34.000Z
|
matlab2cpp/node/backend.py
|
emc2norway/m2cpp
|
81943057c184c539b409282cbbd47bbf933db04f
|
[
"BSD-3-Clause"
] | null | null | null |
matlab2cpp/node/backend.py
|
emc2norway/m2cpp
|
81943057c184c539b409282cbbd47bbf933db04f
|
[
"BSD-3-Clause"
] | 5
|
2017-04-25T17:54:53.000Z
|
2022-03-21T20:15:15.000Z
|
import re
import os
from os.path import sep
import reference
import matlab2cpp
import matlab2cpp.m2cpp
import matlab2cpp.pyplot
def flatten(node, ordered=False, reverse=False, inverse=False):
"""
Backend for the :py:func:`~matlab2cpp.Node.flatten` function.
Args:
node (Node): Root node to start from
    ordered (bool): If True, make sure the nodes are hierarchically ordered.
    reverse (bool): If True, children are iterated in reverse order.
    inverse (bool): If True, tree is iterated in reverse order.
See also:
:py:func:`~matlab2cpp.Node.flatten`
"""
o = bool(ordered)
r = bool(reverse)
i = bool(inverse)
out = []
if o:
nodes = [node]
for node in nodes:
nodes.extend(node.children[::1-2*(r ^ i)])
out.extend(nodes[::1-2*i])
else:
if i:
def foo(node):
for child in node[::1-2*r]:
foo(child)
out.append(node)
else:
def foo(node):
out.append(node)
for child in node[::1-2*r]:
foo(child)
foo(node)
return out
def summary(node, opt):
"""
Backend for creating summary of the node tree.
See :py:func:`~matlab2cpp.qtree` for behavior.
Args:
node (Node): Relative root of the tree
Returns:
str: string representation of the node
See also:
:py:func:`~matlab2cpp.qtree`
"""
nodes = flatten(node, False, False, False)
if not (opt is None) and opt.disp:
print "iterating over %d nodes" % len(nodes)
if not (opt is None) and not (opt.line is None):
for node in nodes:
if node.cls != "Block" and node.line == opt.line:
nodes = flatten(node, False, False, False)
break
indent = []
outl = []
nl = len(str(nodes[-1].line))+1
nc = len(str(nodes[-1].cur+1))+1
for node in nodes:
out = ""
if node.line:
nl_ = len(str(node.line))
out += " "*(nl-nl_) + str(node.line) + " "
nc_ = len(str(node.cur+1))
out += " "*(nc-nc_) + str(node.cur+1)
else:
out += " "*(nl+nc+1)
# indentation
while indent and not (node.parent is indent[-1]):
indent.pop()
out += "| "*(len(indent))
indent.append(node)
out += node.cls.ljust(11)
out += node.backend.ljust(13)
# define type
if node.type == "TYPE":
type = node.declare.prop.get("suggest", "TYPE")
if type != "TYPE":
type = "(" + type + ")"
else:
type = node.type
out += type.ljust(8)
out += node.name
outl.append(out)
out = "\n".join(outl)
out = re.sub(r"(\\n){2,}", "", out)
return out
def auxillary(node, type, convert):
"""
Backend for the :py:func:`~matlab2cpp.Node.auxillary` function.
Args:
node (Node):
        Root of the tree where the split into a new line will occur.
type (str, None):
If provided, auxiliary variable type will be converted
convert (bool):
If true, add an extra function call ``conv_to`` to convert datatype in
Armadillo.
See also:
:py:func:`~matlab2cpp.Node.auxiliary`
"""
assert node.parent.cls != "Assign",\
".auxiliary() must be triggered mid expression."
type = type or node.type
if not isinstance(type, str):
if isinstance(type[0], int):
type = matlab2cpp.datatype.get_name(*type)
else:
type = matlab2cpp.datatype.common_strict(type)
matrix_mode = False
if node.cls == "Matrix":
matrix_mode = True
if matrix_mode and type == "int" and node.group.cls in ("Get", "Set"):
type = "uword"
line = node
while line.parent.cls != "Block":
line = line.parent
block = line.parent
# Create new var
i = 1
declares = node.func[0]
while "_aux_" + type + "_" + str(i) in declares:
i += 1
var = "_aux_" + type + "_" + str(i)
# Create Assign
assign = matlab2cpp.collection.Assign(block, code=node.code)
assign.type = type
if matrix_mode:
assign.backend = "matrix"
# Return value
aux_var = matlab2cpp.collection.Var(assign, var)
aux_var.type = type
aux_var.backend = type
aux_var.create_declare()
if convert:
rhs = matlab2cpp.collection.Get(assign, "_conv_to")
rhs.type = type
else:
rhs = assign
swap_var = matlab2cpp.collection.Var(rhs, var)
swap_var.declare.type = type
# Place Assign correctly in Block
i = block.children.index(line)
block.children = block[:i] + block[-1:] + block[i:-1]
# Swap node and Var
index = node.parent.children.index(node)
node.parent.children[index] = swap_var
rhs.children[-1] = node
swap_var.parent, node.parent = node.parent, swap_var.parent
# generate code
node.translate()
swap_var.translate(only=True)
aux_var.translate(only=True)
if convert:
rhs.translate(only=True)
assign.translate(only=True)
if convert:
assert node.type != swap_var.type
return swap_var
def resize(node):
"""
Backend for the :py:func:`~matlab2cpp.Node.resize` function.
Args:
node (Node): node to be resized
See also:
:py:func:`~matlab2cpp.Node.resize`
"""
if node["_resize"]:
return
node["_resize"] = True
type = node.type
node.dim = 3
line = node
while line.parent.cls != "Block":
line = line.parent
resize = matlab2cpp.collection.Resize(line.parent, name=node.name)
resize.type = type
i = line.parent.children.index(line)
ps = line.parent.children
line.parent.children = ps[:i] + ps[-1:] + ps[i:-1]
resize.translate(False, only=True)
def error(node, msg, onlyw=False):
"""
Add an error or warning to the log subtree.
Args:
        node (Node): node where the error occurred
msg (str): error message content
onlyw (bool): if true, use warning instead of error
See also:
:py:func:`~matlab2cpp.Node.error`
:py:func:`~matlab2cpp.Node.warning`
"""
msg = msg % node.properties()
code = node.program.code
cur = node.cur
end = cur+len(node.code)
start = cur
while code[start] != "\n" and start != 0:
start -= 1
if end >= len(code):
end = len(code)-1
finish = end
while code[finish] != "\n" and finish != len(code)-1:
finish += 1
code = code[start:finish]
pos = cur-start
name = node.cls + ":" + str(cur)
errors = node.program[5]
if name in errors.names:
return
if onlyw:
err = matlab2cpp.collection.Warning(errors, name=name, line=node.line,
cur=pos, value=msg, code=code)
else:
err = matlab2cpp.collection.Error(errors, name=name, line=node.line,
cur=pos, value=msg, code=code)
err.backend="program"
def create_declare(node):
"""
Backend for the :py:func:`~matlab2cpp.Node.create_declare` function.
Args:
node (Node): Node to create declare from
Returns:
Node : the (newly) declared node
"""
if not (node is node.declare):
return node
if node.cls in reference.structvars:
if node.cls in ("Nget", "Nset"):
if node[0].cls == "String":
return None
value = node[0].value
else:
value = node.value
structs = node.program[3]
assert structs.cls == "Structs"
if node not in structs:
struct = matlab2cpp.collection.Struct(structs, name=node.name)
else:
struct = structs[node]
if value in struct.names:
return struct[struct.names.index(value)]
declares = node.func[0]
if node.cls in ("Sset", "Sget"):
sname = "_size"
if sname not in struct.names:
matlab2cpp.collection.Counter(struct, sname, value="100")
if node.name not in declares.names:
var = matlab2cpp.collection.Var(declares, name=node.name, value=value)
var.type="structs"
else:
if node.name not in declares.names:
var = matlab2cpp.collection.Var(declares, name=node.name, value=value)
var.type="struct"
return matlab2cpp.collection.Var(struct, name=value)
parent = struct
else:
parent = node.func[0]
if node in parent:
declare = parent[node]
declare.type = node.type
declare.pointer = node.pointer
return declare
out = matlab2cpp.collection.Var(parent, name=node.name,
pointer=node.pointer, value=node.value)
out.type = node.type
return out
def suggest_datatype(node):
"""
Backend for the :py:func:`~matlab2cpp.Node.suggest_datatype` function.
Args:
node (Node): Node to suggest datatype for.
Returns:
(tuple): Suggestion on the form ``(dim, mem)``
See also:
:py:func:`~matlab2cpp.Node.suggest_datatype`
"""
if node.group.cls in ("Transpose", "Ctranspose"):
dim, mem = suggest_datatype(node.group)
if dim == 1:
dim = 2
elif dim == 2:
dim = 2
return dim, mem
elif node.group.cls == "Assign":
if node.group[0].num:
return node.group[0].dim, node.group[0].mem
elif node.group.cls == "Matrix":
mems = set([])
if node.group.value: # decomposed
ax0, ax1 = len(node.group), len(node.group[0])
if ax0 > 1:
if ax1 > 1:
dim = 3
else:
dim = 1
else:
if ax1 > 1:
dim = 2
else:
dim = 0
for vec in node.group:
for elem in vec:
if elem.num:
mems.add(elem.mem)
# rowvec definition
elif len(node.group) == 1:
if len(node.group[0]) == 1:
return None, None
for elem in node.group[0]:
if elem.num:
mems.add(elem.mem)
dim = 3
# colvec definition
elif len(node.group[0]) == 1:
for vec in node.group:
if vec[0].num:
mems.add(vec[0].mem)
dim = 3
else:
for vec in node.group:
for elem in vec:
if elem.num:
mems.add(elem.mem)
dim = 3
if len(mems) == 1:
return dim, mems.pop()
elif len(mems) > 1:
return dim, max(*mems)
else:
return None, None
return None, None
# small hack to ensure that log isn't cleaned mid translation
mid_translation = [0]
def translate(node, opt=None):
"""
Backend for performing translation of subtree
Args:
node (Node): Root of the translation
opt (argparse.Namespace, optional): optional arguments from frontend
See also:
:py:func:`~matlab2cpp.Node.translate`
"""
# translate for every program
if node.cls == "Project":
map(translate, node)
return node
if mid_translation[0] == 0:
log = node.program[5]
log.children = []
mid_translation[0] += 1
nodes = flatten(node, False, True, False)
if not (opt is None) and opt.disp:
print "iterating %d nodes" % len(nodes)
for node in nodes[::-1]:
translate_one(node, opt)
mid_translation[0] -= 1
if not mid_translation[0]:
logs = flatten(log, False, True, False)
for node in logs[::-1]:
translate_one(node, opt)
return node
def translate_one(node, opt):
"""
Backend for performing translation of single node
Args:
node (Node): Node to perform translation on
opt (argparse.Namespace, optional): optional arguments from frontend
See also:
:py:func:`~matlab2cpp.Node.translate`
"""
# e.g. Get_a from user
value = node.program.parent.kws.get(node.cls+"_"+node.name, None)
# e.g. Get from user
if value is None:
value = node.program.parent.kws.get(node.cls, None)
if value is None:
backend = node.backend
if backend == "TYPE":
backend = "unknown"
try:
target = matlab2cpp.rules.__dict__["_"+backend]
except KeyError as err:
err_str = "\'" + err.message + "\', File: %s. Data type set in .py file could be wrong." % (str(node.file))
raise KeyError(err_str)
specific_name = node.cls + "_" + node.name
# e.g. Get_a (reserved typically)
if specific_name in target.__dict__:
value = target.__dict__[specific_name]
# e.g. Get (normal behavior)
elif node.cls in target.__dict__:
value = target.__dict__[node.cls]
else:
print node.program.summary()
raise KeyError(
"Expected to find rule for '%s' in the file '_%s.py. Crash with file: %s, on line: %s'" %\
(node.cls, node.backend, node.file, node.line))
# let rule create a translation
if not isinstance(value, (unicode, str, list, tuple)):
#print node.code
#print "\n\n"
value = value(node)
# not quite right format
if isinstance(value, (unicode, matlab2cpp.node.frontend.Node)):
value = str(value)
elif value is None:
#print "\n\nerror:"
#print node.code
#print node.parent.code
#print node.parent.parent.code
#print "\n"
raise ValueError(
"missing return in function %s in file %s, Matlab: Crash with file: %s, on line: %s" %\
(node.cls, node.backend, node.file, node.line))
node.ret = repr(value)
# interpolate tuples/lists
if not isinstance(value, str):
value = list(value)
children = ["%("+str(i)+")s" for i in xrange(len(node))]
if len(value) == 2:
value.insert(1, "")
value = value[:-1] + [value[-2]] *\
(len(children)-len(value)+1) + value[-1:]
if len(children) == 0:
value = value[0] + value[-1]
elif len(children) == 1:
value = value[0] + children[0] + value[-1]
else:
out = value[0]
for i in xrange(len(children)):
out += children[i] + value[i+1]
value = out
# interpolate string
try:
value = value % node.properties()
except:
#print ".........."
#print node.code
#print "----------"
#print "\n\n"
raise SyntaxError("interpolation in " + node.backend + "." +\
node.cls + " is misbehaving\n'" + value + "'\n" +\
str(node.prop) + "\nCrash with file: " + str(node.file) + " , on line: " + str(node.line) +\
":\n" + node.code)
if node.cls in ("Assign", "Assigns", "Statement", "If", "Elif",
"For", "Parfor", "While") and node.project.builder.original:
code_tmp = ["// " + line for line in node.code.splitlines()]
value = "\n".join(code_tmp) + "\n" + value
value = value.replace("%", "__percent__")
node.str = value
#wanted a static variable in function include below
created_file = []
def include(node, name, **kws):
"""
Backend for the :py:func:`~matlab2cpp.Node.include` function.
Args:
node (Node): node in program where to where the header is placed
name (str): name of header
**kws (str, optional): Optional args for header. Mostly not in use.
See also:
:py:func:`~matlab2cpp.Node.include`
"""
if os.path.isfile(name):
#name = os.path.relpath(name, os.path.dirname(node.program.name))
name = os.path.basename(name)
include_code = '#include "%s.hpp"' % name
library_code = ""
if node.name == name:
include_code = ""
else:
library_code = ""
if name == "SPlot":
include_code = '#include "SPlot.h"'
#check if file in directory
try:
#file_path = node.program[1].name
#index = file_path.rindex(sep)
#output_file_path = file_path[:index] + sep + "SPlot.h"
output_file_path = os.getcwd() + sep + "SPlot.h"
#if mconvert.h not found in directory, create the file
if not os.path.isfile(output_file_path) or "SPlot.h" not in created_file:
f = open(output_file_path, "w")
f.write(matlab2cpp.pyplot.code)
f.close()
created_file.append("SPlot.h")
except:
pass
elif name == "m2cpp":
include_code = '#include "mconvert.h"'
#check if file in directory
try:
#file_path = node.program[1].name
#index = file_path.rindex(sep)
#output_file_path = file_path[:index] + sep + "mconvert.h"
output_file_path = os.getcwd() + sep + "mconvert.h"
#if mconvert.h not found in directory, create the file
if not os.path.isfile(output_file_path) or "mconvert.h" not in created_file:
f = open(output_file_path, "w")
f.write(matlab2cpp.m2cpp.code)
f.close()
created_file.append("mconvert.h")
except:
pass
elif name == "arma":
include_code = "#include <armadillo>"
elif name == "iostream":
include_code = "#include <iostream>"
elif name == "cstdio":
include_code = "#include <cstdio>"
elif name == "complex":
include_code = "#include <complex>"
elif name == "cmath":
include_code = "#include <cmath>"
elif name == "algorithm":
include_code = "#include <algorithm>"
elif name == "omp":
include_code = "#include <omp.h>"
elif name == "tbb":
include_code = "#include <tbb/tbb.h>"
elif name == "no_min_max":
include_code = "#define NOMINMAX"
else:
include_code = ""
includes = node.program[0]
if include_code and include_code not in includes.names:
include = matlab2cpp.collection.Include(includes, include_code,
value=includes.value)
include.backend="program"
#node.program[2] is inlines. I don't think inlines are used anymore
#if you look at variable library_code above, it is set to ""
inlines_ = node.program[2]
if library_code and library_code not in inlines_.names:
inline = matlab2cpp.collection.Inline(inlines_, library_code)
inline.backend="program"
def wall_clock(node):
"""
Backend for the :py:func:`~matlab2cpp.Node.wall_clock` function.
Args:
node (Node):
node in function where ``wall_clock _timer`` should be declared.
See also:
:py:func:`~matlab2cpp.Node.wall_clock`
"""
declares = node.func[0]
if "_timer" not in declares:
clock = matlab2cpp.collection.Var(declares, name="_timer")
clock.type="wall_clock"
def plotting(node):
"""
Backend of the :py:func:`~matlab2cpp.Node.plotting` function.
Args:
node (Node): node in the function where plotting should be implemented.
See also:
:py:func:`~matlab2cpp.Node.plotting`
"""
declares = node.func[0]
# only do this once
if "_plot" in declares:
return
# add splot to header
node.include("SPlot")
# add a variable for Splot in declare
var = matlab2cpp.collection.Var(declares, name="_plot")
var.type = "SPlot"
# get function variable
func = node.func
# get function block
block = func[3]
# create new statement
statement = matlab2cpp.collection.Statement(block, code="")
statement.backend="code_block"
# fill it with new Get _splot
get = matlab2cpp.collection.Get(statement, name="_splot")
get.backend="reserved"
# translate the new nodes
statement.translate()
# swap with last statement, if it is a return-statement
if len(block)>1 and block[-2] and block[-2][0].cls == "Return":
block.children[-1], block.children[-2] = \
block.children[-2], block.children[-1]
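# Hedged sketch (not part of the upstream module): demonstrates the default
# traversal order of flatten() with a minimal stand-in object instead of a
# real matlab2cpp Node; only the attributes flatten() touches are provided.
class _FakeNode(object):
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)
    def __getitem__(self, index):
        return self.children[index]
def _example_flatten():
    tree = _FakeNode("root", [_FakeNode("a"), _FakeNode("b", [_FakeNode("c")])])
    return [n.name for n in flatten(tree)]  # ['root', 'a', 'b', 'c']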
| 26.219822
| 119
| 0.556142
|
433b6585ae59517299090b615140aa7fcc487a88
| 5,351
|
py
|
Python
|
qlib/data/base.py
|
Turtlesyu-27/qlib
|
cb2c3028b8cabfca909f22890c9b126a18a35daf
|
[
"MIT"
] | null | null | null |
qlib/data/base.py
|
Turtlesyu-27/qlib
|
cb2c3028b8cabfca909f22890c9b126a18a35daf
|
[
"MIT"
] | null | null | null |
qlib/data/base.py
|
Turtlesyu-27/qlib
|
cb2c3028b8cabfca909f22890c9b126a18a35daf
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import abc
import pandas as pd
class Expression(abc.ABC):
"""Expression base class"""
def __str__(self):
return type(self).__name__
def __repr__(self):
return str(self)
def __gt__(self, other):
from .ops import Gt
return Gt(self, other)
def __ge__(self, other):
from .ops import Ge
return Ge(self, other)
def __lt__(self, other):
from .ops import Lt
return Lt(self, other)
def __le__(self, other):
from .ops import Le
return Le(self, other)
def __eq__(self, other):
from .ops import Eq
return Eq(self, other)
def __ne__(self, other):
from .ops import Ne
return Ne(self, other)
def __add__(self, other):
from .ops import Add
return Add(self, other)
def __radd__(self, other):
from .ops import Add
return Add(other, self)
def __sub__(self, other):
from .ops import Sub
return Sub(self, other)
def __rsub__(self, other):
from .ops import Sub
return Sub(other, self)
def __mul__(self, other):
from .ops import Mul
return Mul(self, other)
def __rmul__(self, other):
from .ops import Mul
return Mul(self, other)
def __div__(self, other):
from .ops import Div
return Div(self, other)
def __rdiv__(self, other):
from .ops import Div
return Div(other, self)
def __truediv__(self, other):
from .ops import Div
return Div(self, other)
def __rtruediv__(self, other):
from .ops import Div
return Div(other, self)
def __pow__(self, other):
from .ops import Power
return Power(self, other)
def __and__(self, other):
from .ops import And
return And(self, other)
def __rand__(self, other):
from .ops import And
return And(other, self)
def __or__(self, other):
from .ops import Or
return Or(self, other)
def __ror__(self, other):
from .ops import Or
return Or(other, self)
def load(self, instrument, start_index, end_index, freq):
"""load feature
Parameters
----------
instrument : str
instrument code
start_index : str
feature start index [in calendar]
end_index : str
feature end index [in calendar]
freq : str
feature frequency
Returns
----------
pd.Series
feature series: The index of the series is the calendar index
"""
from .cache import H
# cache
args = str(self), instrument, start_index, end_index, freq
if args in H["f"]:
return H["f"][args]
if start_index is None or end_index is None or start_index > end_index:
raise ValueError("Invalid index range: {} {}".format(start_index, end_index))
series = self._load_internal(instrument, start_index, end_index, freq)
series.name = str(self)
H["f"][args] = series
return series
@abc.abstractmethod
def _load_internal(self, instrument, start_index, end_index, freq):
pass
@abc.abstractmethod
def get_longest_back_rolling(self):
"""Get the longest length of historical data the feature has accessed
This is designed for getting the needed range of the data to calculate
        the features in a specific range at first. However, situations like
        Ref(Ref($close, -1), 1) cannot be handled correctly.
        So this will only be used for detecting the length of historical data needed.
"""
# TODO: forward operator like Ref($close, -1) is not supported yet.
raise NotImplementedError("This function must be implemented in your newly defined feature")
@abc.abstractmethod
def get_extended_window_size(self):
"""get_extend_window_size
        To calculate this Operator in range [start_index, end_index],
        we have to get the *leaf feature* in
        range [start_index - lft_etd, end_index + rght_etd].
Returns
----------
(int, int)
lft_etd, rght_etd
"""
raise NotImplementedError("This function must be implemented in your newly defined feature")
class Feature(Expression):
"""Static Expression
This kind of feature will load data from provider
"""
def __init__(self, name=None):
if name:
self._name = name.lower()
else:
self._name = type(self).__name__.lower()
def __str__(self):
return "$" + self._name
def _load_internal(self, instrument, start_index, end_index, freq):
# load
from .data import FeatureD
return FeatureD.feature(instrument, str(self), start_index, end_index, freq)
def get_longest_back_rolling(self):
return 0
def get_extended_window_size(self):
return 0, 0
class ExpressionOps(Expression):
"""Operator Expression
This kind of feature will use operator for feature
construction on the fly.
"""
pass
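# Hedged sketch (not part of the upstream module): shows how the operator
# overloads above compose an Expression tree; "$high", "$low" and "$close"
# are illustrative field names, not guaranteed provider columns.
def _example_expression():
    assert str(Feature("high")) == "$high"
    spread = Feature("high") - Feature("low")   # builds an ops.Sub node
    ratio = spread / Feature("close")           # wraps it in an ops.Div node
    return ratio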
| 23.572687
| 100
| 0.607176
|
8d9cb43b87ace4c2a2f3fc01eeb4448f6399fa61
| 16,934
|
py
|
Python
|
services/messenger/tests/delete_message_test.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
services/messenger/tests/delete_message_test.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
services/messenger/tests/delete_message_test.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
from unittest import mock, TestCase
from app.crud import message_crud, dialogue_crud, notification_crud
from app.schemas import UserData
from config import ERROR, SUCCESS, DELETE
from tests import BaseTest, async_loop
class DeleteMessageTestCase(BaseTest, TestCase):
def setUp(self) -> None:
super().setUp()
self.dialogue = async_loop(dialogue_crud.create(self.session, users_ids='1_2'))
self.msg = async_loop(message_crud.create(self.session, dialogue_id=1, sender_id=1, msg='Hello world!'))
def test_only_1_sender_connection(self):
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(1)) as _:
with mock.patch('app.requests.get_user_request', return_value=self.get_new_user(2)) as _:
with mock.patch('app.requests.get_sender_data_request', return_value=self.get_new_user(1)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket:
socket.send_json({'id': 1, 'type': DELETE})
response = socket.receive_json()
self.assertEqual(
response,
{'data': {'msg': 'Message has been deleted'}, 'type': SUCCESS}
)
response = socket.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 0)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
socket.close()
def test_only_2_sender_connections(self):
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(1)) as _:
with mock.patch('app.requests.get_user_request', return_value=self.get_new_user(2)) as _:
with mock.patch('app.requests.get_sender_data_request', return_value=self.get_new_user(1)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_1:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_2:
socket_1.send_json(
{'id': 1, 'type': DELETE}
)
response = socket_1.receive_json()
self.assertEqual(
response,
{'data': {'msg': 'Message has been deleted'}, 'type': SUCCESS}
)
response = socket_1.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
response = socket_2.receive_json()
self.assertEqual(
response,
{'data': {'msg': 'Message has been deleted'}, 'type': SUCCESS}
)
response = socket_2.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 0)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
socket_1.close()
socket_2.close()
def test_sender_and_recipient_connections(self):
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(1)) as _:
with mock.patch('app.requests.get_user_request', return_value=self.get_new_user(2)) as _:
with mock.patch('app.requests.get_sender_data_request', return_value=self.get_new_user(1)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_sender:
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(2)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_recipient:
socket_sender.send_json(
{'id': 1, 'type': DELETE}
)
response = socket_sender.receive_json()
self.assertEqual(
response,
{'data': {'msg': 'Message has been deleted'}, 'type': SUCCESS}
)
response = socket_sender.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
response = socket_recipient.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 0)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
socket_sender.close()
socket_recipient.close()
def test_2_sender_and_2_recipient_connections(self):
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(1)) as _:
with mock.patch('app.requests.get_user_request', return_value=self.get_new_user(2)) as _:
with mock.patch('app.requests.get_sender_data_request', return_value=self.get_new_user(1)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_sender_1:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_sender_2:
with mock.patch(
'app.requests.sender_profile_request',
return_value=self.get_new_user(2)
) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_recipient_1:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket_recipient_2:
socket_sender_1.send_json(
{'id': 1, 'type': DELETE}
)
response = socket_sender_1.receive_json()
self.assertEqual(
response,
{'data': {'msg': 'Message has been deleted'}, 'type': SUCCESS}
)
response = socket_sender_1.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
response = socket_recipient_1.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
response = socket_sender_2.receive_json()
self.assertEqual(
response,
{'data': {'msg': 'Message has been deleted'}, 'type': SUCCESS}
)
response = socket_sender_2.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
response = socket_recipient_2.receive_json()
self.assertEqual(
response,
{
'data': {
'sender': UserData(**self.get_new_user(1)).dict(),
'id': 1,
},
'type': DELETE
}
)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 0)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
socket_sender_1.close()
socket_recipient_1.close()
socket_sender_2.close()
socket_recipient_2.close()
class BadDeleteMessageTestCase(BaseTest, TestCase):
def setUp(self) -> None:
super().setUp()
self.dialogue = async_loop(dialogue_crud.create(self.session, users_ids='1_2'))
self.msg = async_loop(message_crud.create(self.session, dialogue_id=1, sender_id=1, msg='Hello world!'))
def test_invalid_data_schema(self):
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(1)) as _:
with mock.patch('app.requests.get_user_request', return_value=self.get_new_user(2)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket:
socket.send_json({'type': DELETE})
response = socket.receive_json()
self.assertEqual(
response,
{'data': {'detail': {'msg': 'Invalid DELETE data'}}, 'type': ERROR}
)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
socket.close()
def test_bad_sender(self):
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(2)) as _:
with mock.patch('app.requests.get_user_request', return_value=self.get_new_user(1)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket:
socket.send_json({'id': 1, 'type': DELETE})
response = socket.receive_json()
self.assertEqual(
response,
{'data': {'detail': {'msg': 'You not send this message'}}, 'type': ERROR}
)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
socket.close()
def test_bad_message_id(self):
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.requests.sender_profile_request', return_value=self.get_new_user(1)) as _:
with mock.patch('app.requests.get_user_request', return_value=self.get_new_user(2)) as _:
with self.client.websocket_connect(f'{self.url}/messages/ws/token') as socket:
socket.send_json({'id': 143, 'type': DELETE})
response = socket.receive_json()
self.assertEqual(
response,
{'data': {'detail': {'msg': 'Message not found'}}, 'type': ERROR}
)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
socket.close()
| 54.275641
| 126
| 0.459608
|
a6b2f13dcab867cd776d6955f2758a3c400af0a0
| 2,312
|
py
|
Python
|
galaxy_importer/ansible_test/runners/local_image.py
|
sshnaidm/galaxy-importer
|
f68fc7241b601d69af0455557beb1ad20c4ec60d
|
[
"Apache-2.0"
] | null | null | null |
galaxy_importer/ansible_test/runners/local_image.py
|
sshnaidm/galaxy-importer
|
f68fc7241b601d69af0455557beb1ad20c4ec60d
|
[
"Apache-2.0"
] | null | null | null |
galaxy_importer/ansible_test/runners/local_image.py
|
sshnaidm/galaxy-importer
|
f68fc7241b601d69af0455557beb1ad20c4ec60d
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2012-2020, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import shutil
from subprocess import Popen, PIPE, STDOUT
from galaxy_importer import config
from galaxy_importer import exceptions
from galaxy_importer.ansible_test.builders.local_image_build import Build
from galaxy_importer.ansible_test.runners.base import BaseTestRunner
class LocalImageTestRunner(BaseTestRunner):
"""Run image locally with docker or podman."""
def run(self):
cfg = config.Config(config_data=config.ConfigFile.load())
build = Build(
self.filepath,
f'{self.metadata.namespace}-{self.metadata.name}-{self.metadata.version}',
cfg,
self.log)
container_engine = build.get_container_engine(cfg)
if not shutil.which(container_engine):
self.log.warning(f'"{container_engine}" not found, skipping ansible-test sanity')
return
image_id = build.build_image()
self.log.info('Running image...')
self._run_image(
image_id=image_id,
container_engine=container_engine
)
build.cleanup()
def _run_image(self, image_id, container_engine):
cmd = [container_engine, 'run', image_id, 'LOCAL_IMAGE_RUNNER']
proc = Popen(
cmd,
stdout=PIPE,
stderr=STDOUT,
encoding='utf-8',
)
for line in proc.stdout:
self.log.info(line.strip())
return_code = proc.wait()
if return_code != 0:
raise exceptions.AnsibleTestError(
'An exception occurred in {}, returncode={}'
.format(' '.join(cmd), return_code))
| 32.56338
| 93
| 0.66263
|
b9c7e28e67cb270f545d0dfb186aaf15fdb1b297
| 3,152
|
py
|
Python
|
medi/inference/gradual/generics.py
|
yuan-xy/medi
|
ffa416b73eb2fbffbae7a27b6eccc267b061ed0f
|
[
"MIT"
] | 3
|
2020-05-27T08:42:26.000Z
|
2021-12-04T08:54:08.000Z
|
medi/inference/gradual/generics.py
|
yuan-xy/medi
|
ffa416b73eb2fbffbae7a27b6eccc267b061ed0f
|
[
"MIT"
] | null | null | null |
medi/inference/gradual/generics.py
|
yuan-xy/medi
|
ffa416b73eb2fbffbae7a27b6eccc267b061ed0f
|
[
"MIT"
] | null | null | null |
"""
This module is about generics, like the `int` in `List[int]`. It's not about
the Generic class.
"""
from medi import debug
from medi.cache import memoize_method
from medi.inference.utils import to_tuple
from medi.inference.base_value import ValueSet, NO_VALUES
from medi.inference.value.iterable import SequenceLiteralValue
from medi.inference.helpers import is_string
def _resolve_forward_references(context, value_set):
for value in value_set:
if is_string(value):
from medi.inference.gradual.annotation import _get_forward_reference_node
node = _get_forward_reference_node(context, value.get_safe_value())
if node is not None:
for c in context.infer_node(node):
yield c
else:
yield value
class _AbstractGenericManager(object):
def get_index_and_execute(self, index):
try:
return self[index].execute_annotation()
except IndexError:
debug.warning('No param #%s found for annotation %s', index, self)
return NO_VALUES
def get_type_hint(self):
return '[%s]' % ', '.join(t.get_type_hint(add_class_info=False) for t in self.to_tuple())
class LazyGenericManager(_AbstractGenericManager):
def __init__(self, context_of_index, index_value):
self._context_of_index = context_of_index
self._index_value = index_value
@memoize_method
def __getitem__(self, index):
return self._tuple()[index]()
def __len__(self):
return len(self._tuple())
@memoize_method
@to_tuple
def _tuple(self):
def lambda_scoping_in_for_loop_sucks(lazy_value):
return lambda: ValueSet(_resolve_forward_references(
self._context_of_index,
lazy_value.infer()
))
if isinstance(self._index_value, SequenceLiteralValue):
for lazy_value in self._index_value.py__iter__(contextualized_node=None):
yield lambda_scoping_in_for_loop_sucks(lazy_value)
else:
yield lambda: ValueSet(_resolve_forward_references(
self._context_of_index,
ValueSet([self._index_value])
))
@to_tuple
def to_tuple(self):
for callable_ in self._tuple():
yield callable_()
def is_homogenous_tuple(self):
if isinstance(self._index_value, SequenceLiteralValue):
entries = self._index_value.get_tree_entries()
if len(entries) == 2 and entries[1] == '...':
return True
return False
def __repr__(self):
return '<LazyG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))
class TupleGenericManager(_AbstractGenericManager):
def __init__(self, tup):
self._tuple = tup
def __getitem__(self, index):
return self._tuple[index]
def __len__(self):
return len(self._tuple)
def to_tuple(self):
return self._tuple
def is_homogenous_tuple(self):
return False
def __repr__(self):
return '<TupG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))
| 30.901961
| 97
| 0.64816
|
45ff3e166fb2031b56763c4f5e978752c228670d
| 38,083
|
py
|
Python
|
google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
|
LaudateCorpus1/python-spanner
|
819be92e46f63133724dd0d3f5e57b20e33e299e
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
|
LaudateCorpus1/python-spanner
|
819be92e46f63133724dd0d3f5e57b20e33e299e
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py
|
LaudateCorpus1/python-spanner
|
819be92e46f63133724dd0d3f5e57b20e33e299e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatabaseAdminGrpcTransport
class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport):
"""gRPC AsyncIO backend transport for DatabaseAdmin.
Cloud Spanner Database Admin API
The Cloud Spanner Database Admin API can be used to:
- create, drop, and list databases
- update the schema of pre-existing databases
- create, delete and list backups for a database
- restore a database from an existing backup
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "spanner.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "spanner.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_databases(
self,
) -> Callable[
[spanner_database_admin.ListDatabasesRequest],
Awaitable[spanner_database_admin.ListDatabasesResponse],
]:
r"""Return a callable for the list databases method over gRPC.
Lists Cloud Spanner databases.
Returns:
Callable[[~.ListDatabasesRequest],
Awaitable[~.ListDatabasesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_databases" not in self._stubs:
self._stubs["list_databases"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
request_serializer=spanner_database_admin.ListDatabasesRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize,
)
return self._stubs["list_databases"]
@property
def create_database(
self,
) -> Callable[
[spanner_database_admin.CreateDatabaseRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create database method over gRPC.
Creates a new Cloud Spanner database and starts to prepare it
for serving. The returned [long-running
operation][google.longrunning.Operation] will have a name of the
format ``<database_name>/operations/<operation_id>`` and can be
used to track preparation of the database. The
[metadata][google.longrunning.Operation.metadata] field type is
[CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
The [response][google.longrunning.Operation.response] field type
is [Database][google.spanner.admin.database.v1.Database], if
successful.
Returns:
Callable[[~.CreateDatabaseRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_database" not in self._stubs:
self._stubs["create_database"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_database"]
@property
def get_database(
self,
) -> Callable[
[spanner_database_admin.GetDatabaseRequest],
Awaitable[spanner_database_admin.Database],
]:
r"""Return a callable for the get database method over gRPC.
Gets the state of a Cloud Spanner database.
Returns:
Callable[[~.GetDatabaseRequest],
Awaitable[~.Database]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database" not in self._stubs:
self._stubs["get_database"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
request_serializer=spanner_database_admin.GetDatabaseRequest.serialize,
response_deserializer=spanner_database_admin.Database.deserialize,
)
return self._stubs["get_database"]
@property
def update_database_ddl(
self,
) -> Callable[
[spanner_database_admin.UpdateDatabaseDdlRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the update database ddl method over gRPC.
Updates the schema of a Cloud Spanner database by
creating/altering/dropping tables, columns, indexes, etc. The
returned [long-running operation][google.longrunning.Operation]
will have a name of the format
``<database_name>/operations/<operation_id>`` and can be used to
track execution of the schema change(s). The
[metadata][google.longrunning.Operation.metadata] field type is
[UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
The operation has no response.
Returns:
Callable[[~.UpdateDatabaseDdlRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_database_ddl" not in self._stubs:
self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_database_ddl"]
@property
def drop_database(
self,
) -> Callable[
[spanner_database_admin.DropDatabaseRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the drop database method over gRPC.
Drops (aka deletes) a Cloud Spanner database. Completed backups
for the database will be retained according to their
``expire_time``. Note: Cloud Spanner might continue to accept
requests for a few seconds after the database has been deleted.
Returns:
Callable[[~.DropDatabaseRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_database" not in self._stubs:
self._stubs["drop_database"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
request_serializer=spanner_database_admin.DropDatabaseRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["drop_database"]
@property
def get_database_ddl(
self,
) -> Callable[
[spanner_database_admin.GetDatabaseDdlRequest],
Awaitable[spanner_database_admin.GetDatabaseDdlResponse],
]:
r"""Return a callable for the get database ddl method over gRPC.
Returns the schema of a Cloud Spanner database as a list of
formatted DDL statements. This method does not show pending
schema updates, those may be queried using the
[Operations][google.longrunning.Operations] API.
Returns:
Callable[[~.GetDatabaseDdlRequest],
Awaitable[~.GetDatabaseDdlResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_database_ddl" not in self._stubs:
self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize,
response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize,
)
return self._stubs["get_database_ddl"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on a database or backup resource.
Replaces any existing policy.
Authorization requires ``spanner.databases.setIamPolicy``
permission on
[resource][google.iam.v1.SetIamPolicyRequest.resource]. For
backups, authorization requires ``spanner.backups.setIamPolicy``
permission on
[resource][google.iam.v1.SetIamPolicyRequest.resource].
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a database or backup
resource. Returns an empty policy if a database or backup exists
but does not have a policy set.
Authorization requires ``spanner.databases.getIamPolicy``
permission on
[resource][google.iam.v1.GetIamPolicyRequest.resource]. For
backups, authorization requires ``spanner.backups.getIamPolicy``
permission on
[resource][google.iam.v1.GetIamPolicyRequest.resource].
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that the caller has on the specified
database or backup resource.
Attempting this RPC on a non-existent Cloud Spanner database
will result in a NOT_FOUND error if the user has
``spanner.databases.list`` permission on the containing Cloud
Spanner instance. Otherwise returns an empty set of permissions.
Calling this method on a backup that does not exist will result
in a NOT_FOUND error if the user has ``spanner.backups.list``
permission on the containing instance.
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
@property
def create_backup(
self,
) -> Callable[
[gsad_backup.CreateBackupRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create backup method over gRPC.
Starts creating a new Cloud Spanner Backup. The returned backup
[long-running operation][google.longrunning.Operation] will have
a name of the format
``projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>``
and can be used to track creation of the backup. The
[metadata][google.longrunning.Operation.metadata] field type is
[CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
The [response][google.longrunning.Operation.response] field type
is [Backup][google.spanner.admin.database.v1.Backup], if
successful. Cancelling the returned operation will stop the
creation and delete the backup. There can be only one pending
backup creation per database. Backup creation of different
databases can run concurrently.
Returns:
Callable[[~.CreateBackupRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
self._stubs["create_backup"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
request_serializer=gsad_backup.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
@property
def get_backup(
self,
) -> Callable[[backup.GetBackupRequest], Awaitable[backup.Backup]]:
r"""Return a callable for the get backup method over gRPC.
Gets metadata on a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
Returns:
Callable[[~.GetBackupRequest],
Awaitable[~.Backup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
self._stubs["get_backup"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
request_serializer=backup.GetBackupRequest.serialize,
response_deserializer=backup.Backup.deserialize,
)
return self._stubs["get_backup"]
@property
def update_backup(
self,
) -> Callable[[gsad_backup.UpdateBackupRequest], Awaitable[gsad_backup.Backup]]:
r"""Return a callable for the update backup method over gRPC.
Updates a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
Returns:
Callable[[~.UpdateBackupRequest],
Awaitable[~.Backup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
self._stubs["update_backup"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
request_serializer=gsad_backup.UpdateBackupRequest.serialize,
response_deserializer=gsad_backup.Backup.deserialize,
)
return self._stubs["update_backup"]
@property
def delete_backup(
self,
) -> Callable[[backup.DeleteBackupRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete backup method over gRPC.
Deletes a pending or completed
[Backup][google.spanner.admin.database.v1.Backup].
Returns:
Callable[[~.DeleteBackupRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
request_serializer=backup.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_backup"]
@property
def list_backups(
self,
) -> Callable[[backup.ListBackupsRequest], Awaitable[backup.ListBackupsResponse]]:
r"""Return a callable for the list backups method over gRPC.
Lists completed and pending backups. Backups returned are
ordered by ``create_time`` in descending order, starting from
the most recent ``create_time``.
Returns:
Callable[[~.ListBackupsRequest],
Awaitable[~.ListBackupsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
self._stubs["list_backups"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
request_serializer=backup.ListBackupsRequest.serialize,
response_deserializer=backup.ListBackupsResponse.deserialize,
)
return self._stubs["list_backups"]
@property
def restore_database(
self,
) -> Callable[
[spanner_database_admin.RestoreDatabaseRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the restore database method over gRPC.
Create a new database by restoring from a completed backup. The
new database must be in the same project and in an instance with
the same instance configuration as the instance containing the
backup. The returned database [long-running
operation][google.longrunning.Operation] has a name of the
format
``projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>``,
and can be used to track the progress of the operation, and to
cancel it. The [metadata][google.longrunning.Operation.metadata]
field type is
[RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
The [response][google.longrunning.Operation.response] type is
[Database][google.spanner.admin.database.v1.Database], if
successful. Cancelling the returned operation will stop the
restore and delete the database. There can be only one database
being restored into an instance at a time. Once the restore
operation completes, a new restore operation can be initiated,
without waiting for the optimize operation associated with the
first restore to complete.
Returns:
Callable[[~.RestoreDatabaseRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_database" not in self._stubs:
self._stubs["restore_database"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_database"]
@property
def list_database_operations(
self,
) -> Callable[
[spanner_database_admin.ListDatabaseOperationsRequest],
Awaitable[spanner_database_admin.ListDatabaseOperationsResponse],
]:
r"""Return a callable for the list database operations method over gRPC.
Lists database
[longrunning-operations][google.longrunning.Operation]. A
database operation has a name of the form
``projects/<project>/instances/<instance>/databases/<database>/operations/<operation>``.
The long-running operation
[metadata][google.longrunning.Operation.metadata] field type
``metadata.type_url`` describes the type of the metadata.
Operations returned include those that have
completed/failed/canceled within the last 7 days, and pending
operations.
Returns:
Callable[[~.ListDatabaseOperationsRequest],
Awaitable[~.ListDatabaseOperationsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_database_operations" not in self._stubs:
self._stubs["list_database_operations"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize,
response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize,
)
return self._stubs["list_database_operations"]
@property
def list_backup_operations(
self,
) -> Callable[
[backup.ListBackupOperationsRequest],
Awaitable[backup.ListBackupOperationsResponse],
]:
r"""Return a callable for the list backup operations method over gRPC.
Lists the backup [long-running
operations][google.longrunning.Operation] in the given instance.
A backup operation has a name of the form
``projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>``.
The long-running operation
[metadata][google.longrunning.Operation.metadata] field type
``metadata.type_url`` describes the type of the metadata.
Operations returned include those that have
completed/failed/canceled within the last 7 days, and pending
operations. Operations returned are ordered by
``operation.metadata.value.progress.start_time`` in descending
order starting from the most recently started operation.
Returns:
Callable[[~.ListBackupOperationsRequest],
Awaitable[~.ListBackupOperationsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backup_operations" not in self._stubs:
self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary(
"/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
request_serializer=backup.ListBackupOperationsRequest.serialize,
response_deserializer=backup.ListBackupOperationsResponse.deserialize,
)
return self._stubs["list_backup_operations"]
def close(self):
return self.grpc_channel.close()
__all__ = ("DatabaseAdminGrpcAsyncIOTransport",)
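# --- Illustrative usage sketch (not part of the generated module) ---
# A minimal example of how this AsyncIO transport is normally consumed indirectly
# through DatabaseAdminAsyncClient; the resource name below is a placeholder and
# application default credentials are assumed to be available.
if __name__ == "__main__":
    import asyncio
    from google.cloud.spanner_admin_database_v1 import DatabaseAdminAsyncClient

    async def show_database_state():
        # The async client builds a DatabaseAdminGrpcAsyncIOTransport internally.
        client = DatabaseAdminAsyncClient()
        database = await client.get_database(
            name="projects/my-project/instances/my-instance/databases/my-database"
        )
        print(database.name, database.state)

    asyncio.run(show_database_state())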
| 45.175563
| 104
| 0.652286
|
aa61a97bb8d344b8dd8ba4839dc4a1e9bcd0a2b9
| 9,123
|
py
|
Python
|
python_modules/dagster/dagster/core/engine/engine_multiprocess.py
|
habibutsu/dagster
|
0b590a18477ad0da6131acec123295ab4a8d53b3
|
[
"Apache-2.0"
] | 3
|
2020-04-28T16:27:33.000Z
|
2020-07-22T07:43:30.000Z
|
python_modules/dagster/dagster/core/engine/engine_multiprocess.py
|
habibutsu/dagster
|
0b590a18477ad0da6131acec123295ab4a8d53b3
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/engine/engine_multiprocess.py
|
habibutsu/dagster
|
0b590a18477ad0da6131acec123295ab4a8d53b3
|
[
"Apache-2.0"
] | 1
|
2021-02-21T12:16:47.000Z
|
2021-02-21T12:16:47.000Z
|
import os
from dagster import check
from dagster.core.errors import DagsterSubprocessError
from dagster.core.events import DagsterEvent, EngineEventData
from dagster.core.execution.api import create_execution_plan, execute_plan_iterator
from dagster.core.execution.config import MultiprocessExecutorConfig
from dagster.core.execution.context.system import SystemPipelineExecutionContext
from dagster.core.execution.memoization import copy_required_intermediates_for_execution
from dagster.core.execution.plan.plan import ExecutionPlan
from dagster.core.instance import DagsterInstance
from dagster.utils import get_multiprocessing_context, start_termination_thread
from dagster.utils.timing import format_duration, time_execution_scope
from .child_process_executor import (
ChildProcessCommand,
ChildProcessEvent,
ChildProcessSystemErrorEvent,
execute_child_process_command,
)
from .engine_base import Engine
class InProcessExecutorChildProcessCommand(ChildProcessCommand):
def __init__(
self, environment_dict, pipeline_run, executor_config, step_key, instance_ref, term_event
):
self.environment_dict = environment_dict
self.executor_config = executor_config
self.pipeline_run = pipeline_run
self.step_key = step_key
self.instance_ref = instance_ref
self.term_event = term_event
def execute(self):
check.inst(self.executor_config, MultiprocessExecutorConfig)
pipeline_def = self.executor_config.load_pipeline(self.pipeline_run)
environment_dict = dict(self.environment_dict, execution={'in_process': {}})
start_termination_thread(self.term_event)
execution_plan = create_execution_plan(
pipeline_def, environment_dict, self.pipeline_run
).build_subset_plan([self.step_key])
for step_event in execute_plan_iterator(
execution_plan,
self.pipeline_run,
environment_dict=environment_dict,
instance=DagsterInstance.from_ref(self.instance_ref),
):
yield step_event
def execute_step_out_of_process(step_context, step, errors, term_events):
command = InProcessExecutorChildProcessCommand(
step_context.environment_dict,
step_context.pipeline_run,
step_context.executor_config,
step.key,
step_context.instance.get_ref(),
term_events[step.key],
)
for ret in execute_child_process_command(command):
if ret is None or isinstance(ret, DagsterEvent):
yield ret
elif isinstance(ret, ChildProcessEvent):
if isinstance(ret, ChildProcessSystemErrorEvent):
errors[ret.pid] = ret.error_info
elif isinstance(ret, KeyboardInterrupt):
yield DagsterEvent.engine_event(
step_context,
'Multiprocess engine: received KeyboardInterrupt - forwarding to active child processes',
EngineEventData.interrupted(list(term_events.keys())),
)
for term_event in term_events.values():
term_event.set()
else:
check.failed('Unexpected return value from child process {}'.format(type(ret)))
class MultiprocessEngine(Engine): # pylint: disable=no-init
@staticmethod
def execute(pipeline_context, execution_plan):
check.inst_param(pipeline_context, 'pipeline_context', SystemPipelineExecutionContext)
check.inst_param(execution_plan, 'execution_plan', ExecutionPlan)
intermediates_manager = pipeline_context.intermediates_manager
limit = pipeline_context.executor_config.max_concurrent
yield DagsterEvent.engine_event(
pipeline_context,
'Executing steps using multiprocess engine: parent process (pid: {pid})'.format(
pid=os.getpid()
),
event_specific_data=EngineEventData.multiprocess(
os.getpid(), step_keys_to_execute=execution_plan.step_keys_to_execute
),
)
# It would be good to implement a reference tracking algorithm here so we could
        # garbage collect results that are no longer needed by any steps
# https://github.com/dagster-io/dagster/issues/811
with time_execution_scope() as timer_result:
for event in copy_required_intermediates_for_execution(
pipeline_context, execution_plan
):
yield event
active_execution = execution_plan.start()
active_iters = {}
errors = {}
term_events = {}
step_results = {}
stopping = False
while (not stopping and not active_execution.is_complete) or active_iters:
try:
# start iterators
while len(active_iters) < limit and not stopping:
steps = active_execution.get_steps_to_execute(
limit=(limit - len(active_iters))
)
if not steps:
break
for step in steps:
step_context = pipeline_context.for_step(step)
term_events[step.key] = get_multiprocessing_context().Event()
active_iters[step.key] = execute_step_out_of_process(
step_context, step, errors, term_events
)
# process active iterators
empty_iters = []
for key, step_iter in active_iters.items():
try:
event_or_none = next(step_iter)
if event_or_none is None:
continue
else:
yield event_or_none
if event_or_none.is_step_success:
step_results[key] = True
if event_or_none.is_step_failure:
step_results[key] = False
except StopIteration:
empty_iters.append(key)
# clear and mark complete finished iterators
for key in empty_iters:
del active_iters[key]
if term_events[key].is_set():
stopping = True
del term_events[key]
was_success = step_results.get(key)
if was_success == True:
active_execution.mark_success(key)
elif was_success == False:
active_execution.mark_failed(key)
else:
# check errors list?
pipeline_context.log.error(
'Step {key} finished without success or failure event, assuming failure.'.format(
key=key
)
)
active_execution.mark_failed(key)
# process skips from failures or uncovered inputs
for event in active_execution.skipped_step_events_iterator(pipeline_context):
yield event
                # In the unlikely event that we get interrupted in this coordination section rather
                # than while polling the subprocesses for events - try to clean up gracefully
except KeyboardInterrupt:
yield DagsterEvent.engine_event(
pipeline_context,
'Multiprocess engine: received KeyboardInterrupt - forwarding to active child processes',
EngineEventData.interrupted(list(term_events.keys())),
)
for event in term_events.values():
event.set()
errs = {pid: err for pid, err in errors.items() if err}
if errs:
raise DagsterSubprocessError(
                    'During multiprocess execution errors occurred in child processes:\n{error_list}'.format(
error_list='\n'.join(
[
'In process {pid}: {err}'.format(pid=pid, err=err.to_string())
for pid, err in errs.items()
]
)
),
subprocess_error_infos=list(errs.values()),
)
yield DagsterEvent.engine_event(
pipeline_context,
'Multiprocess engine: parent process exiting after {duration} (pid: {pid})'.format(
duration=format_duration(timer_result.millis), pid=os.getpid()
),
event_specific_data=EngineEventData.multiprocess(os.getpid()),
)
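# --- Illustrative sketch (standalone, simplified; names are hypothetical): the bounded
# round-robin draining pattern used by MultiprocessEngine.execute above, with plain
# generators standing in for per-step child-process event streams.
def _drain_with_limit(step_iter_factories, limit):
    pending = list(step_iter_factories.items())
    active, results = {}, {}
    while pending or active:
        # Start new iterators until the concurrency limit is reached.
        while pending and len(active) < limit:
            key, factory = pending.pop(0)
            active[key] = factory()
        # Pull one event from each active iterator, retiring exhausted ones.
        finished = []
        for key, step_iter in active.items():
            try:
                results.setdefault(key, []).append(next(step_iter))
            except StopIteration:
                finished.append(key)
        for key in finished:
            del active[key]
    return results

if __name__ == '__main__':
    def make_step(name, n):
        return lambda: ('{0}-event-{1}'.format(name, i) for i in range(n))
    print(_drain_with_limit({'a': make_step('a', 2), 'b': make_step('b', 3), 'c': make_step('c', 1)}, limit=2))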
| 43.236967
| 113
| 0.58073
|
b56d64da01fe3dafb56f152af31e68d05e5ffc9c
| 2,524
|
py
|
Python
|
Python/DesktopApp/resetApp.py
|
Matt-Conrad/CXR_View_Classification
|
8ba437f136655b6e674a49613ffd858f2e9623a5
|
[
"MIT"
] | null | null | null |
Python/DesktopApp/resetApp.py
|
Matt-Conrad/CXR_View_Classification
|
8ba437f136655b6e674a49613ffd858f2e9623a5
|
[
"MIT"
] | 29
|
2020-11-20T17:39:07.000Z
|
2021-11-18T17:19:10.000Z
|
Python/DesktopApp/resetApp.py
|
Matt-Conrad/CXR_View_Classification
|
8ba437f136655b6e674a49613ffd858f2e9623a5
|
[
"MIT"
] | null | null | null |
from metadata_to_db.databaseHandler import DatabaseHandler
from cxrConfigHandler import CxrConfigHandler
from shutil import rmtree, copyfile
import atexit
import os
class AppResetter:
def __init__(self):
configFilename = "config.ini"
parentFolder = os.path.dirname(os.path.abspath(__file__))
miscFolderRelPath = os.path.join(os.path.dirname(os.path.dirname(parentFolder)), "miscellaneous")
configFilePath = os.path.join(parentFolder, configFilename)
copyfile(os.path.join(miscFolderRelPath, configFilename), configFilePath)
atexit.register(self.cleanUp)
self.configHandler = CxrConfigHandler(configFilePath)
self.dbHandler = DatabaseHandler(self.configHandler)
def cleanUp(self):
self.deleteFile(self.configHandler.getConfigFilePath())
def toBeforeStage1(self):
self.deleteFile(self.configHandler.getTgzFilePath())
self.deleteUncompressedFolder()
self.deleteDb()
self.cleanupTrainOutput()
def toBeforeStage2(self):
self.deleteUncompressedFolder()
self.deleteDb()
self.cleanupTrainOutput()
def toBeforeStage3(self):
self.deleteDb()
self.cleanupTrainOutput()
def toBeforeStage4(self):
self.deleteTable(self.configHandler.getTableName("features"))
self.deleteTable(self.configHandler.getTableName("label"))
self.cleanupTrainOutput()
def toBeforeStage5(self):
self.deleteTable(self.configHandler.getTableName("label"))
self.cleanupTrainOutput()
def toBeforeStage6(self):
self.cleanupTrainOutput()
def cleanupTrainOutput(self):
self.deleteFile(os.path.join(self.configHandler.getParentFolder(), "full_set_classifier.joblib"))
self.deleteFile(os.path.join(self.configHandler.getParentFolder(), "test_images.csv"))
def deleteFile(self, fileNameOrPath):
try:
os.remove(fileNameOrPath)
except:
pass
def deleteDb(self):
try:
self.dbHandler.dropDb(self.configHandler.getDbInfo()['database'])
except:
pass
def deleteTable(self, table_name):
try:
self.dbHandler.dropTable(table_name)
except:
pass
def deleteUncompressedFolder(self):
try:
rmtree(self.configHandler.getUnpackFolderPath())
except:
pass
if __name__ == "__main__":
resetter = AppResetter()
resetter.toBeforeStage1()
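# --- Illustrative sketch (standalone, hypothetical paths): the copy-then-register
# cleanup idiom AppResetter.__init__ uses for its working config file.
def _demo_config_lifecycle():
    import atexit
    import os
    import tempfile
    working_copy = os.path.join(tempfile.gettempdir(), "config.ini")
    with open(working_copy, "w") as fh:
        fh.write("[app]\nstage = 1\n")
    # Whatever happens later, the working copy is removed when the process exits.
    atexit.register(lambda: os.path.exists(working_copy) and os.remove(working_copy))
    return working_copy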
| 31.949367
| 105
| 0.674326
|
6c9866e6ec943ed08ab1b900f2e5e719d3ddd554
| 1,304
|
py
|
Python
|
pontoon/tags/views.py
|
10allday-Software/pontoon
|
fd1d0671072e2cef5346ef973c663cc2682234b9
|
[
"BSD-3-Clause"
] | 2
|
2020-08-27T13:31:04.000Z
|
2021-02-25T14:37:40.000Z
|
pontoon/tags/views.py
|
10allday-Software/pontoon
|
fd1d0671072e2cef5346ef973c663cc2682234b9
|
[
"BSD-3-Clause"
] | 16
|
2021-09-03T15:29:49.000Z
|
2021-09-03T15:30:12.000Z
|
pontoon/tags/views.py
|
10allday-Software/pontoon
|
fd1d0671072e2cef5346ef973c663cc2682234b9
|
[
"BSD-3-Clause"
] | null | null | null |
from django.http import Http404
from .utils import TagsTool
from pontoon.base.models import Project
from pontoon.base.utils import is_ajax
from django.views.generic import DetailView
class ProjectTagView(DetailView):
"""This view provides both the html view and the JSON view for
retrieving results in the /projects/$project/tags/$tag view
"""
model = Project
slug_url_kwarg = "project"
template_name = "tags/tag.html"
def get_queryset(self):
return super().get_queryset().visible_for(self.request.user)
def get(self, request, *args, **kwargs):
if is_ajax(request):
return self.get_AJAX(request, *args, **kwargs)
return super().get(request, *args, **kwargs)
def get_AJAX(self, request, *args, **kwargs):
self.template_name = "projects/includes/teams.html"
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
try:
tag = TagsTool(projects=[self.object], priority=True,)[
self.kwargs["tag"]
].get()
except IndexError:
raise Http404
if is_ajax(self.request):
return dict(project=self.object, locales=list(tag.iter_locales()), tag=tag,)
return dict(project=self.object, tag=tag)
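# --- Illustrative sketch (an assumption, not pontoon's exact helper): what an
# is_ajax(request) replacement typically checks now that Django has removed
# HttpRequest.is_ajax().
def _is_ajax_sketch(request):
    # jQuery-style clients mark XHR calls with this header, which lets the view
    # above switch between the full page template and the partial teams template.
    return request.headers.get("x-requested-with") == "XMLHttpRequest"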
| 30.325581
| 88
| 0.650307
|
17d8827b98b8f7d59fc84c18956719148106ba2f
| 593
|
py
|
Python
|
template/scripts/script_template.py
|
nhamilto/template
|
45d3dc35836d902c1feeef20af4b2b7f45577c36
|
[
"Apache-2.0"
] | null | null | null |
template/scripts/script_template.py
|
nhamilto/template
|
45d3dc35836d902c1feeef20af4b2b7f45577c36
|
[
"Apache-2.0"
] | null | null | null |
template/scripts/script_template.py
|
nhamilto/template
|
45d3dc35836d902c1feeef20af4b2b7f45577c36
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
DESCRIPTION
TODO This describes how to use this script. This docstring
will be printed by the script if there is an error or
if the user requests help (-h or --help).
AUTHOR
Nicholas Hamilton
nicholas.hamilton@nrel.gov
Date: BUILDDATE
"""
import sys, os, traceback, optparse
import datetime
import logging
import numpy as np
import scipy as sp
import pandas as pd
logger = logging.getLogger(__name__)
def main():
    '''
    Entry point: replace this placeholder with the real work of the script.
    '''
    global options, args
    # TODO: Do something more interesting here...
    logger.debug('Hello world!')
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    parser = optparse.OptionParser(usage=globals()['__doc__'])
    parser.add_option('-v', '--verbose', action='store_true', default=False, help='verbose output')
    (options, args) = parser.parse_args()
    main()
| 16.942857
| 62
| 0.671164
|
adb20bd56ed3d1c39ee7b0dc70c958f168f1e0cc
| 6,495
|
py
|
Python
|
betweenness.py
|
spicyShoyo/hsfinalpoj
|
f843f5ee33914950a2911042f19f5cbdf0df3b41
|
[
"MIT"
] | null | null | null |
betweenness.py
|
spicyShoyo/hsfinalpoj
|
f843f5ee33914950a2911042f19f5cbdf0df3b41
|
[
"MIT"
] | null | null | null |
betweenness.py
|
spicyShoyo/hsfinalpoj
|
f843f5ee33914950a2911042f19f5cbdf0df3b41
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
'''
This class calculates the exact edge betweenness of a graph and returns the
edge carrying the highest betweenness score.
'''
class BetweenessExact:
def __init__(self, network, num_nodes):
self.network = network
self.num_nodes = num_nodes
self.new_bet_dic = {}
def bfs(self, cur_node):
q = [cur_node]
degree = {}
degree[cur_node] = 1
parent = {}
child = {}
level = {}
child[cur_node] = []
level[cur_node] = 0
parent[cur_node] = [-1]
level_list = {}
level_list[0] = [cur_node]
score = {}
score[0] = 1
cur_level = 0
bet_dic = {}
while len(q) != 0:
i = q.pop(0)
if level[i] == cur_level:
cur_level += 1
if cur_level not in level_list:
level_list[cur_level] = []
if i not in self.network.adj_dic:
print("No edge: ", i)
continue
for j in self.network.adj_dic[i]:
if j not in level:
level_list[cur_level].append(j)
level[j] = cur_level
parent[j] = [i]
child[j] = []
child[i].append(j)
q.append(j)
degree[j] = degree[i]
score[j] = 1
elif level[j] == cur_level:
parent[j].append(i)
child[i].append(j)
degree[j] += degree[i]
else:
continue
if len(level_list[cur_level]) == 0:
del level_list[cur_level]
cur_level -= 1
for l in range(cur_level, 0, -1):
cur_nodes = level_list[l]
for cur_node in cur_nodes:
for child_node in child[cur_node]:
cur_edge = frozenset((cur_node, child_node))
score[cur_node] += bet_dic[cur_edge]
sum_degree = 0
for parent_node in parent[cur_node]:
sum_degree += degree[parent_node]
for parent_node in parent[cur_node]:
cur_edge = frozenset((parent_node, cur_node))
if cur_edge not in bet_dic:
bet_dic[cur_edge] = 0
bet_dic[cur_edge] += score[cur_node] * degree[parent_node] / sum_degree
for e in bet_dic:
if e not in self.new_bet_dic:
self.new_bet_dic[e] = bet_dic[e]
else:
self.new_bet_dic[e] +=bet_dic[e]
def calc_bet(self):
for i in range(self.num_nodes):
# if i % 500 == 0:
# print("betweeness in progress: ", i)
self.bfs(i)
cur_best = 0
best_key = None
for key in self.new_bet_dic:
if self.new_bet_dic[key] > cur_best:
cur_best = self.new_bet_dic[key]
best_key = key
print(best_key, cur_best)
return list(best_key)
class BetweenessAppro:
def __init__(self, network, num_nodes):
self.network = network
self.num_nodes = num_nodes
self.new_bet_dic = {}
self.took = {}
self.done = {}
def bfs(self, cur_node):
q = [cur_node]
degree = {}
degree[cur_node] = 1
parent = {}
child = {}
level = {}
child[cur_node] = []
level[cur_node] = 0
parent[cur_node] = [-1]
level_list = {}
level_list[0] = [cur_node]
score = {}
score[0] = 1
cur_level = 0
bet_dic = {}
while len(q) != 0:
i = q.pop(0)
if level[i] == cur_level:
cur_level += 1
if cur_level not in level_list:
level_list[cur_level] = []
if i not in self.network.adj_dic:
print("No edge: ", i)
continue
for j in self.network.adj_dic[i]:
if j not in level:
level_list[cur_level].append(j)
level[j] = cur_level
parent[j] = [i]
child[j] = []
child[i].append(j)
q.append(j)
degree[j] = degree[i]
score[j] = 1
elif level[j] == cur_level:
parent[j].append(i)
child[i].append(j)
degree[j] += degree[i]
else:
continue
if len(level_list[cur_level]) == 0:
del level_list[cur_level]
cur_level -= 1
for l in range(cur_level, 0, -1):
cur_nodes = level_list[l]
for cur_node in cur_nodes:
for child_node in child[cur_node]:
cur_edge = frozenset((cur_node, child_node))
score[cur_node] += bet_dic[cur_edge]
sum_degree = 0
for parent_node in parent[cur_node]:
sum_degree += degree[parent_node]
for parent_node in parent[cur_node]:
cur_edge = frozenset((parent_node, cur_node))
if cur_edge not in bet_dic:
bet_dic[cur_edge] = 0
bet_dic[cur_edge] += score[cur_node] * degree[parent_node] / sum_degree
for e in bet_dic:
if e not in self.new_bet_dic:
self.new_bet_dic[e] = bet_dic[e]
if self.new_bet_dic[e] < 5 * self.num_nodes:
self.done[e] = 1
else:
self.new_bet_dic[e] +=bet_dic[e]
if e in self.done:
if self.new_bet_dic[e] >= 5 * self.num_nodes:
del self.done[e]
def sample(self):
res = random.randint(0, self.num_nodes - 1)
while res in self.took:
res = random.randint(0, self.num_nodes - 1)
self.took[res] = 1
return res
def calc_bet(self):
for k in range(self.num_nodes // 10): #k = |V|/10
self.bfs(self.sample())
if len(self.done) == 0:
break #delta(e) >=c|V|
cur_best = 0
best_key = None
for key in self.new_bet_dic:
if self.new_bet_dic[key] > cur_best:
cur_best = self.new_bet_dic[key]
best_key = key
return list(best_key)
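# --- Illustrative usage sketch (hypothetical stand-in for the expected network object) ---
# The classes above only need `network.adj_dic` (node -> list of neighbours) and a node
# count; edge (2, 3) bridges the two triangles below, so it should come back from
# calc_bet() as the highest-betweenness edge.
if __name__ == '__main__':
    class _ToyNetwork:
        def __init__(self, edges, num_nodes):
            self.adj_dic = {i: [] for i in range(num_nodes)}
            for a, b in edges:
                self.adj_dic[a].append(b)
                self.adj_dic[b].append(a)
    toy = _ToyNetwork([(0, 1), (0, 2), (1, 2), (2, 3), (3, 4), (3, 5), (4, 5)], 6)
    print(BetweenessExact(toy, 6).calc_bet())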
| 32.969543
| 91
| 0.464049
|
46cc91cff768279963449c768dbc47b27a2f17c8
| 242,548
|
py
|
Python
|
eutester/aws/ec2/ec2ops.py
|
nephomaniac/eutester
|
cc71456dd37930096cb8d0a6cac8b42fc8479543
|
[
"BSD-2-Clause"
] | null | null | null |
eutester/aws/ec2/ec2ops.py
|
nephomaniac/eutester
|
cc71456dd37930096cb8d0a6cac8b42fc8479543
|
[
"BSD-2-Clause"
] | null | null | null |
eutester/aws/ec2/ec2ops.py
|
nephomaniac/eutester
|
cc71456dd37930096cb8d0a6cac8b42fc8479543
|
[
"BSD-2-Clause"
] | null | null | null |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2014, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: vic.iglesias@eucalyptus.com
import re
import os
import copy
import socket
import hmac
import hashlib
import base64
from prettytable import PrettyTable, ALL
import time
import types
import traceback
from datetime import datetime, timedelta
from subprocess import Popen, PIPE
from boto.ec2.image import Image
from boto.ec2.instance import Reservation, Instance
from boto.ec2.keypair import KeyPair
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.volume import Volume
from boto.ec2.bundleinstance import BundleInstanceTask
from boto.exception import EC2ResponseError
from boto.ec2.regioninfo import RegionInfo
from boto.resultset import ResultSet
from boto.ec2.securitygroup import SecurityGroup, IPPermissions
from boto.ec2.address import Address
from boto.vpc.subnet import Subnet as BotoSubnet
from boto.vpc import VPCConnection
import boto
from eutester import TestConnection
from cloud_utils.net_utils import sshconnection
from cloud_utils.log_utils import printinfo, get_traceback
from eutester.aws.ec2.euinstance import EuInstance
from eutester.aws.ec2.windows_instance import WinInstance
from eutester.aws.ec2.euvolume import EuVolume
from eutester.aws.ec2.eusnapshot import EuSnapshot
from eutester.aws.ec2.euzone import EuZone
from eutester.aws.ec2.conversiontask import ConversionTask
class EucaSubnet(BotoSubnet):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self._defaultForAz = None
self._mapPublicIpOnLaunch = None
@property
def defaultForAz(self):
return self._defaultForAz
@defaultForAz.setter
def defaultForAz(self, value):
if re.search('true', value, flags=re.IGNORECASE):
self._defaultForAz = True
else:
self._defaultForAz = False
@property
def mapPublicIpOnLaunch(self):
return self._mapPublicIpOnLaunch
@mapPublicIpOnLaunch.setter
def mapPublicIpOnLaunch(self, value):
if re.search('true', value, flags=re.IGNORECASE):
self._mapPublicIpOnLaunch = True
else:
self._mapPublicIpOnLaunch = False
def endElement(self, name, value, connection):
BotoSubnet.endElement(self, name, value, connection)
if name == 'mapPublicIpOnLaunch':
self.mapPublicIpOnLaunch = value
elif name == 'defaultForAz':
self.defaultForAz = value
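# --- Illustrative sketch: the string-to-boolean coercion the setters above perform ---
# AWS XML responses carry booleans as the strings 'true'/'false', so endElement() feeds
# raw strings into the property setters and they normalise them case-insensitively.
def _parse_aws_bool(value):
    return bool(re.search('true', str(value), flags=re.IGNORECASE))

assert _parse_aws_bool('True') is True
assert _parse_aws_bool('false') is False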
EC2RegionData = {
'us-east-1': 'ec2.us-east-1.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
'eu-west-1': 'ec2.eu-west-1.amazonaws.com',
'ap-northeast-1': 'ec2.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'ec2.ap-southeast-1.amazonaws.com'}
class EC2ops(VPCConnection, TestConnection):
enable_root_user_data = """#cloud-config
disable_root: false"""
EUCARC_URL_NAME = 'ec2_url'
def __init__(self, eucarc=None, credpath=None,
aws_access_key_id=None, aws_secret_access_key=None,
is_secure=False, port=None, host=None, region=None, endpoint=None,
boto_debug=0, path=None, APIVersion=None, validate_certs=None,
test_resources=None, logger=None):
# Init test connection first to sort out base parameters...
TestConnection.__init__(self,
eucarc=eucarc,
credpath=credpath,
test_resources=test_resources,
logger=logger,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
is_secure=is_secure,
port=port,
host=host,
APIVersion=APIVersion,
validate_certs=validate_certs,
boto_debug=boto_debug,
path=path)
self.key_dir = "./"
self.ec2_source_ip = None #Source ip on local test machine used to reach instances
if self.boto_debug:
self.show_connection_kwargs()
# Init IAM connection...
try:
VPCConnection.__init__(self, **self._connection_kwargs)
except:
self.show_connection_kwargs()
raise
    def create_tags(self, resource_ids, tags):
        """
        Add tags to the given resources
        :param resource_ids: List of resource IDs to tag
        :param tags: Dict of key value pairs to add, for just a name include a key with a '' value
        """
        self.logger.debug("Adding the following tags:" + str(tags))
        self.logger.debug("To Resources: " + str(resource_ids))
        # Delegate to the underlying boto implementation; calling self.create_tags() here
        # would recurse into this override forever.
        super(EC2ops, self).create_tags(resource_ids, tags)
    def delete_tags(self, resource_ids, tags):
        """
        Remove tags from the given resources
        :param resource_ids: List of resource IDs to untag
        :param tags: Dict of key value pairs to remove, for just a name include a key with a '' value
        """
        self.logger.debug("Deleting the following tags:" + str(tags))
        self.logger.debug("From Resources: " + str(resource_ids))
        # Delegate to the underlying boto implementation; calling self.delete_tags() here
        # would recurse into this override forever.
        super(EC2ops, self).delete_tags(resource_ids, tags)
def add_keypair(self, key_name=None):
"""
Add a keypair with name key_name unless it already exists
:param key_name: The name of the keypair to add and download.
"""
if key_name is None:
key_name = "keypair-" + str(int(time.time()))
self.logger.debug("Looking up keypair " + key_name)
key = []
try:
key = self.get_all_key_pairs(keynames=[key_name])
except EC2ResponseError:
pass
if not key:
self.logger.debug('Creating keypair: %s' % key_name)
# Create an SSH key to use when logging into instances.
key = self.create_key_pair(key_name)
# AWS will store the public key but the private key is
# generated and returned and needs to be stored locally.
# The save method will also chmod the file to protect
# your private key.
key.save(self.key_dir)
#Add the fingerprint header to file
keyfile = open(self.key_dir + key.name + '.pem', 'r')
data = keyfile.read()
keyfile.close()
keyfile = open(self.key_dir+key.name+'.pem', 'w')
keyfile.write('KEYPAIR ' + str(key.name) + ' '+str(key.fingerprint)+"\n")
keyfile.write(data)
keyfile.close()
self.test_resources["keypairs"].append(key)
return key
else:
self.logger.debug("Key " + key_name + " already exists")
def verify_local_keypath(self, keyname, path=None, exten=".pem"):
"""
Convenience function to verify if a given ssh key 'keyname' exists on the local server at 'path'
:returns: the keypath if the key is found.
>>> instance= self.get_instances(state='running')[0]
>>> keypath = self.get_local_keypath(instance.key_name)
"""
if path is None:
path = os.getcwd()
keypath = path + "/" + keyname + exten
try:
os.stat(keypath)
self.logger.debug("Found key at path:"+str(keypath))
except:
raise Exception("key not found at the provided path:"+str(keypath))
return keypath
@printinfo
def get_all_current_local_keys(self, path=None, exten=".pem"):
"""
        Convenience function that lists the keys in the local directory 'path' which also exist on the
        server, to help avoid creating redundant keys during test development.
:param path: Filesystem path to search in
:param exten: extension of private key file
:return: list of key names
"""
keylist = []
keys = self.get_all_key_pairs()
keyfile = None
for k in keys:
self.logger.debug('Checking local path:' + str(path) + " for keyfile: " + str(k.name) + str(exten))
try:
#will raise exception if keypath is not found
keypath = self.verify_local_keypath(k.name, path, exten)
if not keypath:
continue
keyfile = open(keypath,'r')
for line in keyfile.readlines():
if re.search('KEYPAIR',line):
fingerprint = line.split()[2]
break
keyfile.close()
if fingerprint == k.fingerprint:
self.logger.debug('Found file with matching finger print for key:'+k.name)
keylist.append(k)
except:
self.logger.debug('Did not find local match for key:'+str(k.name))
finally:
if keyfile and not keyfile.closed:
keyfile.close()
return keylist
def delete_keypair(self,keypair):
"""
Delete the keypair object passed in and check that it no longer shows up
:param keypair: Keypair object to delete and check
:return: boolean of whether the operation succeeded
"""
name = keypair.name
self.logger.debug( "Sending delete for keypair: " + name)
keypair.delete()
try:
keypair = self.get_all_key_pairs(keynames=[name])
except EC2ResponseError:
keypair = []
if len(keypair) > 0:
self.fail("Keypair found after attempt to delete it")
return False
return True
@printinfo
def get_windows_instance_password(self,
instance,
private_key_path=None,
key=None,
dir=None,
exten=".pem",
encoded=True):
"""
Get password for a windows instance.
:param instance: euinstance object
:param private_key_path: private key file used to decrypt password
:param key: name of private key
:param dir: Path to private key
:param exten: extension of private key
:param encoded: boolean of whether string returned from server is Base64 encoded
:return: decrypted password
:raise: Exception when private key cannot be found on filesystem
"""
self.logger.debug("get_windows_instance_password, instance:"+str(instance.id)+", keypath:"+str(private_key_path)+
", dir:"+str(dir)+", exten:"+str(exten)+", encoded:"+str(encoded))
key = key or self.get_keypair(instance.key_name)
if private_key_path is None and key is not None:
private_key_path = str(self.verify_local_keypath(key.name, dir, exten))
if not private_key_path:
raise Exception('get_windows_instance_password, keypath not found?')
encrypted_string = self.get_password_data(instance.id)
if encoded:
string_to_decrypt = base64.b64decode(encrypted_string)
else:
string_to_decrypt = encrypted_string
popen = Popen(['openssl', 'rsautl', '-decrypt', '-inkey',
private_key_path, '-pkcs'], stdin=PIPE, stdout=PIPE)
(stdout, _) = popen.communicate(string_to_decrypt)
return stdout
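    # --- Illustrative sketch (not part of eutester; method name is hypothetical): the same
    # RSA decryption done in-process with the 'cryptography' package instead of shelling out
    # to the openssl CLI.  Assumes a plain PEM private key file, i.e. without the extra
    # KEYPAIR header line that add_keypair() prepends to locally saved keys.
    @staticmethod
    def _decrypt_password_data_sketch(encrypted_b64, private_key_pem_path):
        import base64
        from cryptography.hazmat.primitives import serialization
        from cryptography.hazmat.primitives.asymmetric import padding
        with open(private_key_pem_path, 'rb') as pem_file:
            private_key = serialization.load_pem_private_key(pem_file.read(), password=None)
        # EC2 returns the Administrator password RSA-encrypted with the keypair's public
        # key and base64-encoded; PKCS#1 v1.5 padding matches 'openssl rsautl -pkcs'.
        return private_key.decrypt(base64.b64decode(encrypted_b64), padding.PKCS1v15())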
@printinfo
def add_group(self, group_name=None, description=None, fail_if_exists=False ):
"""
        Add a security group to the system with name group_name; if it already exists, don't create it
:param group_name: Name of the security group to create
:param fail_if_exists: IF set, will fail if group already exists, otherwise will return the existing group
:return: boto group object upon success or None for failure
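        Example (illustrative; 'my-test-group' is a placeholder name):
        >>> group = self.add_group('my-test-group', description='group used for testing')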
"""
if group_name is None:
group_name = "group-" + str(int(time.time()))
if self.check_group(group_name):
if fail_if_exists:
self.fail( "Group " + group_name + " already exists")
else:
self.logger.debug( "Group " + group_name + " already exists")
return self.get_security_group(name=group_name)
else:
self.logger.debug( 'Creating Security Group: %s' % group_name)
# Create a security group to control access to instance via SSH.
if not description:
description = group_name
group = self.create_security_group(group_name, description)
self.test_resources["security-groups"].append(group)
return self.get_security_group(name=group_name)
def delete_group(self, group):
"""
Delete the security group object passed in and check that it no longer shows up
:param group: Group object to delete and check
:return: bool whether operation succeeded
"""
name = group.name
self.logger.debug( "Sending delete for security group: " + name )
group.delete()
if self.check_group(name):
self.fail("Group still found after attempt to delete it")
return False
return True
def check_group(self, group_name):
"""
Check if a group with group_name exists in the system
:param group_name: Group name to check for existence
:return: bool whether operation succeeded
"""
self.logger.debug( "Looking up group " + group_name )
try:
group = self.get_all_security_groups(groupnames=[group_name])
except EC2ResponseError:
return False
if not group:
return False
else:
return True
@printinfo
def authorize_group_by_name(self,
group_name="default",
port=22,
end_port=None,
protocol="tcp",
cidr_ip="0.0.0.0/0",
src_security_group=None,
src_security_group_name=None,
src_security_group_owner_id=None,
force_args=False):
"""
Authorize the group with group_name
:param group_name: Name of the group to authorize, default="default"
:param port: Port to open, default=22
:param end_port: End of port range to open, defaults to 'port' arg.
:param protocol: Protocol to authorize, default=tcp
:param cidr_ip: CIDR subnet to authorize, default="0.0.0.0/0" everything
        :param src_security_group: Grant access to 'group_name' from this boto security group object, default=None
        :param src_security_group_name: Grant access to 'group_name' from the group with this name, default=None
        :param src_security_group_owner_id: Account id owning src_security_group_name, default=None
        :param force_args: boolean to send arguments w/o the test method sanitizing them
        :return: True on success
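        Example (illustrative; opens tcp/22 from anywhere on a placeholder group):
        >>> self.authorize_group_by_name(group_name='my-test-group', port=22, protocol='tcp', cidr_ip='0.0.0.0/0')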
"""
if not force_args:
if src_security_group or src_security_group_name:
cidr_ip=None
port=None
protocol=None
if src_security_group:
src_security_group_owner_id= src_security_group_owner_id or src_security_group.owner_id
src_security_group_name = src_security_group_name or src_security_group.name
if src_security_group_name and not src_security_group_owner_id:
group = self.get_security_group(name=src_security_group_name)
src_security_group_owner_id = group.owner_id
if end_port is None:
end_port = port
old_api_version = self.APIVersion
try:
#self.ec2.APIVersion = "2009-10-31"
if src_security_group_name:
self.logger.debug( "Attempting authorization of: {0}, from group:{1},"
" on port range: {2} to {3}, proto:{4}"
.format(group_name, src_security_group, port,
end_port, protocol))
else:
self.logger.debug( "Attempting authorization of:{0}, on port "
"range: {1} to {2}, proto:{3} from {4}"
.format(group_name, port, end_port,
protocol, cidr_ip))
self.authorize_security_group_deprecated(group_name,
ip_protocol=protocol,
from_port=port,
to_port=end_port,
cidr_ip=cidr_ip,
src_security_group_name=src_security_group_name,
src_security_group_owner_id=src_security_group_owner_id)
return True
except self.ResponseError, e:
if e.code == 'InvalidPermission.Duplicate':
self.logger.debug( 'Security Group: %s already authorized' % group_name )
else:
raise
finally:
self.APIVersion = old_api_version
def authorize_group(self,
group,
port=22,
end_port=None,
protocol="tcp",
cidr_ip="0.0.0.0/0",
src_security_group=None,
src_security_group_name=None,
src_security_group_owner_id=None,
force_args=False):
"""
Authorize the boto.group object
:param group: boto.group object
:param port: Port to open, default=22
:param end_port: End of port range to open, defaults to 'port' arg.
:param protocol: Protocol to authorize, default=tcp
:param cidr_ip: CIDR subnet to authorize, default="0.0.0.0/0" everything
:param src_security_group_name: Grant access to 'group' from src_security_group_name, default=None
:param force_args: boolean to send arguments w/o the test method sanitizing them
:return: True on success
:raise: Exception if operation fails
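        Example (illustrative):
        >>> group = self.add_group('my-test-group')
        >>> self.authorize_group(group, port=22, protocol='tcp')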
"""
return self.authorize_group_by_name(group.name,
port=port,
end_port=end_port,
protocol=protocol,
cidr_ip=cidr_ip,
src_security_group=src_security_group,
src_security_group_name=src_security_group_name,
src_security_group_owner_id=src_security_group_owner_id,
force_args=force_args)
def revoke_all_rules(self, group):
if not isinstance(group, SecurityGroup):
group = self.get_security_group(name=group)
else:
# group obj does not have update() yet...
group = self.get_security_group(id=group.id)
if not group:
raise ValueError('Security group "{0}" not found'.format(group))
self.show_security_group(group)
assert isinstance(group, SecurityGroup)
rules = copy.copy(group.rules)
for r in rules:
self.logger.debug('Attempting to revoke rule:{0}, grants:{1}'
.format(r, r.grants))
assert isinstance(r, IPPermissions)
for grant in r.grants:
if grant.cidr_ip:
self.logger.debug('{0}.revoke(ip_protocol:{1}, from_port:{2}, '
'to_port{3}, cidr_ip:{4})'.format(group.name,
r.ip_protocol,
r.from_port,
r.to_port,
grant))
group.revoke(ip_protocol=r.ip_protocol, from_port=r.from_port,
to_port=r.to_port, cidr_ip=grant.cidr_ip)
if grant.name or grant.group_id:
group.revoke(ip_protocol=r.ip_protocol,
from_port=r.from_port,
to_port=r.to_port,
src_group=grant,
cidr_ip=None )
self.logger.debug('{0}.revoke(ip_protocol:{1}, from_port:{2}, '
'to_port:{3}, src_group:{4})'.format(group.name,
r.ip_protocol,
r.from_port,
r.to_port,
grant))
group = self.get_security_group(id=group.id)
self.logger.debug('AFTER removing all rules...')
self.show_security_group(group)
return group
def show_security_group(self, group):
try:
from prettytable import PrettyTable, ALL
except ImportError as IE:
            self.logger.debug('prettytable import failed, cannot show security group:' + str(IE))
return
group = self.get_security_group(id=group.id)
if not group:
raise ValueError('Show sec group failed. Could not fetch group:'
+ str(group))
header = PrettyTable(["Security Group:" + group.name + "/" + group.id])
table = PrettyTable(["CIDR_IP", "SRC_GRP_NAME",
"SRC_GRP_ID", "OWNER_ID", "PORT",
"END_PORT", "PROTO"])
table.align["CIDR_IP"] = 'l'
table.padding_width = 1
for rule in group.rules:
port = rule.from_port
end_port = rule.to_port
proto = rule.ip_protocol
for grant in rule.grants:
table.add_row([grant.cidr_ip, grant.name,
grant.group_id, grant.owner_id, port,
end_port, proto])
table.hrules = ALL
header.add_row([str(table)])
self.logger.debug("\n{0}".format(str(header)))
def revoke(self, group,
port=22,
protocol="tcp",
cidr_ip="0.0.0.0/0",
src_security_group_name=None,
src_security_group_owner_id=None):
if isinstance(group, SecurityGroup):
group_name = group.name
else:
group_name = group
if src_security_group_name:
self.logger.debug( "Attempting revoke of " + group_name + " from " + str(src_security_group_name) +
" on port " + str(port) + " " + str(protocol) )
else:
self.logger.debug( "Attempting revoke of " + group_name + " on port " + str(port) + " " + str(protocol) )
self.revoke_security_group(group_name,
ip_protocol=protocol,
from_port=port,
to_port=port,
cidr_ip=cidr_ip,
src_security_group_name=src_security_group_name,
src_security_group_owner_id=src_security_group_owner_id)
def terminate_single_instance(self, instance, timeout=300 ):
"""
Terminate an instance
:param instance: boto.instance object to terminate
:param timeout: Time in seconds to wait for terminated state
:return: True on success
"""
instance.terminate()
return self.wait_for_instance(instance, state='terminated', timeout=timeout)
def wait_for_instance(self,instance, state="running", poll_count = None, timeout=480):
"""
Wait for the instance to enter the state
:param instance: Boto instance object to check the state on
:param state: state that we are looking for
:param poll_count: Number of 10 second poll intervals to wait before failure (for legacy test script support)
:param timeout: Time in seconds to wait before failure
:return: True on success
:raise: Exception when instance does not enter proper state
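        Example (illustrative):
        >>> instance = self.get_instances(state='pending')[0]
        >>> self.wait_for_instance(instance, state='running', timeout=600)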
"""
if poll_count is not None:
timeout = poll_count*10
self.logger.debug( "Beginning poll loop for instance " + str(instance) + " to go to " + str(state) )
instance.update()
instance_original_state = instance.state
start = time.time()
elapsed = 0
### If the instance changes state or goes to the desired state before my poll count is complete
while( elapsed < timeout ) and (instance.state != state) and (instance.state != 'terminated'):
#poll_count -= 1
self.logger.debug( "Instance("+instance.id+") State("+instance.state+"), elapsed:"+str(elapsed)+"/"+str(timeout))
time.sleep(10)
instance.update()
elapsed = int(time.time()- start)
if instance.state != instance_original_state:
break
self.logger.debug("Instance("+instance.id+") State("+instance.state+") time elapsed (" +str(elapsed).split('.')[0]+")")
if instance.state != state:
raise Exception( str(instance) + " did not enter "+str(state)+" state after elapsed:"+str(elapsed))
self.logger.debug( str(instance) + ' is now in ' + instance.state )
return True
def wait_for_reservation(self,reservation, state="running",timeout=480):
"""
Wait for an entire reservation to enter the state
:param reservation: Boto reservation object to check the state on
:param state: state that we are looking for
:param timeout: How long in seconds to wait for state
:return: True on success
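        Example (illustrative; 'reservation' is the boto reservation returned from a prior instance launch):
        >>> self.wait_for_reservation(reservation, state='running', timeout=600)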
"""
aggregate_result = True
instance_list = reservation
if isinstance(reservation, Reservation):
instance_list = reservation.instances
self.logger.debug( "Beginning poll loop for the " + str(len(instance_list)) + " instance found in " + str(instance_list) )
for instance in instance_list:
if not self.wait_for_instance(instance, state, timeout=timeout):
aggregate_result = False
return aggregate_result
@printinfo
def create_volume(self, zone, size=1, eof=True, snapshot=None, timeout=0, poll_interval=10,timepergig=120):
"""
        Create a new EBS volume, then wait for it to go to the 'available' state; either size or snapshot is mandatory
:param zone: Availability zone to create the volume in
:param size: Size of the volume to be created
:param eof: Boolean, indicates whether to end on first instance of failure
:param snapshot: Snapshot to create the volume from
:param timeout: Time to wait before failing. timeout of 0 results in size of volume * timepergig seconds
:param poll_interval: How often in seconds to poll volume state
:param timepergig: Time to wait per gigabyte size of volume, used when timeout is set to 0
        :return: EuVolume object for the newly created volume
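        Example (illustrative; 'PARTI00' is a placeholder availability zone):
        >>> vol = self.create_volume(zone='PARTI00', size=2)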
"""
return self.create_volumes(zone, size=size, count=1, mincount=1, eof=eof, snapshot=snapshot, timeout=timeout, poll_interval=poll_interval,timepergig=timepergig)[0]
@printinfo
def create_volumes(self,
zone,
size = 1,
count = 1,
mincount = None,
eof = True,
monitor_to_state = 'available',
delay = 0,
snapshot = None,
timeout=0,
poll_interval = 10,
timepergig = 120 ):
"""
Definition:
        Create multiple new EBS volumes, then wait for them to go to the available state;
size or snapshot is mandatory
:param zone: Availability zone to create the volume in
:param size: Size of the volume to be created
:param count: Number of volumes to be created
:param mincount: Minimum number of volumes to be created to be considered a success.Default = 'count'
        :param eof: Boolean, indicates whether to end on first instance of failure
        :param monitor_to_state: String, if not 'None' will monitor created volumes to the provided state
        :param delay: Integer, seconds to wait between each volume creation request
:param snapshot: Snapshot to create the volume from
:param timeout: Time to wait before failing. timeout of 0 results in size of volume * timepergig seconds
:param poll_interval: How often in seconds to poll volume state
:param timepergig: Time to wait per gigabyte size of volume, used when timeout is set to 0
:return: list of volumes
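        Example (illustrative; 'PARTI00' is a placeholder availability zone):
        >>> vols = self.create_volumes(zone='PARTI00', size=1, count=3, mincount=2, eof=False)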
"""
start = time.time()
elapsed = 0
volumes = []
mincount = mincount or count
if mincount > count:
raise Exception('Mincount can not be greater than count')
#if timeout is set to 0, use size to create a reasonable timeout for this volume creation
if timeout == 0:
if snapshot is not None:
timeout = timepergig * int(snapshot.volume_size)
else:
timeout = timepergig * size
if snapshot and not hasattr(snapshot,'eutest_volumes'):
snapshot = self.get_snapshot(snapshot.id)
self.logger.debug( "Sending create volume request, count:"+str(count) )
for x in xrange(0,count):
vol = None
try:
cmdstart = time.time()
vol = self.create_volume(size, zone, snapshot)
cmdtime = time.time() - cmdstart
if vol:
vol = EuVolume.make_euvol_from_vol(vol, tester=self, cmdstart=cmdstart)
vol.eutest_cmdstart = cmdstart
vol.eutest_createorder = x
vol.eutest_cmdtime = "{0:.2f}".format(cmdtime)
vol.size = size
volumes.append(vol)
except Exception, e:
if eof:
#Clean up any volumes from this operation and raise exception
for vol in volumes:
vol.delete()
raise e
else:
self.logger.debug("Caught exception creating volume,eof is False, continuing. Error:"+str(e))
if delay:
time.sleep(delay)
if len(volumes) < mincount:
#Clean up any volumes from this operation and raise exception
for vol in volumes:
vol.delete()
raise Exception("Created "+str(len(volumes))+"/"+str(count)+
' volumes. Less than minimum specified:'+str(mincount))
self.logger.debug( str(len(volumes))+"/"+str(count)+" requests for volume creation succeeded." )
if volumes:
self.print_euvolume_list(volumes)
if not monitor_to_state:
self.test_resources["volumes"].extend(volumes)
if snapshot:
snapshot.eutest_volumes.extend(volumes)
return volumes
        #If we began the creation of the min volumes, monitor till completion, otherwise cleanup and fail out
retlist = self.monitor_created_euvolumes_to_state(volumes,
eof=eof,
mincount=mincount,
state=monitor_to_state,
poll_interval=poll_interval,
timepergig=timepergig)
self.test_resources["volumes"].extend(retlist)
if snapshot:
snapshot.eutest_volumes.extend(retlist)
return retlist
@printinfo
def monitor_created_euvolumes_to_state(self,
volumes,
eof=True,
mincount=None,
state='available',
poll_interval=10,
deletefailed=True,
size=1,
timepergig=120):
"""
Description:
Monitors a list of created volumes until 'state' or failure. Allows for a variety of volumes, using
different types and creation methods to be monitored by a central method.
:param volumes: list of created volumes
:param eof: boolean, if True will end on first failure
:param mincount: minimum number of successful volumes, else fail
:param state: string indicating the expected state to monitor to
:param deletefailed: delete all failed volumes, in eof case deletes 'volumes' list.
In non-eof, if mincount is met, will delete any failed volumes.
:param timepergig: integer, time allowed per gig before failing.
:param poll_interval: int seconds to wait between polling for status
        :param size: int size in gigs used as a fallback for the per-volume timeout calculation when a volume's size is unknown
"""
retlist = []
failed = []
elapsed = 0
if not volumes:
raise Exception("Volumes list empty in monitor_created_volumes_to_state")
count = len(volumes)
mincount = mincount or count
self.logger.debug("Monitoring "+str(count)+" volumes for at least "+str(mincount)+" to reach state:"+str(state))
origlist = copy.copy(volumes)
self.logger.debug("Monitoring "+str(count)+" volumes for at least "+str(mincount)+" to reach state:"+str(state))
for volume in volumes:
if not isinstance(volume, EuVolume):
raise Exception("object not of type EuVolume. Found type:"+str(type(volume)))
#volume = EuVolume()
# Wait for the volume to be created.
self.logger.debug( "Polling "+str(len(volumes))+" volumes for status:\""+str(state)+"\"...")
start = time.time()
while volumes:
for volume in volumes:
volume.update()
voltimeout = timepergig * (volume.size or size)
elapsed = time.time()-start
self.logger.debug("Volume #"+str(volume.eutest_createorder)+" ("+volume.id+") State("+volume.status+
"), seconds elapsed: " + str(int(elapsed))+'/'+str(voltimeout))
if volume.status == state:
#add to return list and remove from volumes list
retlist.append(volumes.pop(volumes.index(volume)))
else:
if elapsed > voltimeout:
volume.status = 'timed-out'
if volume.status == 'failed' or volume.status == 'timed-out':
if eof:
#Clean up any volumes from this operation and raise exception
self.logger.debug(str(volume.id) + " - Failed current status:" + str(volume.status))
if deletefailed:
self.logger.debug('Failure caught in monitor volumes, attempting to delete all volumes...')
for vol in origlist:
try:
self.delete_volume(vol)
except Exception, e:
self.logger.debug('Could not delete volume:'+str(vol.id)+", err:"+str(e))
raise Exception(str(volume) + ", failed to reach state:"+str(state)+", vol status:"+
str(volume.eutest_laststatus)+", test status:"+str(vol.status))
else:
#End on failure is not set, so record this failure and move on
msg = str(volume) + " went to: " + volume.status
self.logger.debug(msg)
volume.eutest_failmsg = msg
failed.append(volumes.pop(volumes.index(volume)))
                            #Fail fast if we can no longer meet mincount
if (count - len(failed)) < mincount:
if deletefailed:
buf = ""
for failedvol in failed:
buf += str(failedvol.id)+"-state:"+str(failedvol.status) + ","
self.logger.debug(buf)
for vol in origlist:
self.logger.debug('Failure caught in monitor volumes, attempting to delete all volumes...')
try:
self.delete_volume(vol)
except Exception, e:
self.logger.debug('Could not delete volume:'+str(vol.id)+", err:"+str(e))
raise Exception("Mincount of volumes did not enter state:"+str(state)+" due to faults")
self.logger.debug("----Time Elapsed:"+str(int(elapsed))+", Waiting on "+str(len(volumes))+
" volumes to enter state:"+str(state)+"-----")
if volumes:
time.sleep(poll_interval)
else:
break
#We have at least mincount of volumes, delete any failed volumes
if failed and deletefailed:
self.logger.debug( "Deleting volumes that never became available...")
for volume in failed:
self.logger.debug('Failure caught in monitor volumes, attempting to delete all volumes...')
try:
self.delete_volume(volume)
except Exception, e:
self.logger.debug('Could not delete volume:'+str(volume.id)+", err:"+str(e))
buf = str(len(failed))+'/'+str(count)+ " Failed volumes after " +str(elapsed)+" seconds:"
for failedvol in failed:
buf += str(failedvol.id)+"-state:"+str(failedvol.status)+","
self.logger.debug(buf)
self.print_euvolume_list(origlist)
return retlist
@printinfo
def monitor_euvolumes_to_status(self,
euvolumes,
status = None,
attached_status = None,
poll_interval=10,
timeout=180,
eof=True,
validate_args=True):
"""
(See: monitor_created_euvolumes_to_state() if monitoring newly created volumes, otherwise this method is
intended for monitoring attached and in-use states of volume(s). )
Definition: monitors a list of euvolumes to a given state.
Some example valid states:
status = available, attached_status = None
status = in-use, attached_status = attached, attaching, detaching
:param euvolumes: list of euvolumes to monitor
:param status: state of volume expected: ie 'in-use', 'available', 'deleted'
:param attached_status: state of volume's attached data. ie 'attached', 'attaching', 'detaching', 'none'
:param poll_interval: integer seconds between polling for status updates
:param timeout: time to wait before failing
:param eof: exit on first failure encountered, otherwise wait until other volumes pass/fail. Default=True
:param validate_args: boolean, Will check args for a valid status/available_status pair.
If False will monitor to a non-valid state for testing purposes
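        Example (illustrative; waits for previously attached volumes to report as attached):
        >>> self.monitor_euvolumes_to_status(euvolumes, status='in-use', attached_status='attached', timeout=300)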
"""
good = []
failed = []
monitor = []
failmsg = ""
        self.logger.debug('monitor_euvolumes_to_status:'+str(status)+"/"+str(attached_status))
if attached_status and not status:
status = 'in-use'
#check for valid states in given arguments...
if validate_args:
if (status != 'available') and (status != 'in-use') and (status != 'deleted') and (status != 'failed'):
                raise Exception('Invalid volume states in monitor request:'+str(status)+" != in-use, available, deleted or failed")
if attached_status is None:
if status != 'available':
raise Exception('Invalid volume states in monitor request:'+str(status)+"/"+str(attached_status))
else:
                if (attached_status == 'attached') or (attached_status == 'attaching') or \
                        (attached_status == 'detaching'):
if status != 'in-use':
raise Exception('Invalid volume states in monitor request:'+str(status)+"/"+str(attached_status))
else:
raise Exception('Invalid volume states in monitor request:'+str(status)+"/"+str(attached_status)+
" != attached, attaching, detaching")
start = time.time()
elapsed = 0
self.logger.debug('Updating volume list before monitoring...')
for vol in euvolumes:
try:
vol = self.get_volume(vol.id)
if not isinstance(vol, EuVolume):
vol = EuVolume.make_euvol_from_vol(vol,self)
monitor.append(vol)
except:
self.logger.debug(get_traceback())
self.print_euvolume_list(monitor)
while monitor and (elapsed < timeout):
elapsed = int(time.time()-start)
for vol in monitor:
last_attached_status = vol.eutest_attached_status
vol.update()
if vol.eutest_attached_instance_id:
instance_debug_str = ', (att_instance'+str(vol.eutest_attached_instance_id)+")"
else:
instance_debug_str = ""
self.logger.debug("Monitoring volume:"+str(vol.id)+". Currently state/attached_state:'"+str(vol.status)
+ "/" + str(vol.eutest_attached_status)+"', needed: '"+str(status)+"/"+str(attached_status)+
"'"+instance_debug_str)
#fail fast for improper state transitions when attaching:
if attached_status and last_attached_status and not vol.eutest_attached_status:
failmsg += str(vol.id)+" - state:"+str(vol.status)+", reverted from attached state:'"\
+str(last_attached_status)+"' to '"+str(vol.eutest_attached_status)+"', elapsed:" \
+str(elapsed)+"/"+str(timeout)+"\n"
if eof:
raise VolumeStateException(failmsg)
else:
failed.append(monitor.pop(monitor.index(vol)))
continue
if (vol.status == 'deleted' and status != 'deleted') or (vol.status == 'failed' and status != 'failed'):
failmsg += str(vol.id)+" - detected error in state:'"+str(vol.status)+\
"/"+str(vol.eutest_attached_status)+"'"+str(elapsed)+"/"+str(timeout)+"\n"
if eof:
raise Exception(failmsg)
else:
failed.append(monitor.pop(monitor.index(vol)))
continue
if vol.status == status:
if vol.eutest_attached_status == attached_status:
good.append(monitor.pop(monitor.index(vol)))
self.logger.debug('Waiting for '+str(len(monitor))+ " remaining Volumes. Sleeping for poll_interval: "
+str(poll_interval)+" seconds ...")
self.print_euvolume_list(euvolumes)
time.sleep(poll_interval)
self.logger.debug('Done with monitor volumes after '+str(elapsed)+"/"+str(timeout)+"...")
self.print_euvolume_list(euvolumes)
if monitor:
for vol in monitor:
failmsg += str(vol.id)+" -TIMED OUT current state/attached_state:'" \
+str(vol.status) + "/" + str(vol.eutest_attached_status) + "' ! = '" \
+ str(status)+"/" + str(attached_status)+ "', elapsed:" \
+str(elapsed)+"/"+str(timeout)+"\n"
failed.extend(monitor)
        #finally raise an exception if any failures were detected along the way...
if failmsg:
self.print_euvolume_list(failed)
raise Exception(failmsg)
return good
def print_euvolume_list(self, euvolumelist=None):
"""
:param euvolumelist: list of euvolume
"""
buf=""
euvolumes = []
if not euvolumelist:
euvolumelist = self.get_volumes()
if not euvolumelist:
self.logger.debug('No volumes to print')
return
for volume in euvolumelist:
if not isinstance(volume, EuVolume):
self.logger.debug("object not of type EuVolume. Found type:"+str(type(volume)))
volume = EuVolume.make_euvol_from_vol(volume=volume, tester=self)
euvolumes.append(volume)
if not euvolumes:
return
volume = euvolumes.pop()
buf = volume.printself()
for volume in euvolumes:
buf += volume.printself(title=False)
self.logger.debug("\n"+str(buf)+"\n")
def print_eusnapshot_list(self,eusnapshots=None):
"""
:param eusnapshots: list of eusnapshots
"""
buf=""
print_list = []
if not eusnapshots:
eusnapshots = self.get_snapshots()
if not eusnapshots:
self.logger.debug('No snapshots to print')
return
for snapshot in eusnapshots:
if not isinstance(snapshot, EuSnapshot):
self.logger.debug("object not of type EuSnapshot. Found type:"+str(type(snapshot)))
snapshot = EuSnapshot.make_eusnap_from_snap(snapshot=snapshot, tester=self)
print_list.append(snapshot)
snapshot = print_list.pop()
buf = snapshot.printself()
for snapshot in print_list:
buf += snapshot.printself(title=False)
self.logger.debug("\n"+str(buf)+"\n")
def wait_for_volume(self, volume, status="available"):
def get_volume_state():
volume.update()
return volume.status
self.wait_for_result(get_volume_state, status)
def delete_volume(self, volume, poll_interval=10, timeout=180):
"""
Delete the EBS volume then check that it no longer exists
:param poll_interval: int seconds to wait between polls to server for status
:param timeout: int in seconds used for time to wait before failure
:param volume: Volume object to delete
:return: bool, success of the operation
"""
try:
self.delete_volume(volume.id)
except Exception, e:
self.logger.debug('Caught err while sending delete for volume:'+ str(volume.id) + " err:" + str(e))
self.logger.debug('Monitoring to deleted state after catching error...')
self.logger.debug( "Sent delete for volume: " + str(volume.id) + ", monitor to deleted state or failure" )
start = time.time()
elapsed = 0
volume_id = volume.id
volume.update()
while elapsed < timeout:
try:
chk_volume = self.get_volume(volume_id=volume_id)
if not chk_volume:
self.logger.debug(str(volume_id) + ', Volume no longer exists on system, deleted')
break
chk_volume.update()
self.logger.debug( str(chk_volume) + " in " + chk_volume.status + " sleeping:"+str(poll_interval)+", elapsed:"+str(elapsed))
if chk_volume.status == "deleted":
break
time.sleep(poll_interval)
elapsed = int(time.time()-start)
except EC2ResponseError as e:
if e.status == 400:
self.logger.debug(str(volume_id) + "no longer exists in system")
if volume in self.test_resources['volumes']:
self.test_resources['volumes'].remove(volume)
return True
else:
raise e
if volume in self.test_resources['volumes']:
self.test_resources['volumes'].remove(volume)
return True
if volume.status != 'deleted':
self.fail(str(volume) + " left in " + volume.status + ',elapsed:'+str(elapsed))
return False
return True
def delete_volumes(self, volume_list, poll_interval=10, force_send=False, timeout=180):
"""
Deletes a list of EBS volumes then checks for proper state transition
:param volume_list: List of volume objects to be deleted
:param poll_interval: integer, seconds between polls on volumes' state
:param timeout: integer time allowed before this method fails
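        Example (illustrative):
        >>> vols = self.get_volumes()
        >>> self.delete_volumes(vols)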
"""
errmsg = ''
errlist = []
if volume_list:
vollist = copy.copy(volume_list)
else:
raise Exception("delete_volumes: volume_list was empty")
        for volume in vollist:
            previous_status = None
            try:
self.logger.debug( "Sending delete for volume: " + str(volume.id))
if volume in self.test_resources['volumes']:
self.test_resources['volumes'].remove(volume)
volumes = self.get_all_volumes([volume.id])
if len(volumes) == 1:
volume = volumes[0]
#previous_status = volume.status
#self.ec2.delete_volume(volume.id)
elif len(volumes) == 0:
vollist.remove(volume)
continue
previous_status = volume.status
self.delete_volume(volume.id)
except EC2ResponseError, be:
err = "ERROR: " + str(volume.id) + ", " + str(be.status)+ ", " + str(be.reason) + \
", " +str(be.error_message) + "\n"
if previous_status == 'deleting':
self.logger.debug(str(volume.id)+ ":" + str(previous_status) + ', err:' + str(err))
else:
errmsg += err
errlist.append(volume)
self.logger.debug(err)
for volume in errlist:
if volume in vollist:
vollist.remove(volume)
start = time.time()
elapsed = 0
while vollist and elapsed < timeout:
for volume in vollist:
volumes = self.get_all_volumes([volume.id])
if len(volumes) == 1:
volume = volumes[0]
elif len(volumes) == 0:
vollist.remove(volume)
self.logger.debug("Volume no longer found")
continue
self.logger.debug(str(volume) + " in " + volume.status)
                if volume and volume.status == "deleted" and volume in vollist:
vollist.remove(volume)
if volume in self.test_resources['volumes']:
self.test_resources['volumes'].remove(volume)
elapsed = int(time.time()-start)
time.sleep(poll_interval)
self.logger.debug("---Waiting for:"+str(len(vollist))+" volumes to delete. Sleeping:"+
str(poll_interval)+", elapsed:"+str(elapsed)+"/"+str(timeout)+"---")
if vollist or errmsg:
for volume in vollist:
errmsg += "ERROR:"+str(volume) + " left in " + volume.status + ',elapsed:'+str(elapsed) + "\n"
raise Exception(errmsg)
def delete_all_volumes(self):
"""
Deletes all volumes on the cloud
"""
volumes = self.get_all_volumes()
self.delete_volumes(volumes)
@printinfo
def attach_volume(self, instance, volume, device_path, pause=10, timeout=120):
"""
Attach a volume to an instance
:param instance: instance object to attach volume to
:param volume: volume object to attach
:param device_path: device name to request on guest
:param pause: Time in seconds to wait before checking volume state
:param timeout: Total time in seconds to wait for volume to reach the attached state
        :return: True on success
        :raise: Exception on failure to reach the attached state, or if the volume reverts from a previous attached state
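        Example (illustrative; '/dev/vdb' is a placeholder guest device name):
        >>> self.attach_volume(instance, volume, '/dev/vdb')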
"""
self.logger.debug("Sending attach for " + str(volume) + " to be attached to " + str(instance) +
" at requested device " + device_path)
volume.attach(instance.id,device_path )
start = time.time()
elapsed = 0
volume.update()
status = ""
failmsg = ""
laststatus=None
while elapsed < timeout:
volume.update()
attach_status=None
if volume.attach_data is not None:
if re.search("attached",str(volume.attach_data.status)):
self.logger.debug(str(volume) + ", Attached: " + volume.status+ " - " +
str(volume.attach_data.status) + ", elapsed:"+str(elapsed))
return True
else:
attach_status = volume.attach_data.status
if attach_status:
laststatus = attach_status
elif laststatus and not attach_status:
failmsg += str(volume.id)+" - state:"+str(volume.status)+", reverted from attached state:'" \
+str(laststatus)+"' to '"+str(attach_status)+"', elapsed:" \
+str(elapsed)+"/"+str(timeout)+"\n"
raise VolumeStateException(failmsg)
self.logger.debug( str(volume) + ", state:" + volume.status+', attached status:'+str(attach_status) +
", elapsed:"+str(elapsed)+'/'+str(timeout))
            self.sleep(pause)
            elapsed = int(time.time()-start)
        raise Exception(str(volume.id) + ':ATTACH FAILED - volume did not reach attached state after ' +
                        str(elapsed) + '/' + str(timeout) + ' seconds')
def detach_volume(self, volume, pause = 10, timeout=60):
"""
Detach a volume
:param volume: volume to detach
:param pause: Time in seconds to wait before checking volume state
:param timeout: Total time in seconds to wait for volume to reach the attached state
:return: True on success
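        Example (illustrative):
        >>> self.detach_volume(volume, timeout=120)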
"""
attach_data_status = None
instance_id = None
if volume is None:
raise Exception(str(volume) + " does not exist")
volume.detach()
self.logger.debug( "Sent detach for volume: " + volume.id + " which is currently in state: " + volume.status)
start = time.time()
elapsed = 0
while elapsed < timeout:
volume.update()
if volume.status != "in-use":
self.logger.debug(str(volume) + " left in " + volume.status)
return True
if volume.attach_data is not None:
attach_data_status = volume.attach_data.status
if not instance_id:
instance_id = volume.attach_data.instance_id
else:
attach_data_status = None
self.logger.debug( str(volume) + " state:" + volume.status + ", attached_data:"+
str(attach_data_status)+", pause:"+str(pause)+", instance:"+str(instance_id)+", elapsed:"+str(elapsed))
self.sleep(pause)
elapsed = int(time.time() - start)
raise Exception(str(volume.id)+':DETACH FAILED - Volume status remained at:'+
str(volume.status)+', attach_data_status:'+str(attach_data_status)+", instance: "+str(instance_id))
def get_volume_time_attached(self,volume):
"""
Get the seconds elapsed since the volume was attached.
:type volume: boto volume object
:param volume: The volume used to calculate the elapsed time since attached.
:rtype: integer
:returns: The number of seconds elapsed since this volume was attached.
"""
self.logger.debug("Getting time elapsed since volume attached...")
volume.update()
if volume.attach_data is None:
raise Exception('get_time_since_vol_attached: Volume '+str(volume.id)+" not attached")
#get timestamp from attach_data
attached_time = self.get_datetime_from_resource_string(volume.attach_data.attach_time)
#return the elapsed time in seconds
return time.mktime(datetime.utcnow().utctimetuple()) - time.mktime(attached_time.utctimetuple())
@classmethod
def get_volume_time_created(cls,volume):
"""
Get the seconds elapsed since the volume was created.
:type volume: boto volume object
:param volume: The volume used to calculate the elapsed time since created.
:rtype: integer
:returns: The number of seconds elapsed since this volume was created.
"""
volume.update()
#get timestamp from attach_data
create_time = cls.get_datetime_from_resource_string(volume.create_time)
#return the elapsed time in seconds
return time.mktime(datetime.utcnow().utctimetuple()) - time.mktime(create_time.utctimetuple())
@classmethod
def get_snapshot_time_started(cls,snapshot):
"""
Get the seconds elapsed since the snapshot was started.
:type snapshot: boto snapshot object
:param snapshot: The volume used to calculate the elapsed time since started.
:rtype: integer
:returns: The number of seconds elapsed since this snapshot was started.
"""
snapshot.update()
#get timestamp from attach_data
start_time = cls.get_datetime_from_resource_string(snapshot.start_time)
#return the elapsed time in seconds
return time.mktime(datetime.utcnow().utctimetuple()) - time.mktime(start_time.utctimetuple())
@classmethod
def get_instance_time_launched(cls,instance):
"""
        Get the seconds elapsed since the instance was launched.
        :type instance: boto instance object
        :param instance: The instance used to calculate the elapsed time since launched.
        :rtype: integer
        :returns: The number of seconds elapsed since this instance was launched, or None if no launch time is set.
"""
#instance.update()
#get timestamp from launch data
if not instance.launch_time:
return None
launch_time = cls.get_datetime_from_resource_string(instance.launch_time)
#return the elapsed time in seconds
return time.mktime(datetime.utcnow().utctimetuple()) - time.mktime(launch_time.utctimetuple())
@classmethod
def get_datetime_from_resource_string(cls,
timestamp,
time_format="%Y %m %d %H %M %S"):
"""
Convert a typical resource timestamp to datetime time_struct.
:type timestamp: string
:param timestamp: Timestamp held within specific boto resource objects.
Example timestamp format: 2012-09-19T21:24:03.864Z
        :rtype: datetime
        :returns: The datetime representation of the timestamp provided (millisecond precision is dropped).
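        Example (illustrative, worked through the parsing below):
        >>> self.get_datetime_from_resource_string('2012-09-19T21:24:03.864Z')
        datetime.datetime(2012, 9, 19, 21, 24, 3)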
"""
t = re.findall('\w+',str(timestamp).replace('T',' '))
#remove milliseconds from list...
t.pop()
#create a time_struct out of our list
return datetime.strptime(" ".join(t), time_format)
@printinfo
def create_snapshot_from_volume(self, volume, wait_on_progress=40, poll_interval=10, timeout=0, description=""):
"""
Create a new EBS snapshot from an existing volume then wait for it to go to the created state.
By default will poll for poll_count. If wait_on_progress is specified than will wait on "wait_on_progress"
overrides # of poll_interval periods, using wait_on_progress # of periods of poll_interval length in seconds
        w/o progress before failing. If only a volume id is passed, euvolume data will not be transferred to the snapshot created.
        :param volume: (mandatory EuVolume/Volume object) Volume to create the snapshot from
:param wait_on_progress:(optional integer) # of poll intervals to wait while 0 progress is made before exiting,
overrides "poll_count" when used
:param poll_interval: (optional integer) time to sleep between polling snapshot status
:param timeout: (optional integer) over all time to wait before exiting as failure
:param description: (optional string) string used to describe the snapshot
:return: EuSnapshot
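        Example (illustrative):
        >>> snap = self.create_snapshot_from_volume(volume, description='test snapshot')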
"""
return self.create_snapshots(volume, count=1, mincount=1, eof=True, wait_on_progress=wait_on_progress,
poll_interval=poll_interval, timeout=timeout, description=description)[0]
@printinfo
def create_snapshot(self, volume_id, wait_on_progress=40, poll_interval=10, timeout=0, description=""):
"""
Create a new single EBS snapshot from an existing volume id then wait for it to go to the created state.
By default will poll for poll_count. If wait_on_progress is specified than will wait on "wait_on_progress"
overrides # of poll_interval periods, using wait_on_progress # of periods of poll_interval length in seconds
        w/o progress before failing. If volume.id is passed, euvolume data will not be transferred to the snapshot created.
:param volume_id: (mandatory string) Volume id of the volume to create snapshot from
:param wait_on_progress:(optional integer) # of poll intervals to wait while 0 progress is made before exiting,
overrides "poll_count" when used
:param poll_interval: (optional integer) time to sleep between polling snapshot status
:param timeout: (optional integer) over all time to wait before exiting as failure
:param description: (optional string) string used to describe the snapshot
:return: EuSnapshot
"""
snapshots = self.create_snapshots_from_vol_id(volume_id, count=1, mincount=1, eof=True,
wait_on_progress=wait_on_progress, poll_interval=poll_interval,
timeout=timeout, description=description)
if len(snapshots) == 1:
return snapshots[0]
else:
raise Exception("create_snapshot: Expected 1 snapshot, got '"+str(len(snapshots))+"' snapshots")
@printinfo
def create_snapshots_from_vol_id(self,
volume_id,
count=1,
mincount=None,
eof=True,
delay=0,
wait_on_progress=40,
poll_interval=10,
timeout=0,
description=""):
"""
        Create new EBS snapshot(s) from an existing volume id string, then wait for them to go to the created state.
By default will poll for poll_count. If wait_on_progress is specified than will wait on "wait_on_progress"
overrides # of poll_interval periods, using wait_on_progress # of periods of poll_interval length in seconds
w/o progress before failing
:param volume_id: (mandatory string) Volume id of the volume to create snapshot from
        :param count: (optional Integer) Specify how many snapshots to attempt to create
:param mincount: (optional Integer) Specify the min success count, defaults to 'count'
        :param eof: (optional boolean) End on failure. If true will end on first failure, otherwise will continue to try
                    and fulfill mincount
:param wait_on_progress:(optional integer) # of poll intervals to wait while 0 progress is made before exiting,
overrides "poll_count" when used
:param poll_interval: (optional integer) time to sleep between polling snapshot status
:param timeout: (optional integer) over all time to wait before exiting as failure
:param description: (optional string) string used to describe the snapshot
:return: EuSnapshot list
"""
if isinstance(volume_id, Volume):
raise Exception('Expected volume.id got Volume, try create_snapshots or create_snapshot_from_volume methods instead')
volume = EuVolume.make_euvol_from_vol(self.get_volume(volume_id), tester=self)
return self.create_snapshots(volume,
count=count, mincount=mincount, eof=eof, delay=delay,
wait_on_progress=wait_on_progress, poll_interval=poll_interval,
timeout=timeout, description=description)
@printinfo
def create_snapshots(self,
volume,
count=1,
mincount=None,
eof=True,
delay=0,
wait_on_progress=40,
poll_count=48,
poll_interval=10,
timeout=0,
monitor_to_completed=True,
delete_failed = True,
description="Created by eutester"):
"""
Create a new EBS snapshot from an existing volume then wait for it to go to the created state.
By default will poll for poll_count. If wait_on_progress is specified than will wait on "wait_on_progress"
overrides # of poll_interval periods, using wait_on_progress # of periods of poll_interval length in seconds
w/o progress before failing
:param volume: (mandatory Volume object) Volume to create snapshot from
        :param count: (optional Integer) Specify how many snapshots to attempt to create
:param mincount: (optional Integer) Specify the min success count, defaults to 'count'
:param eof: (optional boolean) End on failure.
            If true will end on first failure, otherwise will continue to try and fulfill mincount
:param wait_on_progress: (optional integer) # of poll intervals to wait while 0 progress is made before exiting,
overrides "poll_count" when used
:param poll_interval: (optional integer) time to sleep between polling snapshot status
:param monitor_to_completed: (optional boolean) If true will monitor created snapshots to the completed state,
else return a list of created snaps
:param timeout: (optional integer) over all time to wait before exiting as failure
:param delete_failed: (optional boolean) automatically delete failed volumes
:param description: (optional string) string used to describe the snapshot
:return: EuSnapshot list
"""
#Fix EuSnapshot for isinstance() use later...
if not hasattr(volume, 'md5'):
volume = EuVolume.make_euvol_from_vol(volume,tester= self)
volume_id = volume.id
snapshots = []
retlist = []
failed = []
mincount = mincount or count
if mincount > count:
raise Exception('Mincount can not be greater than count')
if wait_on_progress > 0:
poll_count = wait_on_progress
last_progress = 0
elapsed = 0
polls = 0
self.logger.debug('Create_snapshots count:'+str(count)+", mincount:"+str(mincount)+', wait_on_progress:'+
str(wait_on_progress)+",eof:"+str(eof))
for x in xrange(0,count):
try:
start = time.time()
snapshot = self.create_snapshot( volume_id, description=str(description))
cmdtime = time.time()-start
if snapshot:
self.logger.debug("Attempting to create snapshot #"+str(x)+ ", id:"+str(snapshot.id))
snapshot = EuSnapshot().make_eusnap_from_snap(snapshot, tester=self ,cmdstart=start)
#Append some attributes for tracking snapshot through creation and test lifecycle.
snapshot.eutest_polls = 0
snapshot.eutest_poll_count = poll_count
snapshot.eutest_last_progress = 0
snapshot.eutest_failmsg = "FAILED"
snapshot.eutest_laststatus = None
snapshot.eutest_timeintest = 0
snapshot.eutest_createorder = x
snapshot.eutest_cmdtime = "{0:.2f}".format(cmdtime)
snapshot.eutest_volume_md5 = volume.md5
snapshot.eutest_volume_md5len = volume.md5len
snapshot.eutest_volume_zone = volume.zone
snapshot.update()
if description and (not re.match(str(snapshot.description), str(description)) ):
raise Exception('Snapshot Description does not match request: Snap.description:"'+
str(snapshot.description)+'" -vs- "'+str(description)+'"')
if snapshot:
snapshots.append(snapshot)
except Exception, e:
self.logger.debug("Caught exception creating snapshot,eof is False, continuing. Error:"+str(e))
if eof:
if delete_failed:
try:
self.delete_snapshots(snapshots)
except: pass
raise e
                else:
                    failed.append(snapshot)
                    #Check to see if our min count of snapshots can still be met; we allow partial failure for specific tests.
                    #If not, clean up all snapshots created by this operation and raise
                    if (count - len(failed)) < mincount:
                        if delete_failed:
                            snapshots.extend(failed)
                            try:
                                self.delete_snapshots(snapshots)
                            except: pass
                        raise Exception('Failed to create mincount(' + str(mincount) +
                                        ') number of snapshots from volume:' + str(volume_id))
#If a delay was given, wait before next snapshot gets created
if delay:
time.sleep(delay)
#If we have failed snapshots,
# but still met our minimum clean up the failed and continue (this might be better as a thread?)...
if failed and delete_failed:
try:
self.delete_snapshots(failed)
except: pass
#Pass the list of created snapshots to monitor method if state was not None,
# otherwise just return the list of newly created
#snapshots.
if monitor_to_completed:
snapshots = self.monitor_eusnaps_to_completed(snapshots,
mincount=mincount,
eof=eof,
wait_on_progress=wait_on_progress,
poll_interval=poll_interval,
timeout=timeout,
delete_failed=delete_failed
)
return snapshots
@printinfo
def monitor_eusnaps_to_completed(self,
snaps,
mincount=None,
eof=True,
wait_on_progress=40,
poll_count=48,
poll_interval=10,
timeout=0,
monitor_to_progress = None,
delete_failed=True ):
"""
        Monitor an EBS snapshot list for snapshots to enter the completed state.
By default will poll for poll_count. If wait_on_progress is specified than will wait on "wait_on_progress"
overrides # of poll_interval periods, using wait_on_progress # of periods of poll_interval length in seconds
w/o progress before failing
:param snaps: list of eusnapshots to monitor
:param mincount: (optional Integer) Specify the min success count, defaults to length of list provided
        :param eof: (optional boolean) End on failure. If true will end on first failure,
                    otherwise will continue to try and fulfill mincount
:param wait_on_progress: (optional integer) # of poll intervals to wait while 0 progress is made before exiting,
overrides "poll_count" when used
:param poll_interval: (optional integer) time to sleep between polling snapshot status
:param timeout: (optional integer) over all time to wait before exiting as failure
:param monitor_to_progress (optional integer): will consider the monitor successful and exit when the snapshot's
progress is >= this value
:param delete_failed: (optional boolean) automatically delete failed volumes
:return: EuSnapshot list
"""
failed = []
retlist = []
elapsed = 0
self.logger.debug("Monitor_snapshot_to_completed starting...")
mincount = mincount or len(snaps)
if mincount > len(snaps):
raise Exception('Mincount can not be greater than count')
if wait_on_progress > 0:
poll_count = wait_on_progress
last_progress = 0
monitor_start = time.time()
for snap in snaps:
if not isinstance(snap, EuSnapshot):
raise Exception("object not of type EuSnapshot. Found type:"+str(type(snap)))
snapshots = copy.copy(snaps)
for snap in snapshots:
if not snap.eutest_polls:
snap.eutest_poll_count = poll_count
self.logger.debug('Waiting for '+str(len(snapshots))+" snapshots to go to completed state...")
while (timeout == 0 or elapsed <= timeout) and snapshots:
self.logger.debug("Waiting for "+str(len(snapshots))+" snapshots to complete creation")
for snapshot in snapshots:
try:
snapshot.eutest_polls += 1
snapshot.update()
snapshot.eutest_laststatus = snapshot.status
if snapshot.status == 'failed':
raise Exception(str(snapshot) + " failed after Polling("+str(snapshot.eutest_polls)+
") ,Waited("+str(elapsed)+" sec), last reported (status:" + snapshot.status+
" progress:"+snapshot.progress+")")
curr_progress = int(snapshot.progress.replace('%',''))
#if progress was made, then reset timer
if (wait_on_progress > 0) and (curr_progress > snapshot.eutest_last_progress):
snapshot.eutest_poll_count = wait_on_progress
else:
snapshot.eutest_poll_count -= 1
snapshot.eutest_last_progress = curr_progress
elapsed = int(time.time()-monitor_start)
if snapshot.eutest_poll_count <= 0:
raise Exception("Snapshot did not make progress for "+str(wait_on_progress)+" polls, after "+
str(elapsed)+" seconds")
self.logger.debug(str(snapshot.id)+", Status:"+snapshot.status+", Progress:"+snapshot.progress+
", Polls w/o progress:"+str(wait_on_progress-snapshot.eutest_poll_count)+"/"+
str(wait_on_progress)+", Time Elapsed:"+str(elapsed)+"/"+str(timeout))
if snapshot.status == 'completed':
self.logger.debug(str(snapshot.id)+" created after " + str(elapsed) + " seconds. Status:"+
snapshot.status+", Progress:"+snapshot.progress)
self.test_resources["snapshots"].append(snapshot)
snapshot.eutest_timeintest = elapsed
snapshot.eutest_failmsg ='SUCCESS'
retlist.append(snapshot)
snapshots.remove(snapshot)
if monitor_to_progress and (curr_progress >= monitor_to_progress):
self.logger.debug(str(snapshot.id)+" reached designated monitor state after " + str(elapsed) + " seconds. Status:"+
snapshot.status+", Progress:"+snapshot.progress)
self.test_resources["snapshots"].append(snapshot)
snapshot.eutest_timeintest = elapsed
retlist.append(snapshot)
snapshots.remove(snapshot)
except Exception, e:
tb = get_traceback()
errbuf = '\n' + str(tb) + '\n' + str(e)
self.logger.debug("Exception caught in snapshot creation, snapshot:"+str(snapshot.id)+".Err:"+str(errbuf))
if eof:
#If exit on fail, delete all snaps and raise exception
self.delete_snapshots(snapshots)
raise e
else:
snapshot.eutest_failmsg = str(e)
snapshot.eutest_timeintest = elapsed
failed.append(snapshot)
snapshots.remove(snapshot)
elapsed = int(time.time()-monitor_start)
if snapshots:
time.sleep(poll_interval)
        #Any snapshots remaining at this point timed out waiting to reach 'completed'
        for snap in snapshots:
            snap.eutest_failmsg = "Snapshot timed out in creation after " + str(elapsed) + " seconds"
            snap.eutest_timeintest = elapsed
            failed.append(snap)
#If delete_failed flag is set, delete the snapshots believed to have failed...
if delete_failed:
try:
self.delete_snapshots(failed)
except: pass
#join the lists again for printing debug purposes, retlist should only contain snaps believed to be good
snapshots = copy.copy(retlist)
snapshots.extend(failed)
#Print the results in a formated table
self.print_eusnapshot_list(snapshots)
#Check for failure and failure criteria and return
self.test_resources['snapshots'].extend(snapshots)
        if failed and eof:
            raise Exception(str(len(failed)) + ' snapshots failed in create, see debug output for more info')
        if len(retlist) < mincount:
            raise Exception('Created ' + str(len(retlist)) + '/' + str(mincount) +
                            ' snapshots is less than the provided mincount, see debug output for more info')
return retlist
def get_snapshot(self,snapid=None):
snaps = self.get_snapshots(snapid=snapid, maxcount=1)
if snaps:
return snaps[0]
else:
return None
@printinfo
def get_snapshots(self,
snapid=None,
volume_id=None,
volume_size=None,
volume_md5=None,
filters=None,
maxcount=None,
owner_id=None):
"""
:param snapid: string, snapshot id to use as filter
:param volume_id: string, volume id to use as filter
:param volume_size: int size of volume snap'd to use as filter
        :param volume_md5: string md5 checksum of vol snap'd to use as filter
        :param filters: dict of filters to pass to the underlying get_all_snapshots request
        :param maxcount: int max number of snaps to match before returning list
:param owner_id: string owner id to use as filter
:return: list of snapshots found
"""
retlist = []
#Start by comparing resources the current test obj is tracking to see if they are still in sync with the system
snapshots = copy.copy(self.test_resources['snapshots'])
snapshot_list = []
if snapid:
snapshot_list.append(snapid)
ec2_snaps = self.get_all_snapshots(snapshot_ids=snapshot_list, filters=filters, owner=owner_id)
for snap in ec2_snaps:
if snap not in snapshots:
snapshots.append(snap)
for snap in snapshots:
if not snap in ec2_snaps:
self.logger.debug('Snapshot:'+str(snap.id)+' no longer found on system')
if not hasattr(snap,'eutest_volume_md5'):
snap = EuSnapshot.make_eusnap_from_snap(snap, tester=self)
self.logger.debug("Checking snap:"+str(snap.id)+" for match...")
if volume_id and snap.volume_id != volume_id:
continue
if volume_size and snap.volume_size != volume_size:
continue
if volume_md5 and snap.eutest_volume_md5 != volume_md5:
continue
retlist.append(snap)
if maxcount and (len(retlist) >= maxcount):
return retlist
self.logger.debug("Found "+str(len(retlist))+" snapshots matching criteria")
return retlist
@printinfo
def delete_snapshots(self,
snapshots,
valid_states='completed,failed',
base_timeout=60,
add_time_per_snap=10,
wait_for_valid_state=120,
poll_interval=10,
eof=False):
"""
Delete a list of snapshots.
:param snapshots: List of snapshot IDs
:param valid_states: Valid status for snapshot to
enter (Default: 'completed,failed')
:param base_timeout: Timeout for waiting for poll interval
:param add_time_per_snap: Amount of time to add to base_timeout
per snapshot in the list
:param wait_for_valid_state: How long to wait for a valid state to
be reached before attempting delete, as
some states will reject a delete request.
:param poll_interval: Time to wait between checking the snapshot states
:param eof: Whether or not to call an Exception() when first
failure is reached
        :raise: Exception if snapshots fail to enter a deletable state or fail to delete within the timeout
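        Example (illustrative):
        >>> snaps = self.get_snapshots(volume_id=volume.id)
        >>> self.delete_snapshots(snaps)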
"""
snaps = copy.copy(snapshots)
delete_me = []
start = time.time()
elapsed = 0
valid_delete_states = str(valid_states).split(',')
if not valid_delete_states:
raise Exception("delete_snapshots, error in valid_states "
"provided:" + str(valid_states))
#Wait for snapshot to enter a state that will accept the deletion action, before attempting to delete it...
while snaps and (elapsed < wait_for_valid_state):
elapsed = int(time.time()-start)
check_state_list = copy.copy(snaps)
for snap in check_state_list:
try:
snap_id = self.get_snapshot(snap.id)
if not snap_id:
self.logger.debug("Get Snapshot not found, assuming it's "
"already deleted:" + str(snap.id))
delete_me.append(snap)
break
except EC2ResponseError as ec2e:
if ec2e.status == 400:
self.logger.debug("Get Snapshot not found, assuming it's "
"already deleted:" + str(snap.id) +
", err:" + str(ec2e))
delete_me.append(snap)
else:
snap.update()
self.logger.debug("Checking snapshot:" + str(snap.id) +
" status:"+str(snap.status))
for v_state in valid_delete_states:
v_state = str(v_state).rstrip().lstrip()
if snap.status == v_state:
delete_me.append(snap)
try:
snap.delete()
except EC2ResponseError as ec2e:
self.logger.debug("Snapshot not found, assuming "
"it's already deleted:" +
str(snap.id))
delete_me.append(snap)
break
for snap in delete_me:
if snap in snaps:
snaps.remove(snap)
if snaps:
buf = "\n-------| WAITING ON " + str(len(snaps)) + \
" SNAPSHOTS TO ENTER A DELETE-ABLE STATE:(" + \
str(valid_states) + "), elapsed:" + str(elapsed) + \
'/' + str(wait_for_valid_state) + "|-----"
for snap in snaps:
buf = buf + "\nSnapshot:"+str(snap.id) + ",status:" + \
str(snap.status)+", progress:"+str(snap.progress)
self.logger.debug(buf)
self.logger.debug('waiting poll_interval to recheck snapshots:' +
str(poll_interval) +' seconds')
time.sleep(poll_interval)
#Now poll all the snapshots which a delete() request was made for
if snaps:
buf = ""
for snap in snaps:
buf = buf+','+str(snap.id)
msg = "Following snapshots did not enter a valid state(" + \
str(valid_states) + ") for deletion:" + str(buf)
if eof:
raise Exception(msg)
else:
self.logger.debug(msg)
start = time.time()
elapsed = 0
timeout= base_timeout + (add_time_per_snap*len(delete_me))
# Wait for all snapshots in delete_me list to be deleted or timeout...
while delete_me and (elapsed < timeout):
self.logger.debug('Waiting for remaining ' + str(int(len(delete_me))) +
' snaps to delete...' )
waiting_list = copy.copy(delete_me)
for snapshot in waiting_list:
try:
snapshot.update()
get_snapshot = self.get_all_snapshots(
snapshot_ids=[snapshot.id])
                except EC2ResponseError as ec2re:
                    self.logger.debug("Snapshot not found, assuming "
                                      "it's already deleted:" +
                                      str(snapshot.id))
                    get_snapshot = None
if not get_snapshot or snapshot.status == 'deleted':
self.logger.debug('Snapshot:'+str(snapshot.id)+" is deleted")
delete_me.remove(snapshot)
#snapshot is deleted remove it from test resources list
for testsnap in self.test_resources['snapshots']:
if snapshot.id == testsnap.id:
self.test_resources['snapshots'].remove(testsnap)
if delete_me and (elapsed < timeout):
time.sleep(poll_interval)
elapsed = int(time.time()-start)
# Record any snapshots not yet deleted as errors...
if delete_me:
buf = ""
            for snap in delete_me:
buf += "\nSnapshot:" + str(snap.id)+",status:" + \
str(snap.status) + ", progress:"+str(snap.progress) +\
", elapsed:" + str(elapsed) + '/' + str(timeout)
raise Exception("Snapshots did not delete within timeout:" +
str(timeout) + "\n" + str(buf))
def delete_snapshot(self,snapshot,timeout=60):
"""
Delete the snapshot object
:param snapshot: boto.ec2.snapshot object to delete
:param timeout: Time in seconds to wait for deletion
"""
snapshot.delete()
self.logger.debug( "Sent snapshot delete request for snapshot: " + snapshot.id)
return self.delete_snapshots([snapshot], base_timeout=60)
@printinfo
def register_snapshot(self,
snapshot,
root_device_name="/dev/sda",
description="bfebs",
windows=False,
bdmdev=None,
name=None,
ramdisk=None,
kernel=None,
dot=True,
block_device_map=None):
"""Convience function for passing a snapshot instead of its id. See register_snapshot_by_id
:param snapshot: Snapshot object to use as an image
:param root_device_name: root device name to use when registering
:param description: Description of image that will be registered
:param windows: Is the image a Windows image
:param bdmdev: Block device mapping
:param name: Name to register the image as
:param ramdisk: Ramdisk ID to use
:param kernel: Kernel ID to use
:param dot: Delete on terminate flag
:param block_device_map: existing block device map to append snapshot block dev to
"""
return self.register_snapshot_by_id( snap_id=snapshot.id,
root_device_name=root_device_name,
description=description,
windows=windows,
bdmdev=bdmdev,
name=name,
ramdisk=ramdisk,
kernel=kernel,
dot=dot,
block_device_map=block_device_map)
@printinfo
def register_snapshot_by_id( self,
snap_id,
root_device_name="/dev/sda1",
description="bfebs",
windows=False,
bdmdev=None,
name=None,
ramdisk=None,
kernel=None,
size=None,
dot=True,
block_device_map=None,
custom_params=None):
"""
Register an image snapshot
:param snap_id: snapshot id
:param root_device_name: root-device-name for image
:param description: description of image to be registered
:param windows: Is windows image boolean
:param bdmdev: block-device-mapping device for image
:param name: name of image to be registered
:param ramdisk: ramdisk id
:param kernel: kernel id (note for windows this name should be "windows")
        :param size: optional size in GiB for the root volume created from the snapshot
        :param dot: Delete On Terminate boolean
        :param block_device_map: existing block device map to add the snapshot block dev type to
        :param custom_params: optional dict of additional request parameters to pass through
:return: emi id of registered image
"""
custom_params = custom_params or {}
if bdmdev is None:
bdmdev=root_device_name
if name is None:
name="bfebs_"+ snap_id
if windows:
custom_params['Platform'] = "windows"
bdmap = block_device_map or BlockDeviceMapping()
block_dev_type = BlockDeviceType()
block_dev_type.snapshot_id = snap_id
block_dev_type.delete_on_termination = dot
block_dev_type.size = size
bdmap[bdmdev] = block_dev_type
self.logger.debug("Register image with: snap_id:"+str(snap_id)+", root_device_name:"+str(root_device_name)+", desc:"+str(description)+
", windows:"+str(windows)+", bdname:"+str(bdmdev)+", name:"+str(name)+", ramdisk:"+
str(ramdisk)+", kernel:"+str(kernel))
image_id = self._register_image_custom_params(name=name, description=description, kernel_id=kernel, ramdisk_id=ramdisk,
block_device_map=bdmap, root_device_name=root_device_name, **custom_params)
self.logger.debug("Image now registered as " + image_id)
return image_id
@printinfo
def register_image( self,
image_location,
root_device_name=None,
description=None,
architecture=None,
virtualization_type=None,
platform=None,
bdmdev=None,
name=None,
ramdisk=None,
kernel=None,
custom_params=None):
"""
Register an image based on the s3 stored manifest location
        :param image_location: s3 path to the manifest of the bundled image
:param root_device_name: root-device-name for image
:param description: description of image to be registered
:param bdmdev: block-device-mapping object for image
:param name: name of image to be registered
:param ramdisk: ramdisk id
        :param kernel: kernel id (note for windows this name should be "windows")
        :param architecture: optional architecture string, example: 'x86_64'
        :param virtualization_type: optional virtualization type, example: 'hvm'
        :param platform: optional platform string, example: 'windows'
        :param custom_params: optional dict of additional request parameters to pass through
        :return: image id string
"""
custom_params = custom_params or {}
if platform:
custom_params['Platform']= platform
#Check to see if boto is recent enough to have this param...
image_id = self._register_image_custom_params(name=name,
description=description,
kernel_id=kernel,
image_location=image_location,
ramdisk_id=ramdisk,
architecture=architecture,
virtualization_type=virtualization_type,
block_device_map=bdmdev,
root_device_name=root_device_name,
**custom_params)
self.test_resources["images"].append(self.get_all_images([image_id])[0])
return image_id
def delete_image(self, image, timeout=60):
"""
        Delete an image by deregistering it.
        :param image: boto image object to deregister
        :param timeout: int seconds to wait before failing the operation (currently unused)
"""
return self.deregister_image( image )
def deregister_image(self, image):
"""
Deregister an image.
:param image: boto image object to deregister
"""
gotimage = image
self.logger.debug("Deregistering image: " + str(image))
try:
gotimage = self.get_all_images(image_ids=[image.id])[0]
        except IndexError, ie:
            raise Exception("deregister_image:" + str(image.id) +
                            ", No image found in get_all_images. Error: " + str(ie))
except Exception, e:
#should return [] if not found, exception indicates an error with the command maybe?
tb = get_traceback()
raise Exception(
'deregister_image: Error attempting to get image:' + str(image.id) + ", err:" + str(tb) + '\n' + str(e))
        # Assumes self.ec2 is the underlying boto EC2 connection (referenced elsewhere in this module);
        # calling self.deregister_image() here would recurse into this same method.
        self.ec2.deregister_image(image.id)
try:
# make sure the image was removed (should throw an exception),if not make sure it is in the deregistered state
# if it is still associated with a running instance'
gotimage = self.get_all_images(image_ids=[image.id])[0]
# this will not be executed if image was removed
            if gotimage.state != 'deregistered':
raise Exception('deregister_image: Error attempting to deregister image:' + str(image.id) + '\n')
except IndexError, ie:
pass
@printinfo
def get_images(self,
emi=None,
name=None,
root_device_type=None,
root_device_name=None,
virtualization_type=None,
location=None,
state="available",
arch=None,
owner_id=None,
filters=None,
basic_image=None,
platform=None,
not_platform=None,
tagkey=None,
tagvalue=None,
max_count=None,
_args_dict=None):
"""
Get a list of images which match the provided criteria.
        :param emi: Partial ID of the emi to return, defaults to the 'emi-' prefix to grab any
:param root_device_type: example: 'instance-store' or 'ebs'
:param root_device_name: example: '/dev/sdb'
:param virtualization_type: example: 'hvm' or 'paravirtualized'
:param location: partial on location match example: 'centos'
:param state: example: 'available'
:param arch: example: 'x86_64'
:param owner_id: owners numeric id
:param filters: standard filters
:param basic_image: boolean, avoids returning windows, load balancer and service images
:param not_platform: skip if platform string matches this string. Example: not_platform='windows'
:param max_count: return after finding 'max_count' number of matching images
:param _args_dict: dict which can be populated by annotation to give
insight into the args/kwargs this was called with
        :return: list of matching image objects
:raise: Exception if image is not found
"""
ret_list = []
if not filters:
filters = {}
if emi:
filters['image-id'] = emi
if name:
filters['name'] = name
if root_device_type:
filters['root-device-type'] = root_device_type
if root_device_name:
filters['root-device-name'] = root_device_name
if state:
filters['state'] = state
if virtualization_type:
filters['virtualization-type'] = virtualization_type
if arch:
filters['architecture'] = arch
if owner_id:
filters['owner-id'] = owner_id
if platform:
filters['platform'] = platform
if tagkey:
filters['tag-key'] = tagkey
if tagvalue:
filters['tag-value'] = tagvalue
# if emi is None and not platform:
if basic_image is None and not _args_dict:
# If a specific EMI was not provided, set some sane defaults for
# fetching a test image to work with...
basic_image = True
        if emi is None:
            emi = "mi-"
images = self.get_all_images(filters=filters)
self.logger.debug("Got " + str(len(images)) + " total images " + str(emi) + ", now filtering..." )
for image in images:
if (re.search(emi, image.id) is None) and (re.search(emi, image.name) is None):
continue
if (root_device_type is not None) and (image.root_device_type != root_device_type):
continue
if (virtualization_type is not None):
if hasattr(image, 'virtualization_type'):
if image.virtualization_type != virtualization_type:
continue
else:
self.logger.debug('Filter by virtualization type requested but not supported in this boto version?')
if (root_device_name is not None) and (image.root_device_name != root_device_name):
continue
if (state is not None) and (image.state != state):
continue
if (location is not None) and (not re.search( location, image.location)):
continue
if (name is not None) and (image.name != name):
continue
if (arch is not None) and (image.architecture != arch):
continue
if (owner_id is not None) and (image.owner_id != owner_id):
continue
if basic_image:
not_location = ["windows", "imaging-worker", "loadbalancer"]
skip = False
for loc in not_location:
if (re.search( str(loc), image.location)):
skip = True
break
if skip:
continue
if (not_platform is not None) and (image.platform == not_platform):
continue
self.logger.debug("Returning image:"+str(image.id))
ret_list.append(image)
if max_count and len(ret_list) >= max_count:
return ret_list
if not ret_list:
raise ResourceNotFoundException("Unable to find an EMI")
return ret_list
@printinfo
def get_emi(self,
emi=None,
name=None,
root_device_type=None,
root_device_name=None,
location=None,
state="available",
arch=None,
owner_id=None,
filters=None,
basic_image=True,
platform=None,
not_platform=None,
tagkey=None,
tagvalue=None,
_args_dict=None,
):
"""
        Get a single emi that matches the provided criteria, or just grab any emi in the system.
        :param emi: Partial ID of the emi to return, defaults to the 'emi-' prefix to grab any
:param root_device_type: example: 'instance-store' or 'ebs'
:param root_device_name: example: '/dev/sdb'
:param location: partial on location match example: 'centos'
:param state: example: 'available'
:param arch: example: 'x86_64'
:param owner_id: owners numeric id
:param filters: standard filters, dict.
:param basic_image: boolean, avoids returning windows, load balancer and service images
:param not_platform: skip if platform string matches this string. Example: not_platform='windows'
:param _args_dict: dict which can be populated by annotation to give
insight into the args/kwargs this was called with
        :return: image object
:raise: Exception if image is not found
"""
# If no criteria was provided for filter an image, use 'basic_image'
# flag to provide some sane defaults
if basic_image is None:
if not _args_dict:
basic_image = True
else:
basic_image = False
if filters is None and emi is None and \
name is None and location is None:
# Attempt to get a eutester created image if it happens to meet
# the other criteria provided. Otherwise remove filter and
# return the image found without the imposed filters.
filters={'tag-key':'eutester-created'}
try:
return self.get_images(emi=emi,
name=name,
root_device_type=root_device_type,
root_device_name=root_device_name,
location=location,
state=state,
arch=arch,
owner_id=owner_id,
filters=filters,
basic_image=basic_image,
platform=platform,
not_platform=not_platform,
tagkey=tagkey,
tagvalue=tagvalue,
max_count=1)[0]
except:
filters = None
return self.get_images(emi=emi,
name=name,
root_device_type=root_device_type,
root_device_name=root_device_name,
location=location,
state=state,
arch=arch,
owner_id=owner_id,
filters=filters,
basic_image=basic_image,
platform=platform,
not_platform=not_platform,
tagkey=tagkey,
tagvalue=tagvalue,
max_count=1)[0]
def get_all_allocated_addresses(self,account_id=None):
"""
Return all allocated addresses for a given account_id as boto.ec2.address objects
:param account_id: account number to filter on
:return: list of boto.ec2.address objects
"""
self.logger.debug("get_all_allocated_addresses...")
account_id = account_id or self.get_account_id()
ret = []
if account_id:
account_id = str(account_id)
addrs = self.get_all_addresses()
for addr in addrs:
if addr.instance_id and re.search(account_id, str(addr.instance_id)):
ret.append(addr)
return ret
def get_available_addresses(self):
"""
Get all available addresses
:return: a list of all available boto.ec2.address
"""
self.logger.debug("get_available_addresses...")
ret = []
addrs = self.get_all_addresses()
for addr in addrs:
if addr.instance_id and re.search(r"(available|nobody)", addr.instance_id):
ret.append(addr)
return ret
def show_all_addresses_verbose(self, display=True):
"""
Print table to debug output showing all addresses available to cloud admin using verbose filter
"""
address_width = 20
info_width = 64
account_width = 24
buf = ""
line = ""
header = "| " + str("PUBLIC IP").ljust(address_width) + " | " + str("ADDRESS INFO").ljust(info_width) + \
" | " + str("ACCOUNT NAME").ljust(account_width) + " | " + str("REGION") + "\n"
longest = len(header)
try:
ad_list = self.get_all_addresses(addresses='verbose')
for ad in ad_list:
account_name = ""
adline = ""
match = re.findall('\(arn:*.*\)', ad.instance_id)
if match:
try:
match = match[0]
account_id = match.split(':')[4]
account_name = self.get_all_accounts(account_id=account_id)[0]['account_name']
except:pass
if ad.region:
region = ad.region.name
adline = "| " + str(ad.public_ip ).ljust(address_width) + " | " + str(ad.instance_id).ljust(info_width) + \
" | " + str(account_name).ljust(account_width) + " | " + str(region) + "\n"
buf += adline
if len(adline) > longest:
longest = len(adline)
except Exception, e:
tb = get_traceback()
buf = str(tb) + "\n ERROR in show_all_addresses_verbose:" + str(e)
for x in xrange(0,longest):
line += "-"
line += "\n"
buf = "\n" + line + header + line + buf + line
if not display:
return buf
self.logger.debug(buf)
def allocate_address(self, domain=None):
"""
        Allocate an address for the current user
        :param domain: optional address domain (e.g. 'vpc')
        :return: boto.ec2.address object allocated
"""
try:
self.logger.debug("Allocating an address")
            # Assumes self.ec2 is the underlying boto EC2 connection (referenced elsewhere in this
            # module); calling self.allocate_address() here would recurse into this same method.
            address = self.ec2.allocate_address(domain=domain)
except Exception, e:
tb = get_traceback()
err_msg = 'Unable to allocate address'
self.critical(str(err_msg))
raise Exception(str(tb) + "\n" + str(err_msg))
self.logger.debug("Allocated " + str(address))
return address
def associate_address(self,instance, address, refresh_ssh=True, timeout=75):
"""
Associate an address object with an instance
:param instance: instance object to associate ip with
:param address: address to associate to instance
:param timeout: Time in seconds to wait for operation to complete
:raise: Exception in case of association failure
"""
ip = str(address.public_ip)
old_ip = str(instance.ip_address)
self.logger.debug("Attemtping to associate " + str(ip) + " with " + str(instance.id))
try:
address.associate(instance.id)
except Exception, e:
self.critical("Unable to associate address "+str(ip)+" with instance:"+str(instance.id)+"\n")
raise e
start = time.time()
elapsed = 0
address = self.get_all_addresses(addresses=[ip])[0]
### Ensure address object holds correct instance value
while not address.instance_id:
if elapsed > timeout:
raise Exception('Address ' + str(ip) + ' never associated with instance')
self.logger.debug('Address {0} not attached to {1} but rather {2}'.format(str(address), instance.id, address.instance_id))
self.sleep(5)
address = self.get_all_addresses(addresses=[ip])[0]
elapsed = int(time.time()-start)
poll_count = 15
### Ensure instance gets correct address
while instance.ip_address not in address.public_ip:
if elapsed > timeout:
raise Exception('Address ' + str(address) + ' did not associate with instance after:'+str(elapsed)+" seconds")
self.logger.debug('Instance {0} has IP {1} attached instead of {2}'.format(instance.id, instance.ip_address, address.public_ip) )
self.sleep(5)
instance.update()
elapsed = int(time.time()-start)
self.logger.debug("Associated IP successfully old_ip:"+str(old_ip)+' new_ip:'+str(instance.ip_address))
if refresh_ssh:
if isinstance(instance, EuInstance) or isinstance(instance, WinInstance):
self.sleep(5)
instance.update()
self.logger.debug('Refreshing EuInstance:'+str(instance.id)+' ssh connection to associated addr:'+str(instance.ip_address))
instance.connect_to_instance()
else:
self.logger.debug('WARNING: associate_address called with refresh_ssh set to true, but instance is not EuInstance type:'+str(instance.id))
def disassociate_address_from_instance(self, instance, timeout=75):
"""
Disassociate address from instance and ensure that it no longer holds the IP
:param instance: An instance that has an IP allocated
:param timeout: Time in seconds to wait for address to disassociate
:raise:
"""
self.logger.debug("disassociate_address_from_instance: instance.ip_address:" +
str(instance.ip_address) + " instance:" + str(instance))
ip=str(instance.ip_address)
address = self.get_all_addresses(addresses=[instance.ip_address])[0]
start = time.time()
elapsed = 0
address = self.get_all_addresses(addresses=[address.public_ip])[0]
        ### Ensure address object holds correct instance value
while address.instance_id and not re.match(instance.id, str(address.instance_id)):
self.logger.debug('Address {0} not attached to Instance "{1}" but rather Instance "{2}" after {3} seconds'.format(str(address), instance.id, address.instance_id, str(elapsed)) )
if elapsed > timeout:
raise Exception('Address ' + str(address) + ' never associated with instance after '+str(elapsed)+' seconds')
address = self.get_all_addresses(addresses=[address.public_ip])[0]
self.sleep(5)
elapsed = int(time.time()-start)
self.logger.debug("Attemtping to disassociate " + str(address) + " from " + str(instance.id))
address.disassociate()
start = time.time()
### Ensure instance gets correct address
### When private addressing is enabled the pub address should be equal to the priv address
        ### Otherwise we want the pub address to be anything other than its current value and not the priv address
while (not instance.private_addressing and instance.ip_address != address.public_ip and instance.ip_address != address.private_ip_address) or \
(instance.private_addressing and instance.ip_address == instance.private_ip_address):
self.logger.debug('Instance {0} has IP "{1}" still using address "{2}" after {3} seconds'.format(instance.id, instance.ip_address, address.public_ip, str(elapsed)) )
if elapsed > timeout:
raise Exception('Address ' + str(address) + ' never disassociated with instance after '+str(elapsed)+' seconds')
instance.update()
self.sleep(5)
elapsed = int(time.time()-start)
address = self.get_all_addresses(addresses=[address.public_ip])[0]
self.logger.debug("Disassociated IP successfully")
def release_address(self, address):
"""
Release all addresses or a particular IP
:param address: Address object to release
:raise: Exception when the address does not release
"""
try:
self.logger.debug("Releasing address: " + str(address))
address.release()
except Exception, e:
raise Exception("Failed to release the address: " + str(address) + ": " + str(e))
def check_device(self, device_path):
"""
Used with instance connections. Checks if a device at a certain path exists
:param device_path: Path to check
:return: bool, if device was found
"""
return self.found("ls -1 " + device_path, device_path)
@printinfo
def get_volumes(self,
volume_id="vol-",
status=None,
attached_instance=None,
attached_dev=None,
snapid=None,
zone=None,
filters=None,
minsize=1,
maxsize=None,
md5=None,
eof=False):
"""
        Return a list of volumes that match the criteria. Criteria options to be matched:
:param volume_id: string present within volume id
:param status: examples: 'in-use', 'creating', 'available'
:param attached_instance: instance id example 'i-1234abcd'
:param attached_dev: example '/dev/sdf'
:param snapid: snapshot volume was created from example 'snap-1234abcd'
        :param zone: zone of volume example 'PARTI00'
        :param filters: optional dict of filters to pass to the underlying get_all_volumes request
        :param minsize: minimum size of volume to be matched
        :param maxsize: maximum size of volume to be matched
        :param md5: optional md5 checksum to match against volumes which carry one
:param eof: exception on failure to find volume, else returns empty list
:return: List of volumes matching the filters provided
:raise:
"""
retlist = []
if (attached_instance is not None) or (attached_dev is not None):
status='in-use'
volumes = self.get_all_volumes(filters=filters)
for volume in volumes:
if not hasattr(volume,'md5'):
volume = EuVolume.make_euvol_from_vol(volume, tester=self)
if not re.match(volume_id, volume.id):
continue
if (snapid is not None) and (volume.snapshot_id != snapid):
continue
if (zone is not None) and (volume.zone != zone):
continue
if (status is not None) and (volume.status != status):
continue
if (md5 is not None) and hasattr(volume,'md5') and (volume.md5 != md5):
continue
if volume.attach_data is not None:
if (attached_instance is not None) and ( volume.attach_data.instance_id != attached_instance):
continue
if (attached_dev is not None) and (volume.attach_data.device != attached_dev):
continue
if volume.size < minsize:
continue
if maxsize is not None and volume.size > maxsize:
continue
if not hasattr(volume,'md5'):
volume = EuVolume.make_euvol_from_vol(volume)
retlist.append(volume)
if eof and retlist == []:
raise ResourceNotFoundException("Unable to find matching volume")
else:
return retlist
def get_volume(self,
volume_id="vol-",
status=None,
attached_instance=None,
attached_dev=None,
snapid=None,
zone=None,
minsize=1,
maxsize=None,
eof=True):
"""
Return first volume that matches the criteria.
:param volume_id: string present within volume id
:param status: examples: 'in-use', 'creating', 'available'
:param attached_instance: instance id example 'i-1234abcd'
:param attached_dev: example '/dev/sdf'
:param snapid: snapshot volume was created from example 'snap-1234abcd'
:param zone: zone of volume example 'PARTI00'
:param minsize: minimum size of volume to be matched
:param maxsize: maximum size of volume to be matched
:param eof: exception on failure to find volume, else returns None
:return: List of volumes matching the filters provided
:raise:
"""
vol = None
try:
vol = self.get_volumes(volume_id=volume_id, status=status, attached_instance=attached_instance,
attached_dev=attached_dev, snapid=snapid, zone=zone, minsize=minsize,
maxsize=maxsize, eof=eof)[0]
except Exception, e:
if eof:
raise e
return vol
@printinfo
def run_instance(self,
image=None,
keypair=None,
group="default",
name=None,
type=None,
zone=None,
min=1,
max=1,
user_data=None,
private_addressing=False,
username="root",
password=None,
is_reachable=True,
monitoring_enabled=False,
timeout=600):
"""
Run instance/s and wait for them to go to the running state
:param image: Image object to use, default is pick the first emi found in the system
:param keypair: Keypair name to use for the instances, defaults to none
        :param group: Security group name to apply to this set of instances, defaults to none
        :param type: VM type to use for these instances, defaults to m1.small
        :param zone: Availability zone to run these instances
        :param min: Minimum instances to launch, default 1
        :param max: Maximum instances to launch, default 1
:param user_data: User-data string to pass to instance
:param private_addressing: Runs an instance with only private IP address
:param username: username to use when connecting via ssh
:param password: password to use when connecting via ssh
:param is_reachable: Instance can be reached on its public IP (Default=True)
:param timeout: Time in seconds for instance to enter running state
:return: Reservation object
:raise:
"""
if image is None:
image = self.get_emi()
if not isinstance(image, Image):
image = self.get_emi(emi=str(image))
if image is None:
raise Exception("emi is None. run_instance could not auto find an emi?")
if not user_data:
user_data = self.enable_root_user_data
if private_addressing is True:
addressing_type = "private"
is_reachable= False
else:
addressing_type = None
#In the case a keypair object was passed instead of the keypair name
if keypair:
if isinstance(keypair, KeyPair):
keypair = keypair.name
if monitoring_enabled :
enabled=True
else:
enabled=False
start = time.time()
self.logger.debug( "Attempting to run "+ str(image.root_device_type) +" image " + str(image) + " in group " + str(group))
reservation = image.run(key_name=keypair,security_groups=[group],instance_type=type, placement=zone,
min_count=min, max_count=max, user_data=user_data, addressing_type=addressing_type,
monitoring_enabled=enabled)
self.test_resources["reservations"].append(reservation)
        if (len(reservation.instances) < min) or (len(reservation.instances) > max):
            fail = "Reservation:" + str(reservation.id) + " returned " + str(len(reservation.instances)) + \
                   " instances, not within min(" + str(min) + ") and max(" + str(max) + ")"
            self.critical(fail)
try:
self.wait_for_reservation(reservation,timeout=timeout)
except Exception, e:
self.logger.debug(get_traceback())
self.critical("An instance did not enter proper running state in " + str(reservation) )
self.critical("Terminatng instances in " + str(reservation))
self.terminate_instances(reservation)
raise Exception("Instances in " + str(reservation) + " did not enter proper state")
for instance in reservation.instances:
if instance.state != "running":
self.critical("Instance " + instance.id + " now in " + instance.state + " state in zone: " + instance.placement )
else:
self.logger.debug( "Instance " + instance.id + " now in " + instance.state + " state in zone: " + instance.placement )
#
# check to see if public and private DNS names and IP addresses are the same
#
            if (instance.ip_address == instance.private_ip_address) and \
                    (private_addressing is False):
self.logger.debug(str(instance) + " got Public IP: " + str(instance.ip_address) + " Private IP: " +
str(instance.private_ip_address) + " Public DNS Name: " + str(instance.public_dns_name) +
" Private DNS Name: " + str(instance.private_dns_name))
self.critical("Instance " + instance.id + " has he same public and private IPs of " + str(instance.ip_address))
else:
self.logger.debug(str(instance) + " got Public IP: " + str(instance.ip_address) + " Private IP: " +
str(instance.private_ip_address) + " Public DNS Name: " + str(instance.public_dns_name) +
" Private DNS Name: " + str(instance.private_dns_name))
if not private_addressing:
try:
self.wait_for_valid_ip(instance)
except Exception, e:
tb = get_traceback()
ip_err = str(tb) + "\nWARNING in wait_for_valid_ip: "+str(e)
self.logger.debug(ip_err)
self.terminate_instances(reservation)
raise Exception("Reservation " + str(reservation) + " has been terminated because instance " +
str(instance) + " did not receive a valid IP")
if is_reachable:
self.ping(instance.ip_address, 20)
## Add name tag
if name:
            self.create_tags([instance.id for instance in reservation.instances], {"Name": name})
#calculate remaining time to wait for establishing an ssh session/euinstance
timeout -= int(time.time() - start)
#if we can establish an SSH session convert the instances to the test class euinstance for access to instance specific test methods
if is_reachable:
self.logger.debug("Converting " + str(reservation) + " into euinstances")
return self.convert_reservation_to_euinstance(reservation, username=username, password=password, private_addressing=private_addressing,
keyname=keypair, timeout=timeout)
else:
return reservation
@printinfo
def run_image(self,
image=None,
keypair=None,
group="default",
type=None,
zone=None,
min=1,
max=1,
block_device_map=None,
user_data=None,
private_addressing=False,
username="root",
password=None,
auto_connect=True,
clean_on_fail=True,
monitor_to_running = True,
return_reservation=False,
timeout=480,
**boto_run_args):
"""
:param image: image object or string image_id to create instances with
:param keypair: keypair to create instances with
:param group: security group to run instances in
:param type: vmtype to run instances as
        :param zone: availability zone (aka cluster, aka partition) to run instances in
:param min: minimum amount of instances to try to run
:param max: max amount of instances to try to run
:param user_data: user_data to run instances with
:param private_addressing: boolean to run instances without public ips
:param username: username for connecting ssh to instances
:param password: password for connnecting ssh to instances
:param auto_connect: boolean flag whether or not ssh connections should be automatically attempted
        :param clean_on_fail: boolean flag whether or not to terminate instances if the run fails
:param monitor_to_running: boolean flag whether or not to monitor instances to a running state
        :param block_device_map: block device map obj
:param timeout: time allowed before failing this operation
:return: list of euinstances
"""
reservation = None
try:
instances = []
if image is None:
images = self.get_all_images()
for emi in images:
if re.match("emi",emi.id):
image = emi
if not isinstance(image, Image):
image = self.get_emi(emi=str(image))
if image is None:
raise Exception("emi is None. run_instance could not auto find an emi?")
if not user_data:
user_data = self.enable_root_user_data
if private_addressing is True:
addressing_type = "private"
                auto_connect = False
else:
addressing_type = None
#In the case a keypair object was passed instead of the keypair name
if keypair:
if isinstance(keypair, KeyPair):
keypair = keypair.name
self.logger.debug('Euinstance list prior to running image...')
try:
self.print_euinstance_list()
except Exception, e:
self.logger.debug('Failed to print euinstance list before running image, err:' +str(e))
#self.logger.debug( "Attempting to run "+ str(image.root_device_type) +" image " + str(image) + " in group " + str(group))
cmdstart=time.time()
reservation = image.run(key_name=keypair,security_groups=[group],instance_type=type, placement=zone,
min_count=min, max_count=max, user_data=user_data, addressing_type=addressing_type,
block_device_map=block_device_map, **boto_run_args)
self.test_resources["reservations"].append(reservation)
            if (len(reservation.instances) < min) or (len(reservation.instances) > max):
                fail = "Reservation:" + str(reservation.id) + " returned " + str(len(reservation.instances)) + \
                       " instances, not within min(" + str(min) + ") and max(" + str(max) + ")"
                self.critical(fail)
if image.root_device_type == 'ebs':
self.wait_for_instances_block_dev_mapping(reservation.instances, timeout=timeout)
for instance in reservation.instances:
try:
self.logger.debug(str(instance.id)+':Converting instance to euinstance type.')
#convert to euinstances, connect ssh later...
if image.platform == 'windows':
eu_instance = WinInstance.make_euinstance_from_instance( instance,
self,
keypair=keypair,
username='Administrator',
password=password,
reservation=reservation,
private_addressing=private_addressing,
timeout=timeout,
cmdstart=cmdstart,
auto_connect=False
)
else:
eu_instance = EuInstance.make_euinstance_from_instance( instance,
self,
keypair=keypair,
username = username,
password=password,
reservation = reservation,
private_addressing=private_addressing,
timeout=timeout,
cmdstart=cmdstart,
auto_connect=False )
#set the connect flag in the euinstance object for future use
eu_instance.auto_connect = auto_connect
instances.append(eu_instance)
except Exception, e:
self.logger.debug(get_traceback())
raise Exception("Unable to create Euinstance from " + str(instance)+", err:\n"+str(e))
if monitor_to_running:
instances = self.monitor_euinstances_to_running(instances, timeout=timeout)
if return_reservation:
reservation.instances = instances
return reservation
return instances
except Exception, e:
trace = get_traceback()
self.logger.debug('!!! Run_instance failed, terminating reservation. Error:'+str(e)+"\n"+trace)
if reservation and clean_on_fail:
self.terminate_instances(reservation=reservation)
raise e
def wait_for_instances_block_dev_mapping(self,
instances,
poll_interval=1,
timeout=60):
waiting = copy.copy(instances)
elapsed = 0
good = []
failed = []
start = time.time()
self.logger.debug('wait_for_instance_block_dev_mapping started...')
while waiting and (elapsed < timeout):
elapsed = time.time() - start
for instance in waiting:
instance.update()
for failed_state in ['terminated', 'stopped','stopping']:
if instance.state == failed_state:
failed.append(instance)
if instance in waiting:
waiting.remove(instance)
if instance.root_device_type == 'ebs':
if instance.block_device_mapping and instance.block_device_mapping.current_value:
self.logger.debug('Instance block device mapping is populated:'+str(instance.id))
self.update_resources_with_volumes_from_instance_block_device_mapping(instance)
good.append(instance)
else:
good.append(instance)
self.print_block_device_map(instance.block_device_mapping)
for instance in good:
if instance in waiting:
waiting.remove(instance)
if waiting:
if not int(elapsed)%10:
for instance in waiting:
self.logger.debug('Waiting for instance block device mapping to be populated:'+str(instance.id))
time.sleep(poll_interval)
failed.extend(waiting)
if failed:
err_buf = 'Instances failed to populate block dev mapping after '+str(elapsed)+'/'+str(timeout)+' seconds: '
for instance in failed:
err_buf += str(instance.id)+', current state:'+str(instance.state)+', '
raise Exception(err_buf)
self.logger.debug('wait_for_instance_block_dev_mapping done. elapsed:'+str(elapsed))
def update_resources_with_volumes_from_instance_block_device_mapping(self, instance):
for device_name in instance.block_device_mapping:
device = instance.block_device_mapping.get(device_name)
if device.volume_id:
try:
volume = self.get_volume(volume_id=device.volume_id)
if not volume in self.test_resources['volumes']:
self.test_resources['volumes'].append(volume)
except Exception, e:
tb = get_traceback()
self.logger.debug("\n" + str(tb) + "\nError trying to retrieve volume:" + str(device.volume_id) +
' from instance:' + str(instance.id) + " block dev map, err:" + str(e))
@printinfo
def monitor_euinstances_to_running(self,instances, poll_interval=10, timeout=480):
if not isinstance(instances, types.ListType):
instances = [instances]
self.logger.debug("("+str(len(instances))+") Monitor_instances_to_running starting...")
ip_err = ""
#Wait for instances to go to running state...
self.monitor_euinstances_to_state(instances, failstates=['stopped', 'terminated','shutting-down'],timeout=timeout)
#Wait for instances in list to get valid ips, check for duplicates, etc...
try:
self.wait_for_valid_ip(instances, timeout=timeout)
except Exception, e:
tb = get_traceback()
ip_err = str(tb) + "\nWARNING in wait_for_valid_ip: "+str(e)
self.logger.debug(ip_err)
#Now attempt to connect to instances if connect flag is set in the instance...
waiting = copy.copy(instances)
good = []
elapsed = 0
start = time.time()
self.logger.debug("Instances in running state and wait_for_valid_ip complete, attempting connections...")
while waiting and (elapsed < timeout):
self.logger.debug("Checking "+str(len(waiting))+" instance ssh connections...")
elapsed = int(time.time()-start)
for instance in waiting:
self.logger.debug('Checking instance:'+str(instance.id)+" ...")
if instance.auto_connect:
try:
if isinstance(instance, WinInstance):
#First try checking the RDP and WINRM ports for access...
self.logger.debug('Do Security group rules allow winrm from this test machine:'+
str(self.does_instance_sec_group_allow(instance, protocol='tcp', port=instance.winrm_port)))
                            self.logger.debug('Do Security group rules allow rdp from this test machine:'+
                                              str(self.does_instance_sec_group_allow(instance, protocol='tcp', port=instance.rdp_port)))
instance.poll_for_ports_status(timeout=1)
instance.connect_to_instance(timeout=15)
self.logger.debug("Connected to instance:"+str(instance.id))
good.append(instance)
else:
#First try ping
self.logger.debug('Do Security group rules allow ping from this test machine:'+
str(self.does_instance_sec_group_allow(instance, protocol='icmp', port=0)))
self.ping(instance.ip_address, 2)
#now try to connect ssh or winrm
allow = "None"
try:
allow=str(self.does_instance_sec_group_allow(instance, protocol='tcp', port=22))
except:
pass
self.logger.debug('Do Security group rules allow ssh from this test machine:'+str(allow))
instance.connect_to_instance(timeout=15)
self.logger.debug("Connected to instance:"+str(instance.id))
good.append(instance)
except :
self.logger.debug(get_traceback())
pass
else:
good.append(instance)
for instance in good:
if instance in waiting:
waiting.remove(instance)
if waiting:
time.sleep(poll_interval)
if waiting:
buf = "Following Errors occurred while waiting for instances:\n"
buf += 'Errors while waiting for valid ip:'+ ip_err + "\n"
buf += "Timed out waiting:" + str(elapsed) + " to connect to the following instances:\n"
for instance in waiting:
buf += str(instance.id)+":"+str(instance.ip_address)+","
raise Exception(buf)
self.print_euinstance_list(good)
return good
@printinfo
def does_instance_sec_group_allow(self,
instance,
src_addr=None,
src_group=None,
protocol='tcp',
port=22):
if src_group:
assert isinstance(src_group,SecurityGroup) , \
'src_group({0}) not of type SecurityGroup obj'.format(src_group)
s = None
#self.logger.debug("does_instance_sec_group_allow:"+str(instance.id)+" src_addr:"+str(src_addr))
try:
if not src_group and not src_addr:
#Use the local test machine's addr
if not self.ec2_source_ip:
#Try to get the outgoing addr used to connect to this instance
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,socket.IPPROTO_UDP)
s.connect((instance.ip_address,1))
#set the tester's global source_ip, assuming it can re-used (at least until another method sets it to None again)
self.ec2_source_ip = s.getsockname()[0]
if self.ec2_source_ip == "0.0.0.0":
raise Exception('Test machine source ip detected:'+str(self.ec2_source_ip)+', tester may need ec2_source_ip set manually')
src_addr = self.ec2_source_ip
if src_addr:
self.logger.debug('Using src_addr:'+str(src_addr))
elif src_group:
                self.logger.debug('Using src_group:'+str(src_group))
else:
raise ValueError('Was not able to find local src ip')
groups = self.get_instance_security_groups(instance)
for group in groups:
if self.does_sec_group_allow(group,
src_addr=src_addr,
src_group=src_group,
protocol=protocol,
port=port):
self.logger.debug("Sec allows from test source addr: " +
str(src_addr) + ", src_group:" +
str(src_group) + ", protocol:" +
str(protocol) + ", port:" + str(port))
#Security group allows from the src/proto/port
return True
#Security group does not allow from the src/proto/port
return False
except Exception, e:
self.logger.debug(get_traceback() + "\nError in sec group check")
raise e
finally:
if s:
s.close()
def get_security_group(self, id=None, name=None):
        #Adding this as both a convenience to the user to separate euare groups from security groups
#Not sure if botos filter on group names and ids is reliable?
if not id and not name:
raise Exception('get_security_group needs either a name or an id')
        groups = self.get_all_security_groups(groupnames=[name] if name else None,
                                              group_ids=[id] if id else None)
for group in groups:
if not id or (id and group.id == id):
if not name or (name and group.name == name):
self.logger.debug('Found matching security group for name:'+str(name)+' and id:'+str(id))
return group
self.logger.debug('No matching security group found for name:'+str(name)+' and id:'+str(id))
return None
@printinfo
def does_sec_group_allow(self, group, src_addr=None, src_group=None,
protocol='tcp', port=22):
"""
Test whether a security group will allow traffic from a specific 'src' ip address to
a specific 'port' using a specific 'protocol'
:param group: Security group obj to use in lookup
:param src_addr: Source address to lookup against sec group rule(s)
:param src_group: Boto sec group to use in auth check
:param protocol: Protocol to lookup sec group rule against
:param port: Network port to lookup sec group rule against
"""
if src_group:
assert isinstance(src_group, SecurityGroup)
port = int(port)
protocol = str(protocol).strip().lower()
self.logger.debug('Security group:' + str(group.name) + ", src ip:" +
str(src_addr) + ", src_group:" + str(src_group) +
", proto:" + str(protocol) + ", port:" + str(port))
group = self.get_security_group(id=group.id, name=group.name)
for rule in group.rules:
g_buf =""
if str(rule.ip_protocol).strip().lower() == protocol:
for grant in rule.grants:
g_buf += str(grant)+","
self.logger.debug("rule#{0}: ports:{1}-{2}, grants:{3}"
.format(str(group.rules.index(rule)),
str(rule.from_port),
str(rule.to_port),
str(g_buf)))
from_port = int(rule.from_port)
to_port= int(rule.to_port)
if (to_port == 0 ) or (to_port == -1) or \
(port >= from_port and port <= to_port):
for grant in rule.grants:
if src_addr and grant.cidr_ip:
if self.is_address_in_network(src_addr, str(grant)):
self.logger.debug('sec_group DOES allow: group:"{0}"'
', src:"{1}", proto:"{2}", port:"{3}"'
.format(group.name,
src_addr,
protocol,
port))
return True
if src_group:
src_group_id = str(src_group.name) + \
"-" + (src_group.owner_id)
if ( src_group.id == grant.groupId ) or \
( grant.group_id == src_group_id ):
self.logger.debug('sec_group DOES allow: group:"{0}"'
', src_group:"{1}"/"{2}", '
'proto:"{3}", ''port:"{4}"'
.format(group.name,
src_group.id,
src_group.name,
protocol,
port))
return True
self.logger.debug('sec_group:"{0}" DOES NOT allow from: src_ip:"{1}", '
'src_group:"{2}", proto:"{3}", port:"{4}"'
.format(group.name, src_addr, src_group, protocol, port))
return False
@classmethod
@printinfo
def is_address_in_network(cls,ip_addr, network):
"""
:param ip_addr: Ip address ie: 192.168.1.5
:param network: Ip network in cidr notation ie: 192.168.1.0/24
:return: boolean true if ip is found to be in network/mask, else false
"""
ip_addr = str(ip_addr)
network = str(network)
# Check for 0.0.0.0/0 network first...
rem_zero = network.replace('0','')
if not re.search('\d', rem_zero):
return True
ipaddr = int(''.join([ '%02x' % int(x) for x in ip_addr.split('.') ]), 16)
netstr, bits = network.split('/')
netaddr = int(''.join([ '%02x' % int(x) for x in netstr.split('.') ]), 16)
mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
return (ipaddr & mask) == (netaddr & mask)
def get_instance_security_groups(self,instance):
"""
Definition: Look up and return all security groups this instance is referencing.
:param instance: instance or euinstance object to
:return:
"""
secgroups = []
groups = []
if hasattr(instance, 'security_groups') and instance.security_groups:
return instance.security_groups
if hasattr(instance, 'groups') and instance.groups:
groups = instance.groups
else:
if hasattr(instance, 'reservation') and instance.reservation:
res = instance.reservation
else:
res = self.get_reservation_for_instance(instance)
groups = res.groups
for group in groups:
secgroups.extend(self.get_all_security_groups(
groupnames=[str(group.name)]))
return secgroups
def get_reservation_for_instance(self, instance):
"""
Definition: Lookup and return reservation obj for this instance
:param instance: boto instance or euinstance obj to use for lookup
:return: :raise:
"""
if hasattr(self.ec2, 'get_all_reservations'):
res = self.get_all_reservations(instance_ids=instance.id)
if res and isinstance(res, types.ListType):
return res[0]
for res in self.get_all_instances():
for inst in res.instances:
if inst.id == instance.id:
if hasattr(instance,'reservation'):
instance.reservation = res
return res
raise Exception('No reservation found for instance:'+str(instance.id))
@printinfo
def monitor_euinstances_to_state(self,
instance_list,
state='running',
min=None,
poll_interval=10,
failstates=[],
timeout=120,
eof=True):
"""
:param instance_list: list of instances to monitor
:param state: state to monitor to, expected state
:param min: int min count of instances that need to succeed otherwise except out
:param poll_interval: int number of seconds between polls for instance status
:param timeout: time to wait before this method is considered to have failed
:param eof: boolean to indicate whether or not to exit on first failure
:return list of instances
"""
self.logger.debug('(' + str(len(instance_list)) + ") monitor_instances_to_state: '" + str(state) + "' starting....")
monitor = copy.copy(instance_list)
for instance in monitor:
if not isinstance(instance, EuInstance) and not isinstance(instance, WinInstance):
instance = self.convert_instance_to_euisntance(instance, auto_connect=False)
good = []
failed = []
elapsed = 0
start = time.time()
        failmsg = ""
        #If no min allowed successful instance count is given, set it to the length of the list provided.
if min is None:
min = len(instance_list)
while monitor and elapsed < timeout:
elapsed = int(time.time() - start)
self.logger.debug("\n------>Waiting for remaining "+str(len(monitor))+"/"+str(len(instance_list))+
" instances to go to state:"+str(state)+', elapsed:('+str(elapsed)+'/'+str(timeout)+")...")
for instance in monitor:
try:
instance.update()
bdm_root_vol_status = None
bdm_root_vol_id = None
if instance.root_device_type == 'ebs':
if not instance.bdm_root_vol:
try:
instance.bdm_root_vol = self.get_volume(volume_id = instance.block_device_mapping.get(instance.root_device_name).volume_id)
bdm_root_vol_id = instance.bdm_root_vol.id
bdm_root_vol_status = instance.bdm_root_vol.status
except: pass
else:
instance.bdm_root_vol.update()
bdm_root_vol_id = instance.bdm_root_vol.id
bdm_root_vol_status = instance.bdm_root_vol.status
if instance.laststate:
#fail fast on ebs backed instances that go into stopped stated unintentionally
if state != "stopped" and ( instance.laststate == 'pending' and instance.state == "stopped"):
raise Exception("Instance:"+str(instance.id)+" illegal state transition from "
+str(instance.laststate)+" to "+str(instance.state))
dbgmsg = ("Intended state:" + str(state)+": "+str(instance.id)+' Current state:'+str(instance.state)+', type:'+
str(instance.root_device_type) + ', backing volume:'+str(bdm_root_vol_id)+' status:'+
str(bdm_root_vol_status)+", elapsed:"+ str(elapsed)+"/"+str(timeout))
if instance.state == state:
self.logger.debug("SUCCESS "+ dbgmsg)
#This instance is in the correct state, remove from monitor list
good.append(instance)
else:
for failed_state in failstates:
if instance.state == failed_state:
raise Exception('FAILED STATE:'+ dbgmsg )
self.logger.debug("WAITING for "+dbgmsg)
except Exception, e:
failed.append(instance)
tb = get_traceback()
self.logger.debug('FAILED: Instance:'+str(instance.id)+",err:"+str(e)+"\n"+str(tb))
if eof:
self.logger.debug("EOF set to True, monitor_euinstances_to_state ending...")
raise e
                    if len(instance_list) - len(failed) < min:
                        self.logger.debug('Failed instance count exceeds the allowed minimum(' + str(min) + ") monitor_euinstances_to_state ending...")
                        raise e
else:
failmsg += str(e)+"\n"
#remove good instances from list to monitor
for instance in monitor:
if (instance in good) or (instance in failed):
monitor.remove(instance)
if monitor:
time.sleep(poll_interval)
self.print_euinstance_list(instance_list)
if monitor:
failmsg = "Some instances did not go to state:"+str(state)+' within timeout:'+str(timeout)+"\nFailed:"
for instance in monitor:
failed.append(instance)
failmsg += str(instance.id)+","
if eof:
raise Exception(failmsg)
            if len(instance_list) - len(failed) < min:
                self.logger.debug('Failed instance count exceeds the allowed minimum(' + str(min) + ") monitor_euinstances_to_state ending...")
                raise Exception(failmsg)
else:
self.logger.debug(failmsg)
def print_euinstance_list(self,
euinstance_list=None,
state=None,
instance_id=None,
reservation=None,
root_device_type=None,
zone=None,
key=None,
public_ip=None,
private_ip=None,
ramdisk=None,
kernel=None,
image_id=None
):
"""
:param euinstance_list: list of euinstance objs
:raise:
"""
plist = []
if not euinstance_list:
euinstance_list = []
instances = self.get_instances(state=state,
idstring=instance_id,
reservation=reservation,
rootdevtype=root_device_type,
zone=zone,
key=key,
pubip=public_ip,
privip=private_ip,
ramdisk=ramdisk,
kernel=kernel,
image_id=image_id)
for instance in instances:
if instance:
instance_res = getattr(instance, 'reservation', None)
euinstance_list.append(self.convert_instance_to_euisntance(
instance, reservation=instance_res, auto_connect=False))
if not euinstance_list:
self.logger.debug('No instances to print')
return
for instance in euinstance_list:
if not isinstance(instance,EuInstance) and not isinstance(instance, WinInstance):
self.logger.debug("print instance list passed non-EuInstnace type")
instance = self.convert_instance_to_euisntance(instance, auto_connect=False)
plist.append(instance)
first = plist.pop(0)
buf = first.printself(title=True, footer=True)
for instance in plist:
buf += instance.printself(title=False, footer=True)
self.logger.debug("\n"+str(buf)+"\n")
@printinfo
def wait_for_valid_ip(self, instances, regex="0.0.0.0", poll_interval=10, timeout = 60):
"""
        Wait for each instance's public and private IP to move off the invalid/unassigned pattern (default '0.0.0.0')
        :param instances: instance object, or list of instance objects, to check
        :param regex: regex pattern treated as an invalid/unassigned address
        :param poll_interval: seconds to sleep between polls
        :param timeout: Time in seconds to wait for IP to change
        :return: True on success
        :raise: Exception if an IP stays at the invalid pattern past the timeout
"""
#self.logger.debug("wait_for_valid_ip: Monitoring instances for valid ips...")
if not isinstance(instances, types.ListType):
monitoring = [instances]
else:
monitoring = copy.copy(instances)
elapsed = 0
good = []
start = time.time()
zeros = re.compile(regex)
while monitoring and (elapsed <= timeout):
elapsed = int(time.time()- start)
for instance in monitoring:
instance.update()
if hasattr(instance, 'ip_address') and instance.ip_address and \
(zeros.search(str(instance.ip_address)) or zeros.search(str(instance.private_ip_address))):
# Either public or private ip was still not populated
self.logger.debug(str(instance.id)+": WAITING for public ip. Current:"+str(instance.ip_address)+
", elapsed:"+str(elapsed)+"/"+str(timeout))
else:
self.logger.debug(str(instance.id)+": FOUND public ip. Current:"+str(instance.ip_address)+
", elapsed:"+str(elapsed)+"/"+str(timeout))
good.append(instance)
#clean up list outside of loop
for instance in good:
if instance in monitoring:
monitoring.remove(instance)
if monitoring:
time.sleep(poll_interval)
if monitoring:
buf = "Instances timed out waiting for a valid IP, elapsed:"+str(elapsed)+"/"+str(timeout)+"\n"
for instance in instances:
buf += "Instance: "+str(instance.id)+", public ip: "+str(instance.ip_address)+", private ip: "+str(instance.private_ip_address)+"\n"
raise Exception(buf)
self.check_system_for_dup_ip(instances=good)
self.logger.debug('Wait_for_valid_ip done')
def check_system_for_dup_ip(self, instances=None):
"""
Check system for instances with conflicting duplicate IPs.
Will raise exception at end of iterating through all running, pending, or starting instances with info
as to which instances and IPs conflict.
        If a list of instances is provided, an exception is only raised for conflicts involving instances
        in that list; all other conflicting IPs are recorded but ignored.
:param instances: optional list, or subset of instances to use in duplicate search.
"""
errbuf = ""
publist = {}
privlist = {}
self.logger.debug('Check_system_for_dup_ip starting...')
reslist = self.get_all_instances()
for res in reslist:
self.logger.debug("Checking reservation: "+str(res.id))
for instance in res.instances:
self.logger.debug('Checking instance '+str(instance.id).ljust(20)+', state:'+str(instance.state).ljust(20)+
' pubip:'+str(instance.ip_address).ljust(20)+
' privip:'+str(instance.private_ip_address).ljust(20))
if instance.state == 'running' or instance.state == 'pending' or instance.state == 'starting':
if instance.ip_address != '0.0.0.0':
if instance.ip_address in publist:
errbuf += "PUBLIC:"+str(instance.id)+"/"+str(instance.state)+"="+\
str(instance.ip_address)+" vs: "+\
str(publist[instance.ip_address])+"\n"
if instances and (instance in instances):
raise Exception("PUBLIC:"+str(instance.id)+"/"+str(instance.state)+"="+
str(instance.ip_address)+" vs: "+
str(publist[instance.ip_address]))
else:
publist[instance.ip_address] = str(instance.id+"/"+instance.state)
if instance.private_ip_address != '0.0.0.0':
if instance.private_ip_address in privlist:
errbuf += "PRIVATE:"+str(instance.id)+"/"+str(instance.state)+"="+\
str(instance.private_ip_address)+" vs: "+\
str(privlist[instance.private_ip_address])+"\n"
if instances and (instance in instances):
raise Exception("PRIVATE:"+str(instance.id)+"/"+str(instance.state)+"="+
str(instance.private_ip_address)+" vs: "+
str(privlist[instance.private_ip_address]))
else:
privlist[instance.private_ip_address] = str(instance.id+"/"+instance.state)
if not instances and errbuf:
raise Exception("DUPLICATE IPs FOUND:"+errbuf)
self.logger.debug("Done with check_system_for_dup_ip")
def convert_reservation_to_euinstance(self,
reservation,
username=None,
password=None,
keyname=None,
private_addressing=False,
timeout=60):
"""
Convert all instances in an entire reservation into eutester.euinstance.Euinstance objects.
:param reservation: reservation object to use in conversion
:param username: SSH user name of instance
:param password: SSH password
:param keyname: Private key file to use when connecting to the instance
:param timeout: Time in seconds to wait for successful SSH connection
:return:
"""
euinstance_list = []
keypair = None
if keyname is not None:
keypair = self.get_keypair(keyname)
auto_connect = True
if private_addressing:
auto_connect = False
for instance in reservation.instances:
if keypair is not None or password is not None:
try:
euinstance_list.append(
self.convert_instance_to_euisntance(instance,
keypair=keypair,
username = username,
password=password,
timeout=timeout,
auto_connect=auto_connect))
except Exception, e:
self.logger.debug(get_traceback())
euinstance_list.append(instance)
self.get_console_output(instance)
self.fail("Unable to create Euinstance from " + str(instance)+": "+str(e))
else:
euinstance_list.append(instance)
reservation.instances = euinstance_list
return reservation
def convert_instance_to_euisntance(self, instance, keypair=None,
username=None, password=None,
reservation=None, auto_connect=True,
timeout=120):
if instance.platform == 'windows':
username = username or 'Administrator'
instance = WinInstance.make_euinstance_from_instance(
instance,
self,
keypair=keypair,
username = username,
password=password,
reservation=reservation,
auto_connect=auto_connect,
timeout=timeout)
else:
username = username or 'root'
instance = EuInstance.make_euinstance_from_instance(
instance,
self,
keypair=keypair,
username = username,
password=password,
reservation=reservation,
auto_connect=auto_connect,
timeout=timeout)
if 'instances' in self.test_resources:
for x in xrange(0, len(self.test_resources['instances'])):
                ins = self.test_resources['instances'][x]
if ins.id == instance.id:
self.test_resources['instances'][x] = instance
return instance
def get_console_output(self, instance):
"""
Retrieve console output from an instance
:param instance: Instance ID or Instance object
:return: string
:raise: Exception on failure to get console output
"""
self.logger.debug("Attempting to get console output from: " + str(instance))
if isinstance(instance, Instance):
instance = instance.id
        # Assumes self.ec2 is the underlying boto EC2 connection (referenced elsewhere in this module);
        # calling self.get_console_output() here would recurse into this same method.
        output = self.ec2.get_console_output(instance_id=instance)
self.logger.debug(output.output)
return output
def get_keypair(self, name):
"""
Retrieve a boto.ec2.keypair object by its name
:param name: Name of keypair on the cloud
:return: boto.ec2.keypair object
:raise: Exception on failure to find keypair
"""
try:
return self.get_all_key_pairs([name])[0]
except IndexError, e:
raise Exception("Keypair: " + name + " not found")
def get_zones(self):
"""
Return a list of availability zone names.
:return: list of zone names
"""
zone_objects = self.get_all_zones()
zone_names = []
for zone in zone_objects:
zone_names.append(zone.name)
return zone_names
@printinfo
def get_instances(self,
state=None,
idstring=None,
reservation=None,
rootdevtype=None,
zone=None,
key=None,
pubip=None,
privip=None,
ramdisk=None,
kernel=None,
image_id=None,
filters=None):
"""
Return a list of instances matching the filters provided.
:param state: str of desired state
:param idstring: instance-id string
:param reservation: reservation-id
:param rootdevtype: 'instance-store' or 'ebs'
:param zone: Availablity zone
:param key: Keypair the instance was launched with
:param pubip: Instance public IP
:param privip: Instance private IP
:param ramdisk: Ramdisk ID string
:param kernel: Kernel ID string
:param image_id: Image ID string
:param filters: dict filters
:return: list of instances
"""
ilist = []
if isinstance(idstring, list):
instance_ids = idstring
        elif idstring:
            instance_ids = [str(idstring)]
else:
instance_ids = idstring
reservations = self.get_all_instances(instance_ids=instance_ids, filters=filters)
for res in reservations:
if ( reservation is None ) or (re.search(str(reservation), str(res.id))):
for i in res.instances:
#if (idstring is not None) and (not re.search(idstring, i.id)) :
# continue
if (state is not None) and (i.state != state):
continue
if (rootdevtype is not None) and (i.root_device_type != rootdevtype):
continue
if (zone is not None) and (i.placement != zone ):
continue
if (key is not None) and (i.key_name != key):
continue
if (pubip is not None) and (i.ip_address != pubip):
continue
if (privip is not None) and (i.private_ip_address != privip):
continue
if (ramdisk is not None) and (i.ramdisk != ramdisk):
continue
if (kernel is not None) and (i.kernel != kernel):
continue
if (image_id is not None) and (i.image_id != image_id):
continue
i.reservation = res
ilist.append(i)
return ilist
def get_connectable_euinstances(self,path=None,username=None, password=None, connect=True):
"""
        Convenience method, returns a list of all running instances for the current credentialed user
for which there are local keys at 'path'
:param path: Path to look for private keys
        :param username: username to use if path is not passed
:param password: password to use if path is not passed
:param connect: bool, Whether to create an ssh connection to the instances
:return:
"""
try:
euinstances = []
keys = self.get_all_current_local_keys(path=path) or []
for keypair in keys:
self.logger.debug('Looking for instances using keypair:'+keypair.name)
instances = self.get_instances(state='running',key=keypair.name) or []
for instance in instances:
if not connect:
keypair=None
euinstances.append(instance)
else:
euinstances.append(
self.convert_instance_to_euisntance(instance,
username=username,
password=password,
keypair=keypair ))
return euinstances
except Exception, e:
traceback.print_exc()
self.logger.debug("Failed to find a pre-existing instance we can connect to:"+str(e))
pass
def get_all_attributes(self, obj, verbose=True):
"""
Get a formatted list of all the key pair values pertaining to the object 'obj'
:param obj: Object to extract information from
:param verbose: Print key value pairs
:return: Buffer of key value pairs
"""
buf=""
alist = sorted(obj.__dict__)
for item in alist:
if verbose:
print str(item)+" = "+str(obj.__dict__[item])
buf += str(item)+" = "+str(obj.__dict__[item])+"\n"
return buf
def terminate_instances(self, reservation=None, timeout=480):
"""
Terminate instances in the system
:param reservation: Reservation object to terminate all instances in, default is to terminate all instances
:raise: Exception when instance does not reach terminated state
"""
### If a reservation is not passed then kill all instances
aggregate_result = False
instance_list = []
monitor_list = []
if reservation and not isinstance(reservation, types.ListType):
if isinstance(reservation, Reservation):
instance_list = reservation.instances or []
elif isinstance(reservation, Instance):
instance_list.append(reservation)
else:
raise Exception('Unknown type:' + str(type(reservation)) + ', for reservation passed to terminate_instances')
else:
if reservation is None:
reservation = self.get_all_instances()
#first send terminate for all instances
for res in reservation:
if isinstance(res, Reservation):
instance_list.extend(res.instances)
elif isinstance(res, Instance):
instance_list.append(res)
else:
raise Exception('Need type instance or reservation in terminate_instances. type:' + str(type(res)))
for instance in instance_list:
self.logger.debug( "Sending terminate for " + str(instance))
try:
instance.terminate()
instance.update()
if instance.state != 'terminated':
monitor_list.append(instance)
else:
self.logger.debug('Instance: ' + str(instance.id) + ' in terminated state:' + str(instance.state))
except EC2ResponseError, e:
if e.status == 400:
pass
else:
raise e
try:
self.print_euinstance_list(euinstance_list=monitor_list)
except:
pass
try:
self.monitor_euinstances_to_state(instance_list=monitor_list, state='terminated', timeout=timeout)
aggregate_result = True
except Exception, e:
tb = traceback.format_exc()
self.logger.debug(str(tb) + '\nCaught Exception in monitoring instances to terminated state:' + str(e))
return aggregate_result
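    # Illustrative usage sketch (not part of the original source): terminate_instances()
    # accepts a Reservation, an Instance, a list of either, or nothing at all (terminate
    # everything), and only returns True if every instance was monitored to 'terminated':
    #
    #   reservation = ec2ops.run_instance(image)   # 'ec2ops' and 'image' assumed from earlier steps
    #   if not ec2ops.terminate_instances(reservation, timeout=600):
    #       raise Exception('Instances did not reach terminated state in time')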
def stop_instances(self,reservation, timeout=480):
"""
Stop all instances in a reservation
:param reservation: boto.ec2.reservation object
:raise: Exception when instance does not reach stopped state
"""
instance_list = reservation
if isinstance(reservation, Reservation):
instance_list = reservation.instances
for instance in instance_list:
self.logger.debug( "Sending stop for " + str(instance) )
instance.stop()
if self.wait_for_reservation(reservation, state="stopped", timeout=timeout) is False:
return False
return True
def start_instances(self, reservation, timeout=480):
"""
Start all instances in a reservation
:param reservation: boto.ec2.reservation object or list of instances
:raise: Exception when instance does not reach running state
"""
instance_list = reservation
if isinstance(reservation, Reservation):
instance_list = reservation.instances
for instance in instance_list:
self.logger.debug( "Sending start for " + str(instance) )
instance.start()
if self.wait_for_reservation(reservation, state="running", timeout=timeout) is False:
return False
return True
def start_bundle_instance_task( self,
instance,
bucket_name = None,
prefix = None,
):
"""
REQUIRED PARAMETERS
:rtype : BundleInstanceTask
:param instance: boto instance to bundle
        :param bucket_name: Name of the bucket to upload to. Default: '<instance id>-<count of images already registered for this instance>'
        :param prefix: Prefix for the image file name. Default: 'bundleof-<instance id>'
:return : bundle task object
"""
if not bucket_name:
# Count images already registered with this instance id for concurrent tests
try:
id_count = len(self.get_images(location=instance.id))
except:
id_count = 0
bucket_name = str(instance.id) + "-" \
+ str(id_count)
prefix = prefix or 'bundleof-' + str(instance.id)
s3_upload_policy = self.generate_default_s3_upload_policy(bucket_name,prefix)
bundle_task = self.bundle_instance(instance.id, bucket_name, prefix, s3_upload_policy)
self.print_bundle_task(bundle_task)
return bundle_task
def print_bundle_task(self,bundle, header=True, footer=True, printout=True):
"""
Prints formatted output of bundle task attributes.
:param bundle: BundleInstanceTask object to be printed
:param header: boolean to print header containing column titles
:param footer: boolean to print footer containing closing row line
:param printout: boolean to print output using self.logger.debug, else will return a buffer to be printed later.
:return: string containing formatted output.
"""
id_len = 15
instance_id_len = 12
bucket_len = 36
prefix_len = 36
state_len = 15
start_time_len = 25
update_time_len = 25
buf = ""
line = "-----------------------------------------------------------------------------------------------------" \
"--------------------------------------------------------------"
if header:
buf += str("\n" + line +"\n")
buf += str('BUNDLE_ID').center(id_len) + '|' \
+ str('INSTANCE').center(instance_id_len) + '|' \
+ str('BUCKET').center(bucket_len) + '|' \
+ str('PREFIX').center(prefix_len) + '|' \
+ str('STATE').center(state_len) + '|' \
+ str('START_TIME').center(start_time_len) + '|' \
+ str('UPDATE_TIME').center(update_time_len) + '\n'
buf += str(line + "\n")
buf += str(bundle.id).center(id_len) + '|' \
+ str(bundle.instance_id).center(instance_id_len) + '|' \
+ str(bundle.bucket).center(bucket_len) + '|' \
+ str(bundle.prefix).center(prefix_len) + '|' \
+ str(bundle.state).center(state_len) + '|' \
+ str(bundle.start_time).center(start_time_len) + '|' \
+ str(bundle.update_time).center(update_time_len)
if footer:
buf += str("\n" + line)
if printout:
self.logger.debug(buf)
return buf
def bundle_instance_monitor_and_register(self,
instance,
bucket_name=None,
prefix=None,
poll_interval_seconds=20,
timeout_minutes=25):
"""
Attempts to start a bundle task and monitor it to completion.
:param instance: boto instance to bundle
        :param bucket_name: Name of the bucket to upload to. Default: '<instance id>-<count>' (see start_bundle_instance_task)
        :param prefix: Prefix for the image file name. Default: 'bundleof-<instance id>'
:param poll_interval_seconds: Seconds to wait between polling for bundle task status
:param timeout_minutes: int, minutes to wait before timing out.
:return : image
"""
return_dict = {}
return_dict['manifest'] = None
return_dict['image'] = None
bundle_task = self.start_bundle_instance_task(instance,
bucket_name=bucket_name,
prefix=prefix,
)
self.logger.debug("bundle_instance_monitor_and_register: Got bundle task id:" +str(bundle_task.id)
+ ", now monitor to completed state")
self.monitor_bundle_tasks(bundle_task.id,
poll_interval_seconds=poll_interval_seconds,
timeout_minutes=timeout_minutes)
self.logger.debug("bundle_instance_monitor_and_register:" + str(bundle_task.id)
+ " monitored to completed, now get manifest and register...")
manifest = self.get_manifest_string_from_bundle_task(bundle_task)
image = self.register_manifest(manifest, virtualization_type=instance.virtualization_type)
self.logger.debug("bundle_instance_monitor_and_register:" + str(bundle_task.id)
+ ", registered as image:" + str(image.id))
self.logger.debug("bundle_instance_monitor_and_register:" + str(bundle_task.id)
+ ", now make sure original instance " + (instance.id) + " returns to running state...")
self.monitor_euinstances_to_state(instance_list=[instance],
state='running',
timeout=600)
return image
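    # Illustrative usage sketch (not part of the original source): end-to-end bundling of a
    # running instance-store instance into a newly registered image, using an assumed
    # 'ec2ops' object and an instance found via get_instances():
    #
    #   instance = ec2ops.get_instances(state='running')[0]
    #   image = ec2ops.bundle_instance_monitor_and_register(instance,
    #                                                       poll_interval_seconds=30,
    #                                                       timeout_minutes=30)
    #   ec2ops.logger.debug('Bundled and registered new image: ' + str(image.id))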
def get_bundle_task_by_id(self,bundle_task_id):
bundles = self.get_all_bundle_tasks(bundle_ids=[bundle_task_id])
if bundles:
return bundles[0]
def get_manifest_string_from_bundle_task(self,bundle):
"""
Create a manifest string from a BundleInstanceTask obj
:param bundle: BundleInstanceTask
:return: manifest string
"""
return str(bundle.bucket) + "/" + str(bundle.prefix) + ".manifest.xml"
def monitor_bundle_tasks(self, bundle_list, poll_interval_seconds=20, timeout_minutes=25, eof=True):
"""
Attempts to monitor the state of the bundle task id provided until completed or failed.
        :param bundle_list: BundleInstanceTask object, bundle id string, or a list of either, to poll status for
:param poll_interval_seconds: sleep period in seconds between polling for bundle task status
:param timeout_minutes: timeout specified in minutes
:param eof: boolean, end on first failure otherwise delay error until all bundle tasks have completed or failed
"""
monitor_list = []
fail_msg = ""
if not isinstance(bundle_list, types.ListType):
bundle_list = [bundle_list]
for bundle in bundle_list:
if isinstance(bundle, BundleInstanceTask ):
monitor_list.append(bundle.id)
else:
monitor_list.append(bundle)
start = time.time()
elapsed = 0
timeout = timeout_minutes * 60
while monitor_list and elapsed < timeout:
            for bundle_id in copy.copy(monitor_list):  # iterate a copy; items are removed from monitor_list below
self.logger.debug('Waiting for bundle task:' + str(bundle_id) + ' to finish. Elapsed:' + str(elapsed))
try:
bundle_task = self.get_bundle_task_by_id(bundle_id)
if bundle_task:
self.print_bundle_task(bundle_task)
                    else:
                        self.logger.debug(str(bundle_id) + ": Assuming bundle task is complete, fetch came back empty?")
                        monitor_list.remove(bundle_id)
                        continue
if bundle_task.state is None or bundle_task.state == 'none':
raise Exception(str(bundle_id) + ": Bundle task state err, state is: '"
+ str(bundle_task.state) + "' in monitor")
if bundle_task.state == 'failed':
raise Exception(str(bundle_id) + ": Bundle task reporting failed state during monitor")
if bundle_task.state == 'complete':
self.logger.debug(str(bundle_id) +": Bundle task reported state is completed during monitor")
monitor_list.remove(bundle_id)
except Exception, e:
fail_msg += 'Monitor_bundle_task ERROR: '+str(e) + "\n"
if eof:
raise Exception('Monitor_bundle_task ERROR: '+str(e))
else:
monitor_list.remove(bundle_id)
time.sleep(poll_interval_seconds)
elapsed = int(time.time()-start)
if fail_msg:
raise Exception(fail_msg)
return bundle_list
def register_manifest(self,
manifest,
root_device_name=None,
description=None,
architecture=None,
virtualization_type=None,
platform=None,
bdmdev=None,
name=None,
ramdisk=None,
kernel=None):
"""
Attempts to register the provided manifest and return the image id created by it
:param manifest: manifest string to register
:return: : image id string
"""
image = self.register_image(manifest,
root_device_name=root_device_name,
description=description,
architecture=architecture,
virtualization_type=virtualization_type,
platform=platform,
bdmdev=bdmdev,
name=name,
ramdisk=ramdisk,
kernel=kernel)
#check to see if really registered by getting image obj to be returned
try:
image_obj = self.get_emi(emi=image)
except Exception, e:
raise Exception('Failed to retrieve image after registering. Image:' + str(image) + ", err:" + str(e))
self.logger.debug("Registered '" + str(manifest) + "as image:" + str(image))
return image_obj
def _register_image_custom_params(self,
name=None,
description=None,
image_location=None,
architecture=None,
kernel_id=None,
ramdisk_id=None,
root_device_name=None,
block_device_map=None,
dry_run=False,
virtualization_type=None,
sriov_net_support=None,
snapshot_id=None,
platform=None,
**custom_params):
'''
Register method to allow testing of 'custom_params' dict if provided
'''
params = custom_params or {}
if name:
params['Name'] = name
if description:
params['Description'] = description
if architecture:
params['Architecture'] = architecture
if kernel_id:
params['KernelId'] = kernel_id
if ramdisk_id:
params['RamdiskId'] = ramdisk_id
if image_location:
params['ImageLocation'] = image_location
if platform:
params['Platform'] = platform
if root_device_name:
params['RootDeviceName'] = root_device_name
if snapshot_id:
root_vol = BlockDeviceType(snapshot_id=snapshot_id)
block_device_map = BlockDeviceMapping()
block_device_map[root_device_name] = root_vol
if block_device_map:
block_device_map.ec2_build_list_params(params)
if dry_run:
params['DryRun'] = 'true'
if virtualization_type:
params['VirtualizationType'] = virtualization_type
if sriov_net_support:
params['SriovNetSupport'] = sriov_net_support
rs = self.get_object('RegisterImage', params,
ResultSet, verb='POST')
image_id = getattr(rs, 'imageId', None)
return image_id
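    # Illustrative usage sketch (not part of the original source): because the method above
    # builds the raw 'RegisterImage' request dict itself, arbitrary extra request parameters
    # can be injected for negative/experimental testing, e.g. (values are hypothetical):
    #
    #   image_id = ec2ops._register_image_custom_params(
    #       name='custom-param-test',
    #       image_location='mybucket/myprefix.manifest.xml',
    #       virtualization_type='hvm',
    #       BogusParameter='value-the-service-should-reject')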
def create_image(self, instance, name, description=None, no_reboot=False, block_device_mapping=None, dry_run=False,
timeout=600):
"""
        :type instance: boto.ec2.instance.Instance or string
        :param instance: the instance (or instance id) to create the image from.
:type name: string
:param name: The name of the new image
:type description: string
:param description: An optional human-readable string describing
the contents and purpose of the AMI.
:type no_reboot: bool
:param no_reboot: An optional flag indicating that the
bundling process should not attempt to shutdown the
instance before bundling. If this flag is True, the
responsibility of maintaining file system integrity is
left to the owner of the instance.
:type block_device_mapping: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_mapping: A BlockDeviceMapping data structure
describing the EBS volumes associated with the Image.
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:type timeout: int
:param timeout: Time to allow image to get to "available" state.
:raise Exception: On not reaching the correct state or when more than one image is returned
"""
if isinstance(instance, Instance):
instance_id = instance.id
else:
instance_id = instance
        # Delegate to the underlying boto EC2 connection; calling self.create_image here
        # would recurse forever. Assumes the connection is exposed as self.ec2, as it is
        # elsewhere in this class (e.g. show_addresses, show_vm_types).
        image_id = self.ec2.create_image(instance_id, name=name, description=description, no_reboot=no_reboot,
                                         block_device_mapping=block_device_mapping, dry_run=dry_run)
def get_emi_state():
images = self.get_all_images(image_ids=[image_id])
if len(images) == 0:
raise Exception("Image not found after sending create image request: " + image_id)
elif len(images) == 1:
state = images[0].state
self.logger.debug( image_id + " returned state: " + state)
return state
else:
raise Exception("More than one image returned for: " + image_id)
self.wait_for_result(get_emi_state, "available", timeout=timeout,poll_wait=20)
return image_id
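    # Illustrative usage sketch (not part of the original source): create an EBS-backed image
    # from a running instance and wait for it to reach the 'available' state:
    #
    #   image_id = ec2ops.create_image(instance, name='my-ebs-image',
    #                                  description='image created during test',
    #                                  no_reboot=True, timeout=900)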
def get_all_conversion_tasks(self, taskid=None):
params = {}
if taskid:
params['ConversionTaskId'] = str(taskid)
return self.get_list('DescribeConversionTasks',
params,
[('item', ConversionTask),
('euca:item', ConversionTask)],
verb='POST')
def get_conversion_task(self, taskid):
params = {'ConversionTaskId':str(taskid)}
task = self.get_object('DescribeConversionTasks',
params,
ConversionTask,
verb='POST')
if not task:
raise ResourceNotFoundException('"{0}". Conversion task not found'
.format(taskid))
return task
def monitor_conversion_tasks(self,
tasks,
states='completed',
time_per_gig=90,
base_timeout=600,
interval=10,
exit_on_failure=False):
"""
Monitors a list a task or list of tasks. Will monitor each
task's state to the provided 'state', or until failure, timeout.
Note: timeout is calculated by size of the largest task multiplied by
'time_per_gig' added to the 'base_timeout'.
:param tasks: list of tasks.
        :param states: string or list of strings representing the desired state(s) to
                       monitor to (pending, active, cancelled, completed, failed)
:param time_per_gig: time in seconds per largest task size in GB
to append to base timeout
:param base_timeout: base timeout in seconds
:param interval: seconds between polling tasks
:param exit_on_failure: Will stop monitoring and raise an exception
upon first found failure. Otherwise will
continue monitoring remaining tasks in list
and raise the error when all tasks are
complete or timed out.
"""
err_buf = ""
monitor_list = []
if not isinstance(states, types.ListType):
states = [states]
#Sanitize provided list...
if not isinstance(tasks, types.ListType):
tasks = [tasks]
for task in tasks:
if (not isinstance(task,ConversionTask) and
isinstance(task,types.StringType)):
task = self.get_conversion_task(taskid=task)
monitor_list.append(task)
checking_list = copy.copy(monitor_list)
done_list = []
start = time.time()
elapsed = 0
timeout = 0
for task in checking_list:
for im in task.importvolumes:
task_timeout = int(im.volume_size) * int(time_per_gig)
if task_timeout > timeout:
timeout = task_timeout
timeout += base_timeout
while checking_list and elapsed < timeout:
for task in checking_list:
task.update()
self.logger.debug(task)
#If the task volume is present add it to the resources list.
found = False
for vol in task.volumes:
for resvol in self.test_resources['volumes']:
if resvol.id == vol.id:
found = True
break
if not found and not vol in self.test_resources['volumes']:
self.test_resources['volumes'].append(vol)
found = False
if task.instanceid:
for resins in self.test_resources['reservations']:
if resins.id == task.instanceid:
found = True
break
if resins.id == task.instance.reservation.id:
found = True
break
if not found:
ins = self.get_instances(idstring=task.instanceid)
if ins:
ins = ins[0]
if not ins in self.test_resources['reservations']:
self.test_resources['reservations'].append(ins)
#notfound flag is set if task is not found during update()
if task.notfound:
err_msg = 'Task "{0}" not found after elapsed:"{1}"'\
.format(task.conversiontaskid, elapsed)
err_buf += "\n" + err_msg
self.logger.debug(err_msg)
done_list.append(task)
continue
self.logger.debug('Monitoring task:"{0}:{1}", elapsed:'
'"{2}/{3}"'
.format(task.conversiontaskid,
task.state,
elapsed,
timeout))
task_state = task.state.lower()
in_state = False
#Check state of task against all desired states provided
for state in states:
if task_state == state:
in_state = True
break
if in_state:
self.logger.debug('Task:"{0}" found in desired state:"{1}"'.
format(task.conversiontaskid, task.state))
done_list.append(task)
continue
                # Fail fast for tasks found in a final state that doesn't match
# the desired state provided
for final_state in ["completed", "cancelled", "failed"]:
if re.search(final_state, task_state):
err_msg = ('Task "{0}" found in a final state:"{1}" '
'after elapsed:"{2}", msg:"{3}"'
.format(task.conversiontaskid,
task.state,
elapsed,
task.statusmessage))
err_buf += "\n" + err_msg
self.logger.debug(err_msg)
done_list.append(task)
continue
try:
self.print_conversion_task_list(clist=monitor_list)
except Exception as PE:
self.logger.debug('failed to print conversion task list, err:' +
str(PE))
if exit_on_failure and err_buf:
break
for done_task in done_list:
if done_task in checking_list:
checking_list.remove(done_task)
if checking_list:
self.logger.debug('Waiting for "{0}" remaining tasks to reach '
'desired state:"{1}". Sleeping:"{2}"'
                                  .format(len(checking_list), states, interval))
time.sleep(interval)
elapsed = int(time.time() - start)
self.print_conversion_task_list(clist=tasks)
#Any tasks still in checking_list are failures
for task in checking_list:
err_buf += ('Monitor complete. Task "{0}:{1}" not in desired '
'state "{2}" after elapsed:"{3}"\n'
.format(task.conversiontaskid,
task.state,
                                states,
elapsed))
if err_buf:
err_buf = "Exit on first failure set to:" + str(exit_on_failure) \
+ "\n" + err_buf
raise Exception('Monitor conversion tasks failures detected:\n'
+ str(err_buf))
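    # Illustrative usage sketch (not part of the original source): monitor a batch of
    # import/conversion tasks to completion; the timeout grows with the size of the largest
    # task (time_per_gig seconds per GB) on top of the base timeout:
    #
    #   tasks = ec2ops.get_all_conversion_tasks()
    #   ec2ops.monitor_conversion_tasks(tasks, states='completed', time_per_gig=90,
    #                                   base_timeout=600, exit_on_failure=False)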
def print_conversion_task_list(self,
clist=None,
doprint=True,
printmethod=None):
clist = clist or self.get_all_conversion_tasks()
printmethod = printmethod or self.logger.debug
taskidlen = 19
statusmsglen = 24
availzonelen=14
volumelen=16
snaplen=13
instancelen=13
imagelen=13
header = ('TASKID'.center(taskidlen) + " | " +
'SNAPSHOTS'.center(snaplen) + " | " +
'INSTANCE'.center(instancelen) + " | " +
'IMAGE ID'.center(imagelen) + " | " +
'ZONE'.center(availzonelen) + " | " +
'VOLUMES'.center(volumelen) + " | " +
'STATUS MSG'.center(statusmsglen) + " |\n" )
line = ""
for x in xrange(0, len(header)):
line += '-'
line += "\n"
buf = "\n" + line + header + line
for task in clist:
sizestr = None
instancestr = "???"
instancestatus = ""
imagesize = None
vollist = []
volbytes = []
for importvol in task.importvolumes:
bytesconverted = importvol.bytesconverted
volume_id = importvol.volume_id
if importvol.image:
imagesize = long(importvol.image.size)
if imagesize is not None:
sizegb = "%.3f" % float(
long(imagesize) / float(1073741824))
gbconverted = "%.3f" % float(
long(bytesconverted) / float(1073741824))
sizestr = ("{0}/{1}gb".format(gbconverted, sizegb))
vollist.append(str(volume_id))
volbytes.append(sizestr)
volumes = ",".join(vollist)
volbytescon = ",".join(volbytes)
volstatus = ",".join([str('(' + str(vol.status) + ':' +
str(vol.size) + ')')
for vol in task.volumes]) or "???"
snaps = ",".join([str(snap.id ) for snap in task.snapshots]) or \
"???"
snapstatus = ",".join([str('(' + snap.status + ')')
for snap in task.snapshots])
if task.instance:
instancestr = str(task.instance.id)
instancestatus = '(' + str(task.instance.state) + ')'
image_id = task.image_id or "???"
buf += (str(task.conversiontaskid).center(taskidlen) + " | " +
str(snaps).center(snaplen) + " | " +
str(instancestr).center(instancelen) + " | " +
str(image_id ).center(imagelen) + " | " +
str(task.availabilityzone).center(availzonelen) + " | " +
str(volumes).center(volumelen) + " | " +
str(task.statusmessage[:statusmsglen]).ljust(statusmsglen)
+ " |\n")
buf += (str('(' + task.state + ')').center(taskidlen) + " | " +
str(snapstatus).center(snaplen) + " | " +
str(instancestatus).center(instancelen) + " | " +
str('').center(imagelen) + " | " +
str('').center(availzonelen) + " | " +
str(volstatus).center(volumelen) + " | " +
str(task.statusmessage[
statusmsglen:(2*statusmsglen)]).ljust(statusmsglen)
+ " |\n")
buf += (str('').center(taskidlen) + " | " +
str('').center(snaplen) + " | " +
str('').center(instancelen) + " | " +
str('').center(imagelen) + " | " +
str('').center(availzonelen) + " | " +
str(volbytescon).center(volumelen) + " | " +
str(task.statusmessage[
(2*statusmsglen):(3*statusmsglen)]).ljust(statusmsglen)
+ " |\n")
buf += line
if doprint:
printmethod(buf)
return buf
def cancel_conversion_tasks(self, tasks, timeout=180):
tasks = tasks or self.test_resources['conversion_tasks']
if not isinstance(tasks, types.ListType):
tasks = [tasks]
printbuf = self.print_conversion_task_list(clist=tasks, doprint=False)
self.logger.debug('Cancel Conversion task list...\n' + str(printbuf))
cancel_tasks = copy.copy(tasks)
for task in tasks:
task.update()
for state in ['canceled', 'failed', 'completed']:
if task.state == state:
cancel_tasks.remove(task)
break
for task in cancel_tasks:
task.cancel()
self.monitor_conversion_tasks(tasks=cancel_tasks, states=['canceled'])
printbuf = self.print_conversion_task_list(clist=tasks, doprint=False)
self.logger.debug('Done with canceling_conversion_tasks...' + str(printbuf))
def cleanup_conversion_task_resources(self, tasks=None):
tasks = tasks or self.test_resources['conversion_tasks']
if not isinstance(tasks, types.ListType):
tasks = [tasks]
error_msg = ""
try:
self.cancel_conversion_tasks(tasks)
except Exception as CE:
tb = get_traceback()
self.critical('Failed to cancel some tasks:' + str(CE))
for task in tasks:
self.logger.debug('Attempting to delete all resources associated '
'with task: "{0}"'
                              .format(getattr(task, 'id', 'UNKNOWN_ID')))
try:
assert isinstance(task,ConversionTask)
task.update()
try:
if task.instance:
self.terminate_single_instance(task.instance)
except Exception, e:
tb = get_traceback()
error_msg += str(tb) + '\n"{0}":Cleanup_error:"{1}"\n'\
.format(task.conversiontaskid, str(e))
try:
if task.image_id:
image = self.get_images(emi=task.image_id)
if image:
self.delete_image(image=image)
except Exception, e:
tb = get_traceback()
error_msg += str(tb) + '\n"{0}":Cleanup_error:"{1}"\n'\
.format(task.conversiontaskid, str(e))
try:
if task.snapshots:
self.delete_snapshots(snapshots=task.snapshots)
except Exception, e:
tb = get_traceback()
error_msg += str(tb) + '\n"{0}":Cleanup_error:"{1}"\n'\
.format(task.conversiontaskid, str(e))
try:
if task.volumes:
self.delete_volumes(volume_list=task.volumes)
except Exception, e:
tb = get_traceback()
error_msg += str(tb) + '\n"{0}":Cleanup_error:"{1}"\n'\
.format(task.conversiontaskid, str(e))
except Exception as TE:
tb = get_traceback()
error_msg += '{0}\n"{1}" Failed to cleanup task, err:"{1}"'\
.format(str(tb), getattr(task, 'id', 'UNKOWN_ID'), str(TE))
if error_msg:
raise Exception(error_msg)
def create_web_servers(self, keypair, group, zone, port=80, count=2, image=None, filename="test-file", cookiename="test-cookie"):
if not image:
image = self.get_emi(root_device_type="instance-store", not_location="loadbalancer", not_platform="windows")
reservation = self.run_instance(image, keypair=keypair, group=group, zone=zone, min=count, max=count)
self.authorize_group(group=group,port=port)
### TODO edit this so that the proper port is open on the apache instance
for instance in reservation.instances:
assert isinstance(instance, EuInstance)
try:
instance.sys("which apt-get", code=0)
## Debian based Linux
instance.sys("apt-get update", code=0)
instance.sys("apt-get install -y apache2", code=0)
instance.sys("echo \"" + instance.id +"\" > /var/www/" + filename)
instance.sys("echo \"CookieTracking on\" >> /etc/apache2/apache2.conf")
instance.sys("echo CookieName " + cookiename +" >> /etc/apache2/apache2.conf")
except sshconnection.CommandExitCodeException, e:
### Enterprise Linux
instance.sys("yum install -y httpd", code=0)
instance.sys("echo \"" + instance.id +"\" > /var/www/html/" + filename)
instance.sys("echo \"CookieTracking on\" >> /etc/httpd/conf/httpd.conf")
instance.sys("echo CookieName " + cookiename +" >> /etc/httpd/conf/httpd.conf")
instance.sys("service httpd start")
instance.sys("chkconfig httpd on")
return (reservation, filename)
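    # Illustrative usage sketch (not part of the original source): spin up two apache-backed
    # web servers (commonly used as load balancer back ends), assuming 'keypair', 'group' and
    # 'zone' were created earlier in the test:
    #
    #   reservation, filename = ec2ops.create_web_servers(keypair=keypair, group=group,
    #                                                     zone=zone, port=80, count=2)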
def generate_default_s3_upload_policy(self, bucket, prefix, expiration=24, acl='ec2-bundle-read', encode=True):
"""
Generates s3 upload policy for bundle instance operation
:param bucket: bucket string
:param prefix: prefix string
:param expiration: int representing hours
:param acl: acl to be used
:return: s3 upload encoded policy
"""
delta = timedelta(hours=expiration)
expiration_time = (datetime.utcnow() + delta).replace(microsecond=0)
expiration_str = expiration_time.isoformat()
policy = '{"expiration": "%s",' % expiration_str + \
'"conditions": [' + \
'{"bucket": "%s" },' % bucket + \
'{"acl": "%s" },' % acl + \
'["starts-with", "$key", "%s"]' % prefix + \
']' + \
'}'
if encode:
policy = base64.b64encode(policy)
return policy
def sign_policy(self, policy):
my_hmac = hmac.new(self.aws_secret_access_key, policy, digestmod=hashlib.sha1)
return base64.b64encode(my_hmac.digest())
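    # Illustrative usage sketch (not part of the original source): bundle-instance requests
    # need a base64 encoded S3 upload policy, which the two helpers above can produce and
    # sign with the account's secret key:
    #
    #   policy = ec2ops.generate_default_s3_upload_policy('mybucket', 'myprefix', expiration=24)
    #   signature = ec2ops.sign_policy(policy)
    #   bundle_task = ec2ops.bundle_instance(instance.id, 'mybucket', 'myprefix', policy)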
def get_euzones(self, zones=None):
ret_list = []
if zones is not None and not isinstance(zones, types.ListType):
get_zones = [zones]
else:
get_zones = zones
myzones = self.get_all_zones(zones=get_zones)
for zone in myzones:
ret_list.append(EuZone.make_euzone_from_zone(zone, self))
return ret_list
def get_vm_type_list_from_zone(self, zone):
self.logger.debug('Looking up zone:' + str(zone))
euzone = self.get_euzones(zone)[0]
return euzone.vm_types
def get_vm_type_from_zone(self,zone, vmtype_name):
vm_type = None
type_list = self.get_vm_type_list_from_zone(zone)
for type in type_list:
if type.name == vmtype_name:
vm_type = type
break
return vm_type
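    # Illustrative usage sketch (not part of the original source): look up the definition of
    # the 'm1.small' VM type as advertised by availability zone 'one' and dump its attributes
    # using the get_all_attributes() helper defined earlier in this class:
    #
    #   vmtype = ec2ops.get_vm_type_from_zone('one', 'm1.small')
    #   if vmtype:
    #       ec2ops.logger.debug(ec2ops.get_all_attributes(vmtype, verbose=False))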
def print_block_device_map(self,block_device_map, printmethod=None ):
printmethod = printmethod or self.logger.debug
buf = '\n'
device_w = 16
snap_w = 15
volume_w = 15
dot_w = 7
size_w = 6
status_w = 7
ephemeral_name_w = 12
attach_time_w = 12
no_device_w = 7
line = ''
titles = str('DEVICE').ljust(device_w) + "|" + \
str('VOLUME_ID').center(volume_w) + "|" + \
str('SNAP_ID').center(snap_w) + "|" + \
str('D.O.T.').center(dot_w) + "|" + \
str('SIZE').center(size_w) + "|" + \
str('EPHEMERAL').center(ephemeral_name_w) + "|" + \
str('NO DEV').center(no_device_w) + "|" + \
str('ATTACH TM').center(attach_time_w) + "|" + \
str('STATUS').center(status_w) + "\n"
for x in titles:
if x == '|':
line += '|'
else:
line += "-"
line = line+"\n"
header = str('BLOCK DEVICE MAP').center(len(line)) + "\n"
buf += line + header + line + titles + line
for device in block_device_map:
bdm = block_device_map[device]
buf += str(device).center(device_w) + "|" + \
str(bdm.volume_id).center(volume_w) + "|" + \
str(bdm.snapshot_id).center(snap_w) + "|" + \
str(bdm.delete_on_termination).center(dot_w) + "|" + \
str(bdm.size).center(size_w) + "|" + \
str(bdm.ephemeral_name).center(ephemeral_name_w) + "|" + \
str(bdm.no_device).center(no_device_w) + "|" + \
str(bdm.attach_time).center(attach_time_w) + "|" + \
str(bdm.status).center(status_w) + "\n"
buf += line
printmethod(buf)
def print_all_vm_types(self,zone=None, debugmethod=None):
debugmethod = debugmethod or self.logger.debug
buf = "\n"
if zone:
zones = [zone]
else:
zones = self.get_all_zones()
for zone in zones:
buf += "------------------------( " + str(zone) + " )--------------------------------------------\n"
for vm in self.get_vm_type_list_from_zone(zone):
vminfo = self.get_all_attributes(vm, verbose=False)
buf += "---------------------------------"
buf += self.get_all_attributes(vm, verbose=False)
debugmethod(buf)
def monitor_instances(self, instance_ids):
self.logger.debug('Enabling monitoring for instance(s) ' + str(instance_ids))
        # Delegate to the underlying boto EC2 connection; calling self.monitor_instances
        # here would recurse forever. Assumes the connection is exposed as self.ec2.
        self.ec2.monitor_instances(instance_ids)
def unmonitor_instances(self, instance_ids):
self.logger.debug('Disabling monitoring for instance(s) ' + str(instance_ids))
        # Delegate to the underlying boto EC2 connection; calling self.unmonitor_instances
        # here would recurse forever. Assumes the connection is exposed as self.ec2.
        self.ec2.unmonitor_instances(instance_ids)
def show_images(self, images=None, verbose=False, basic_image=False, printmethod=None):
printmethod = printmethod or self.logger.debug
buf = "\n"
if not images:
try:
images = self.get_images(emi='',basic_image=basic_image, state=None) or []
except ResourceNotFoundException, nfe:
printmethod("\nNo images found\n")
return
for image in images:
buf += str(self.show_image(image=image, verbose=verbose, printme=False)) + "\n"
printmethod(buf)
def show_image(self, image, verbose=True, printmethod=None,
header_markups=[1,4], printme=True):
if isinstance(image, basestring):
image = self.get_emi(emi=image, state=None)
if not image:
raise ResourceNotFoundException('Image:"{0}" not found'.format(image))
if not isinstance(image, Image):
raise ValueError('Unknown type provided for image:"{0}:{1}"'.format(image,
type(image)))
def header(text):
return self.markup(text=text, markups=header_markups)
title =self.markup("IMAGE ID: {0}, IMAGE NAME:{1}".format(image.id, image.name),
markups=[1,94])
main_pt = PrettyTable([title])
main_pt.align[title] = 'l'
main_pt.padding_width = 0
mainbuf = ""
if verbose:
mainbuf += header("IMAGE SUMMARY:\n")
platform = str(image.platform or "LINUX").upper()
summary_pt = PrettyTable(["VIRT TYPE", "PUBLIC", "OWNER ID", "KERNEL ID", "RAMDISK ID",
"PLATFORM", "ROOT DEV TYPE", "STATE"])
summary_pt.padding_width = 0
row = [image.virtualization_type, image.is_public, image.owner_id, image.kernel_id,
image.ramdisk_id, platform, image.root_device_type, image.state]
summary_pt.add_row(row)
mainbuf += str(summary_pt)
if verbose:
mainbuf += header("\n\nIMAGE MANIFEST PATH:\n")
locpt = PrettyTable(['IMAGE LOCATION:'])
locpt.add_row([image.location])
mainbuf += str(locpt) + "\n"
mainbuf += header("\nIMAGE BLOCK DEVICE MAPPING:")
if not image.block_device_mapping:
mainbuf += " N/A\n"
else:
mainbuf += "\n" + str(self.show_block_device_map(image.block_device_mapping,
printme=False)) + "\n"
mainbuf += header("\nIMAGE TAGS:\n")
mainbuf += str(self.show_tags(image.tags, printme=False)) + "\n"
main_pt.add_row([mainbuf])
if printme:
printmethod = printmethod or self.logger.debug
printmethod( "\n" + str(main_pt) + "\n")
else:
return main_pt
def show_addresses(self, addresses=None, verbose=True, printme=True):
"""
Print table to debug output showing all addresses available to
cloud admin using verbose filter
:param addresses:
"""
pt = PrettyTable([self.markup('PUBLIC IP'), self.markup('ACCOUNT NAME'),
self.markup('REGION'), self.markup('ADDRESS INFO')])
pt.align = 'l'
show_addresses = []
get_addresses = []
try:
if addresses:
if not isinstance(addresses, list):
addresses = [addresses]
for address in addresses:
                    if isinstance(address, basestring):
get_addresses.append(address)
elif isinstance(address, Address):
show_addresses.append(address)
else:
raise ValueError('Show_addresses(). Got unknown address type: {0}:{1}'
.format(address, type(address)))
                if get_addresses:
                    if verbose:
                        get_addresses.append('verbose')
                    show_addresses.extend(self.ec2.get_all_addresses(
                        addresses=get_addresses))
                ad_list = show_addresses
else:
if verbose:
get_addresses = ['verbose']
else:
get_addresses = None
ad_list = self.ec2.get_all_addresses(addresses=get_addresses)
for ad in ad_list:
instance_id = ad.instance_id
public_ip = ad.public_ip
region = None
if ad.region:
region = ad.region.name
account_name = ""
match = re.findall('\(arn:*.*\)', ad.instance_id)
if match:
try:
match = match[0]
account_id = match.split(':')[4]
account_name = self.get_all_accounts(account_id=account_id)[0]['account_name']
if account_name:
account_name = self.markup(account_name)
instance_id = self.markup(instance_id)
public_ip = self.markup(public_ip)
region = self.markup(region)
except:pass
pt.add_row([public_ip, account_name, region, instance_id])
except Exception, e:
tb = get_traceback()
self.critical( str(tb) + "\n ERROR in show_all_addresses_verbose:" + str(e))
if not printme:
return pt
self.logger.debug("\n" + str(pt) + "\n")
def show_instance(self, instance, printme=True):
if not isinstance(instance, EuInstance):
orig_instance = instance
if isinstance(instance, str):
try:
instance = self.get_instances(idstring=instance)[0]
except IndexError: pass
if isinstance(instance, Instance):
instance = self.convert_instance_to_euisntance(instance=instance,
auto_connect=False)
else:
raise ValueError('Unknown type for instance: "{0}:{1}"'
.format(orig_instance, type(orig_instance)))
return instance.show_summary(printme=printme)
def show_instances(self,
euinstance_list=None,
state=None,
instance_id=None,
reservation=None,
root_device_type=None,
zone=None,
key=None,
public_ip=None,
private_ip=None,
ramdisk=None,
kernel=None,
image_id=None,
printme=True
):
"""
Display or return a table of instances and summary information
:param euinstance_list: list of euinstance objs, otherwise all instances will be shown
:param state: filter to be applied if no instance list is provided
:param instance_id: filter to be applied if no instance list is provided
:param reservation: filter to be applied if no instance list is provided
:param root_device_type: filter to be applied if no instance list is provided
:param zone: filter to be applied if no instance list is provided
:param key: filter to be applied if no instance list is provided
:param public_ip: filter to be applied if no instance list is provided
:param private_ip: filter to be applied if no instance list is provided
:param ramdisk: filter to be applied if no instance list is provided
:param kernel: filter to be applied if no instance list is provided
:param image_id: filter to be applied if no instance list is provided
:param printme: boolean flag, if True will print the table with self.logger.debug, else will
return the PrettyTable obj
:returns: None if printme is True, else will return the PrettyTable obj
"""
plist = []
if not euinstance_list:
euinstance_list = []
instances = self.get_instances(state=state,
idstring=instance_id,
reservation=reservation,
rootdevtype=root_device_type,
zone=zone,
key=key,
pubip=public_ip,
privip=private_ip,
ramdisk=ramdisk,
kernel=kernel,
image_id=image_id)
for instance in instances:
if instance:
instance_res = getattr(instance, 'reservation', None)
euinstance_list.append(self.convert_instance_to_euisntance(
instance, reservation=instance_res, auto_connect=False))
if not euinstance_list:
self.logger.debug('No instances to print')
return
for instance in euinstance_list:
if not isinstance(instance,EuInstance) and not isinstance(instance, WinInstance):
self.logger.debug("print instance list passed non-EuInstnace type")
instance = self.convert_instance_to_euisntance(instance, auto_connect=False)
plist.append(instance)
first = plist.pop(0)
# Build upon a table created from a euinstance class obj
maintable = first.printself(printme=False)
maintable.hrules = 1
count = 0
        # The first row of the table returned from a euinstance.printself() is a pseudo header
new_header = maintable._rows[0]
for instance in plist:
count += 1
if not count % 5:
# Add a header every 5th row to make the tables easier to read
maintable.add_row(new_header)
pt = instance.printself(printme=False)
if pt._rows:
maintable.add_row(pt._rows[1])
# Adjust the table's column widths to allow the largest entries
for key in pt._max_width:
pt_max = pt._max_width[key] or 0
max = maintable._max_width.get(key, 0)
if pt_max > max:
maintable._max_width[key] = pt_max
if printme:
self.logger.debug("\n"+str(maintable)+"\n")
else:
return maintable
def show_bundle_task(self,bundle, header=True, footer=True, printout=True):
"""
Prints formatted output of bundle task attributes.
:param bundle: BundleInstanceTask object to be printed
:param header: boolean to print header containing column titles
:param footer: boolean to print footer containing closing row line
:param printout: boolean to print output using self.logger.debug, else will return a buffer to be printed later.
:return: string containing formatted output.
"""
id_len = 15
instance_id_len = 12
bucket_len = 36
prefix_len = 36
state_len = 15
start_time_len = 25
update_time_len = 25
buf = ""
line = "-----------------------------------------------------------------------------------------------------" \
"--------------------------------------------------------------"
if header:
buf += str("\n" + line +"\n")
buf += str('BUNDLE_ID').center(id_len) + '|' \
+ str('INSTANCE').center(instance_id_len) + '|' \
+ str('BUCKET').center(bucket_len) + '|' \
+ str('PREFIX').center(prefix_len) + '|' \
+ str('STATE').center(state_len) + '|' \
+ str('START_TIME').center(start_time_len) + '|' \
+ str('UPDATE_TIME').center(update_time_len) + '\n'
buf += str(line + "\n")
buf += str(bundle.id).center(id_len) + '|' \
+ str(bundle.instance_id).center(instance_id_len) + '|' \
+ str(bundle.bucket).center(bucket_len) + '|' \
+ str(bundle.prefix).center(prefix_len) + '|' \
+ str(bundle.state).center(state_len) + '|' \
+ str(bundle.start_time).center(start_time_len) + '|' \
+ str(bundle.update_time).center(update_time_len)
if footer:
buf += str("\n" + line)
if printout:
self.logger.debug(buf)
return buf
def show_conversion_task_list(self,
clist=None,
doprint=True,
printmethod=None):
clist = clist or self.get_all_conversion_tasks()
printmethod = printmethod or self.logger.debug
taskidlen = 19
statusmsglen = 24
availzonelen=14
volumelen=16
snaplen=13
instancelen=13
imagelen=13
header = ('TASKID'.center(taskidlen) + " | " +
'SNAPSHOTS'.center(snaplen) + " | " +
'INSTANCE'.center(instancelen) + " | " +
'IMAGE ID'.center(imagelen) + " | " +
'ZONE'.center(availzonelen) + " | " +
'VOLUMES'.center(volumelen) + " | " +
'STATUS MSG'.center(statusmsglen) + " |\n" )
line = ""
for x in xrange(0, len(header)):
line += '-'
line += "\n"
buf = "\n" + line + header + line
for task in clist:
sizestr = None
instancestr = "???"
instancestatus = ""
imagesize = None
vollist = []
volbytes = []
for importvol in task.importvolumes:
bytesconverted = importvol.bytesconverted
volume_id = importvol.volume_id
if importvol.image:
imagesize = long(importvol.image.size)
if imagesize is not None:
sizegb = "%.3f" % float(
long(imagesize) / float(1073741824))
gbconverted = "%.3f" % float(
long(bytesconverted) / float(1073741824))
sizestr = ("{0}/{1}gb".format(gbconverted, sizegb))
vollist.append(str(volume_id))
volbytes.append(sizestr)
volumes = ",".join(vollist)
volbytescon = ",".join(volbytes)
volstatus = ",".join([str('(' + str(vol.status) + ':' +
str(vol.size) + ')')
for vol in task.volumes]) or "???"
snaps = ",".join([str(snap.id ) for snap in task.snapshots]) or \
"???"
snapstatus = ",".join([str('(' + snap.status + ')')
for snap in task.snapshots])
if task.instance:
instancestr = str(task.instance.id)
instancestatus = '(' + str(task.instance.state) + ')'
image_id = task.image_id or "???"
buf += (str(task.conversiontaskid).center(taskidlen) + " | " +
str(snaps).center(snaplen) + " | " +
str(instancestr).center(instancelen) + " | " +
str(image_id ).center(imagelen) + " | " +
str(task.availabilityzone).center(availzonelen) + " | " +
str(volumes).center(volumelen) + " | " +
str(task.statusmessage[:statusmsglen]).ljust(statusmsglen)
+ " |\n")
buf += (str('(' + task.state + ')').center(taskidlen) + " | " +
str(snapstatus).center(snaplen) + " | " +
str(instancestatus).center(instancelen) + " | " +
str('').center(imagelen) + " | " +
str('').center(availzonelen) + " | " +
str(volstatus).center(volumelen) + " | " +
str(task.statusmessage[
statusmsglen:(2*statusmsglen)]).ljust(statusmsglen)
+ " |\n")
buf += (str('').center(taskidlen) + " | " +
str('').center(snaplen) + " | " +
str('').center(instancelen) + " | " +
str('').center(imagelen) + " | " +
str('').center(availzonelen) + " | " +
str(volbytescon).center(volumelen) + " | " +
str(task.statusmessage[
(2*statusmsglen):(3*statusmsglen)]).ljust(statusmsglen)
+ " |\n")
buf += line
if doprint:
printmethod(buf)
else:
return buf
def show_block_device_map(self,block_device_map, printmethod=None, printme=True ):
printmethod = printmethod or self.logger.debug
title = 'BLOCK DEVICE MAP'
main_pt = PrettyTable([title])
main_pt.align[title] = 'l'
main_pt.padding_width = 0
headers = ['DEVICE', 'VOLUME_ID', 'SNAP_ID', 'D.O.T.', 'SIZE', 'EPHEMERAL',
'NO DEV', 'ATTACH TM', 'STATUS']
pt = PrettyTable(headers)
pt.padding_width = 0
for device in block_device_map:
bdm = block_device_map[device]
row = [str(device), str(bdm.volume_id), str(bdm.snapshot_id),
str(bdm.delete_on_termination), str(bdm.size), str(bdm.ephemeral_name),
str(bdm.no_device), str(bdm.attach_time), str(bdm.status)]
pt.add_row(row)
main_pt.add_row([str(pt)])
if printme:
printmethod("\n" + str(main_pt) + "\n")
else:
return main_pt
def show_vm_types(self,zone=None, debugmethod=None):
debugmethod = debugmethod or self.logger.debug
buf = "\n"
if zone:
zones = [zone]
else:
zones = self.ec2.get_all_zones()
for zone in zones:
buf += "------------------------( " + str(zone) + " )--------------------------------------------\n"
for vm in self.get_vm_type_list_from_zone(zone):
vminfo = self.get_all_attributes(vm, verbose=False)
buf += "---------------------------------"
buf += self.get_all_attributes(vm, verbose=False)
debugmethod(buf)
def show_security_groups(self, groups=None, verbose=True, printme=True):
ret_buf = ""
groups = groups or self.ec2.get_all_security_groups()
for group in groups:
ret_buf += "\n" + str(self.show_security_group(group, printme=False))
if printme:
self.logger.debug(ret_buf)
else:
return ret_buf
def show_security_group(self, group, printme=True):
try:
from prettytable import PrettyTable, ALL
except ImportError as IE:
            self.logger.debug('prettytable import failed:' + str(IE))
return
group = self.get_security_group(id=group.id)
if not group:
raise ValueError('Show sec group failed. Could not fetch group:'
+ str(group))
title = self.markup("Security Group: {0}/{1}, VPC: {2}"
.format(group.name, group.id, group.vpc_id))
maintable = PrettyTable([title])
table = PrettyTable(["CIDR_IP", "SRC_GRP_NAME",
"SRC_GRP_ID", "OWNER_ID", "PORT",
"END_PORT", "PROTO"])
maintable.align["title"] = 'l'
#table.padding_width = 1
for rule in group.rules:
port = rule.from_port
end_port = rule.to_port
proto = rule.ip_protocol
for grant in rule.grants:
table.add_row([grant.cidr_ip, grant.name,
grant.group_id, grant.owner_id, port,
end_port, proto])
table.hrules = ALL
maintable.add_row([str(table)])
if printme:
self.logger.debug("\n{0}".format(str(maintable)))
else:
return maintable
def show_security_groups_for_instance(self, instance, printmethod=None, printme=True):
buf = ""
title = self.markup("EUCA SECURITY GROUPS FOR INSTANCE:{0}".format(instance.id))
pt = PrettyTable([title])
pt.align['title'] = 'l'
for group in instance.groups:
buf += str(self.show_security_group(group=group, printme=False))
pt.add_row([buf])
if printme:
printmethod = printmethod or self.logger.debug
printmethod('\n{0}\n'.format(pt))
else:
return pt
def show_account_attributes(self, attribute_names=None, printmethod=None, printme=True):
attrs = self.ec2.describe_account_attributes(attribute_names=attribute_names)
main_pt = PrettyTable([self.markup('ACCOUNT ATTRIBUTES')])
pt = PrettyTable([self.markup('NAME'), self.markup('VALUE')])
pt.hrules = ALL
for attr in attrs:
pt.add_row([attr.attribute_name, attr.attribute_values])
main_pt.add_row([str(pt)])
if printme:
printmethod = printmethod or self.logger.debug
printmethod( "\n" + str(main_pt) + "\n")
else:
return main_pt
class VolumeStateException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ResourceNotFoundException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 47.97231
| 189
| 0.534092
|
1e88d24e5468eeda42f0114129ff97def0b60503
| 70
|
py
|
Python
|
version.py
|
Ace17/godot
|
203255580050eaf9543b2e58c7323268aebb66f4
|
[
"MIT"
] | 1
|
2020-07-23T17:08:01.000Z
|
2020-07-23T17:08:01.000Z
|
version.py
|
Ace17/godot
|
203255580050eaf9543b2e58c7323268aebb66f4
|
[
"MIT"
] | null | null | null |
version.py
|
Ace17/godot
|
203255580050eaf9543b2e58c7323268aebb66f4
|
[
"MIT"
] | null | null | null |
short_name="godot"
name="Godot Engine"
major=1
minor=0
status="rc2"
| 8.75
| 19
| 0.728571
|
d47e21bd8da626b0e102d505618fa1260fc8e8df
| 1,990
|
py
|
Python
|
novel_coronavirus/novel_coronavirus/spiders/peopleapp.py
|
ExchangeAnn/2019ncov
|
dbb2c87a6ae4eb50bece9f5b6e2431e89d66f02e
|
[
"MIT"
] | null | null | null |
novel_coronavirus/novel_coronavirus/spiders/peopleapp.py
|
ExchangeAnn/2019ncov
|
dbb2c87a6ae4eb50bece9f5b6e2431e89d66f02e
|
[
"MIT"
] | 274
|
2020-02-22T07:54:37.000Z
|
2021-06-23T12:48:05.000Z
|
novel_coronavirus/novel_coronavirus/spiders/peopleapp.py
|
ExchangeAnn/2019ncov
|
dbb2c87a6ae4eb50bece9f5b6e2431e89d66f02e
|
[
"MIT"
] | 4
|
2020-02-20T11:19:33.000Z
|
2020-09-30T12:40:34.000Z
|
import json
import scrapy
from pprint import pprint
from novel_coronavirus.items import FlashNewsLoader, FlashNewsItem
class PeopleAPPSpider(scrapy.Spider):
name = "peopleapp"
allowed_domains = ["h5-api.tikrnews.com"]
start_urls = ["https://h5-api.tikrnews.com/h5/province"]
headers = {
"accept": "application/json",
"content-type": "application/json;charset=UTF-8",
"origin": "https://h5.peopleapp.com"
}
def start_requests(self):
data = {
"type": "rapidReport",
"lastTimestamp": None,
"current": 1,
"size": 10,
"province": "",
"city": ""
}
for url in self.start_urls:
yield scrapy.Request(url, body=json.dumps(data),
headers=self.headers,
method="POST", callback=self.parse)
def parse(self, response):
# self.logger.info(response.text)
_data = json.loads(response.text)
# print(_data["data"]["records"])
for row in _data["data"]["records"]:
# pprint(row, indent=2)
yield self.parse_data(data=row)
def parse_data(self, data: dict):
self.logger.debug(data)
item = FlashNewsLoader(item=FlashNewsItem())
item.add_value("crawlSource", data["crawlSource"])
item.add_value("majorClassification", data["majorClassification"])
item.add_value("metaInfoName", data["metaInfoName"])
item.add_value("releaseTime", data["releaseTime"])
item.add_value("summary", data.get("summary"))
item.add_value("title", data["title"])
item.add_value("pictrueUrl", data.get("pictrueUrl"))
item.add_value("webpageCode", data["webpageCode"])
item.add_value("webpageUrl", data.get("webpageUrl"))
item.add_value("reportSource", data.get("reportSource"))
# self.logger.info(item.load_item())
return item.load_item()
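# Illustrative note (not part of the original source): as a regular Scrapy spider this is
# normally run from the project root with the scrapy CLI, e.g.:
#
#   scrapy crawl peopleapp -o peopleapp.json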
| 35.535714
| 74
| 0.59397
|
1c9f9c96f79893fdaa721c0b5ea11a4f3afeba36
| 24,117
|
py
|
Python
|
Day 11/blackjack.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 1
|
2022-01-28T13:55:39.000Z
|
2022-01-28T13:55:39.000Z
|
Day 11/blackjack.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 1
|
2022-02-02T00:13:18.000Z
|
2022-02-03T11:32:53.000Z
|
Day 11/blackjack.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 2
|
2022-02-07T20:49:36.000Z
|
2022-02-19T21:22:15.000Z
|
## The deck is unlimited in size.
## There are no jokers.
## The Jack/Queen/King all count as 10.
## The Ace can count as 11 or 1.
## Use the following list as the deck of cards:
## cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
## The cards in the list have equal probability of being drawn.
## Cards are not removed from the deck as they are drawn.
## The computer is the dealer.
from art import logo
from helper import clearConsole
import random
print(logo)
start_game = input("Welcome, Do you want to play a game of Blackjack? Type 'y' or 'n':").lower()
cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
def get_card(cards):
    card = random.choice(cards)  # uniform draw; the deck is unlimited, so cards are never removed
return card
def sum_user_cards(cards):
total = 0
for card in cards:
total += card
if 11 in cards and total > 21:
total -= 10
return total
house_action = ''
def sum_house_cards(cards):
total = 0
for card in cards:
total += card
# if (11 in cards) and (total < 17):
# total -= 10
return total
def house_next_action(sum_of_cards, cards):
# house > 21, busted
if (11 in cards) and (17 < sum_of_cards < 21):
return 'stand'
if sum_of_cards > 21:
return 'bust'
elif sum_of_cards == 21:
return 'win'
else:
if sum_of_cards >= 17:
house_action = 'stand'
else:
house_action = 'hit'
return house_action
def compare_cards(house_card, user_card):
user_total_var = sum_user_cards(user_card)
house_total_var = sum_house_cards(house_card)
if user_total_var > house_total_var:
return 'user'
elif house_total_var > user_total_var:
return 'house'
else:
return 'draw'
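# Illustrative worked example (not part of the original source) of how the helpers above
# score a hand: a user hand of [11, 9, 5] first sums to 25, the ace is then demoted to 1
# giving 15, while a house hand of [10, 7] stays at 17, so the house wins:
#
#   assert compare_cards([10, 7], [11, 9, 5]) == 'house'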
def end_hand():
start_game = 'y'
active_hand_status = False
continue_hitting_user = False
house_turn = False
user_card = []
house_card = []
user_card.append(get_card(cards))
user_card.append(get_card(cards))
print('End Hand', start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card)
return start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card
def end_game():
start_game = 'n'
active_hand_status = False
continue_hitting_user = False
house_turn = False
user_card = []
house_card = []
print('End Game', start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card)
return start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card
hands_won = 0
hands_drawn = 0
hands_lost = 0
hands = 0
user_bank = 1000
# house_bank = 1000
user_card = []
house_card = []
user_card.append(get_card(cards))
house_card.append(get_card(cards))
active_hand_status = True
# MAJOR CHANGE
while (start_game == 'y'):
# Game round per user, covers multiple hands
if user_bank > 0:
active_hand_status = True
if hands == 0:
hands += 1
# print(all_game_stats)
amount = input(f'\n\nWelcome to Hand {hands} Your bank is ${user_bank:,}, How much do you want to deal? >>> $')
else: # hands > 0
if input(f'\nHand {hands} ended, Do you want another round? Y for yes >>>').lower() == 'y':
hands += 1
user_card = []
house_card = []
user_card.append(get_card(cards))
house_card.append(get_card(cards))
amount = input(f'\n\nWelcome to Hand {hands} Your bank is ${user_bank:,}, How much do you want to deal? >>> $')
else: # User dont want any more round
# End the game
# STATUS
if user_bank > 1000:
print(f'''
Game has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Game has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_game()
else: # bank < 0
# End the game
print('Your money don finish')
print(f'''
Game has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_game()
while (user_bank > 0) and (active_hand_status == True):
# This handles each set of hands
# amount input might go in here
# checking if amount is valid
try:
amount = int(amount)
if amount > user_bank:
print('Insufficient funds')
amount = input(f'\nYour bank is ${user_bank:,}, How much do you want to deal? Game will end if you put in a wrong amount >>> $')
if int(amount) > user_bank:
# STATUS
if user_bank > 1000:
print(f'''
Game has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Game has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_game()
# GAME_END
# PRINT_STATUS
# EXIT
else: # valid amount entered
user_bank = user_bank - amount
print(f'Dealing ${amount}, you have ${user_bank:,} left in your bank')
continue_hitting_user = True
user_card.append(get_card(cards))
print('User Card', user_card, 'House Card', house_card)
print(f'Your cards: {user_card}, current score: {sum_user_cards(user_card)} ')
print(f'Computer\'s first card: : {house_card} ')
# Deal or stand
if len(user_card) == 2 and sum_user_cards(user_card) == 21:
print(f'\nBLACKJACK!!!!!, YOUR CARDS {user_card} sums up to {sum_user_cards(user_card)}, you have won 150% of your stake equivalent to ${amount * 1.5} ')
# Add winnings
user_bank = user_bank + (amount* 2.5)
hands_won += 1
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_hand()
# reset cards
# ask to deal, if yes active_hand_status = False, else start_game = 'n'
active_hand_status = False
while continue_hitting_user == True:
# Keep hitting the user unless he chooses otherwise
if sum_user_cards(user_card) > 21:
print(f'Bust!!!, your score is {user_card} {sum_user_cards(user_card)}, House wins this hand with a score of {house_card} {sum_user_cards(house_card)} ')
# Add losses
hands_lost += 1
# HAND_END
# PRINT_PREVIOUS_STATUS
# STATUS
if user_bank > 1000:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_hand()
# CONTINUE TO FIRST WHILE STATEMENT (NEXT HAND)
active_hand_status = False # ask to start another round under first while statement
continue_hitting_user = False
elif sum_user_cards(user_card) == 21:
print(f'You win!!!, your score is {user_card} {sum_user_cards(user_card)}, User wins this hand.')
# Add winnings
user_bank = user_bank + (amount*2)
hands_won += 1
# HAND_END
# PRINT_PREVIOUS_STATUS
# STATUS
if user_bank > 1000:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
# CONTINUE TO FIRST WHILE STATEMENT (NEXT HAND)
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_hand()
elif sum_user_cards(user_card) < 21:
continue_hitting_user = True # not needed
hit_or_stand = input('Do you want to hit or stand? "H for hit and any other key for stand" ').lower()
if hit_or_stand == 'h':
print('Oya Hit Me')
user_card.append(get_card(cards))
print(user_card, house_card)
print(f'Your cards: {user_card}, current score: {sum_user_cards(user_card)} ')
print(f'Computer\'s first card: : {house_card} ')
else:
continue_hitting_user = False
# house plays, compare results and the set active_hand_status = False
print('House will start playing now')
house_turn = True
while house_turn == True:
# give house a card if it has just one card
if len(house_card) == 1:
print('\n\n Hitting house its second card...\n')
else:
print('\n\n Hitting house its NEXT card...\n')
house_card.append(get_card(cards))
print(f'House Cards: {house_card} ')
# Sum house cards
house_total = sum_house_cards(house_card)
print(f'House Total: {house_total} ')
# check for house next action
house_next_action_var = house_next_action(house_total, house_card)
print(f'house_next_action: {house_next_action_var} ')
# if hit, give a card, else
if house_next_action_var == 'hit':
# house_card.append(get_card(cards))
continue
elif house_next_action_var == 'bust':
                            # End this hand, ask user for another round, add winnings/losses
print(f'Bust!!!, your score is {house_card} {sum_house_cards(house_card)}, User wins this hand with a score of {user_card} {sum_user_cards(user_card)} ')
user_bank += (amount*2)
hands_won += 1
# STATUS
if user_bank > 1000:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
# print('Busted!!!')
house_turn = False
# this is supposed to end the hand
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_hand()
else:
# House stands, compare results
hand_result = compare_cards(house_card, user_card)
if hand_result == 'user':
hands_won += 1
user_bank += (amount*2)
# STATUS
if user_bank > 1000:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
elif hand_result == 'house':
hands_lost += 1
# STATUS
if user_bank > 1000:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
else:
# draw
hands_drawn += 1
# refund user
user_bank += amount
print(f'Amount {amount} returned')
# STATUS
if user_bank > 1000:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Won = ${(user_bank - 1000):,}
Final Amount = ${user_bank:,}
''')
else:
print(f'''
Hand has ended
Hands won = {hands_won}
Hands lost = {hands_lost}
Hands Drawn = {hands_drawn}
Number of Hands = {hands}
Amount Lost = ${(1000 - user_bank):,}
Final Amount = ${user_bank:,}
''')
house_turn = False
active_hand_status = False
start_game = 'n'
# compare cards
print(house_card)
print(user_card)
print(hand_result)
print('Hand ends')
start_game, active_hand_status, continue_hitting_user, house_turn, user_card, house_card = end_hand()
else:
# No action
pass
except Exception as e: # amount is invalid
print(e)
print('Wrong amount entered, game is quitting... ')
active_hand_status = False
start_game = 'n'
# check if user wants to continue playing
# clearConsole()
# Play again or quit
# all_game_stats_win = f'''
# Game has ended
# Hands won = {hands_won}
# Hands lost = {hands_lost}
# Hands Drawn = {hands_drawn}
# Number of Hands = {hands}
# Amount Won = ${(user_bank - 1000):,}
# Final Amount = ${user_bank:,}
# '''
# all_game_stats_lost = f'''
# Game has ended
# Hands won = {hands_won}
# Hands lost = {hands_lost}
# Hands Drawn = {hands_drawn}
# Number of Hands = {hands}
# Amount Lost = ${(1000 - user_bank):,}
# Final Amount = ${user_bank:,}
# '''
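# ---------------------------------------------------------------------------
# Hedged refactoring sketch (not part of the original game): the hand-status
# report above is printed verbatim in several branches, and the commented-out
# all_game_stats_* templates suggest the same idea. A single helper like the
# one below could replace those blocks. The parameter names mirror the
# variables used above, and the starting bank of 1000 is inferred from the
# existing messages; both are assumptions.
def print_hand_status(user_bank, hands_won, hands_lost, hands_drawn, hands,
                      starting_bank=1000):
    delta = user_bank - starting_bank
    label = 'Amount Won' if delta > 0 else 'Amount Lost'
    print(f'''
    Hand has ended
    Hands won = {hands_won}
    Hands lost = {hands_lost}
    Hands Drawn = {hands_drawn}
    Number of Hands = {hands}
    {label} = ${abs(delta):,}
    Final Amount = ${user_bank:,}
    ''')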
| 43.76951
| 190
| 0.375669
|
eb95f49c898957452a610b94b99681b6d0d68b71
| 21,525
|
py
|
Python
|
tests/google/appengine/ext/ndb/key_test.py
|
phil-lopreiato/appengine-python-standard
|
5e2c400a24d299bb86e98f755a6ef510b4e1e0df
|
[
"Apache-2.0"
] | 28
|
2021-01-06T19:55:21.000Z
|
2022-03-28T09:41:08.000Z
|
tests/google/appengine/ext/ndb/key_test.py
|
SOFTWARESOLUTONS-PVT-LIMITED/appengine-python-standard
|
530a54b0fc0eb74d9dc29b19b7c4cdfab0556ebc
|
[
"Apache-2.0"
] | 13
|
2021-06-17T09:38:17.000Z
|
2022-03-11T01:12:33.000Z
|
tests/google/appengine/ext/ndb/key_test.py
|
SOFTWARESOLUTONS-PVT-LIMITED/appengine-python-standard
|
530a54b0fc0eb74d9dc29b19b7c4cdfab0556ebc
|
[
"Apache-2.0"
] | 28
|
2021-03-09T19:27:37.000Z
|
2022-01-21T21:18:52.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for key.py."""
import base64
import collections
import datetime
import os
import pickle
import sortedcontainers
from google.appengine.ext.ndb import key
from google.appengine.ext.ndb import model
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import test_utils
import six
from six.moves import range
from six.moves import zip
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
from absl.testing import absltest as unittest
class KeyTests(test_utils.NDBTest):
the_module = key
def testShort(self):
k0 = key.Key('Kind', None)
self.assertEqual(k0.flat(), ('Kind', None))
k1 = key.Key('Kind', 1)
self.assertEqual(k1.flat(), ('Kind', 1))
k2 = key.Key('Parent', 42, 'Kind', 1)
self.assertEqual(k2.flat(), ('Parent', 42, 'Kind', 1))
def testFlat(self):
flat = ('Kind', 1)
pairs = tuple((flat[i], flat[i + 1]) for i in range(0, len(flat), 2))
k = key.Key(flat=flat)
self.assertEqual(k.pairs(), pairs)
self.assertEqual(k.flat(), flat)
self.assertEqual(k.kind(), 'Kind')
def testFlatLong(self):
flat = ('Kind', 1, 'Subkind', 'foobar')
pairs = tuple((flat[i], flat[i + 1]) for i in range(0, len(flat), 2))
k = key.Key(flat=flat)
self.assertEqual(k.pairs(), pairs)
self.assertEqual(k.flat(), flat)
self.assertEqual(k.kind(), 'Subkind')
def testSerialized(self):
flat = ['Kind', 1, 'Subkind', 'foobar']
r = entity_pb2.Reference()
r.app = 'ndb-test-app-id'
e = r.path.element.add()
e.type = flat[0]
e.id = flat[1]
e = r.path.element.add()
e.type = flat[2]
e.name = flat[3]
serialized = r.SerializeToString()
ref_bytes = six.ensure_binary(r.SerializeToString())
urlsafe = base64.urlsafe_b64encode(ref_bytes).rstrip(b'=')
k = key.Key(flat=flat)
self.assertEqual(k.serialized(), serialized)
self.assertEqual(k.urlsafe(), urlsafe)
self.assertEqual(k.reference(), r)
k = key.Key(urlsafe=urlsafe)
self.assertEqual(k.serialized(), serialized)
self.assertEqual(k.urlsafe(), urlsafe)
self.assertEqual(k.reference(), r)
k = key.Key(serialized=serialized)
self.assertEqual(k.serialized(), serialized)
self.assertEqual(k.urlsafe(), urlsafe)
self.assertEqual(k.reference(), r)
k = key.Key(reference=r)
self.assertIsNot(k.reference(), r)
self.assertEqual(k.serialized(), serialized)
self.assertEqual(k.urlsafe(), urlsafe)
self.assertEqual(k.reference(), r)
k = key.Key(reference=r, app=r.app, namespace='')
self.assertIsNot(k.reference(), r)
self.assertEqual(k.serialized(), serialized)
self.assertEqual(k.urlsafe(), urlsafe)
self.assertEqual(k.reference(), r)
k1 = key.Key('A', 1)
self.assertEqual(k1.urlsafe(), b'ag9uZGItdGVzdC1hcHAtaWRyBwsSAUEYAQw')
k2 = key.Key(urlsafe=k1.urlsafe())
self.assertEqual(k1, k2)
def testId(self):
k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
self.assertEqual(k1.id(), 'foo')
k2 = key.Key('Subkind', 42, parent=k1)
self.assertEqual(k2.id(), 42)
k3 = key.Key('Subkind', 'bar', parent=k2)
self.assertEqual(k3.id(), 'bar')
k4 = key.Key('Subkind', None, parent=k3)
self.assertEqual(k4.id(), None)
def testIdentity(self):
test_kind, test_id = 'test-kind', 'test-id'
k = key.Key(test_kind, test_id)
with self.subTest(name='Kind'):
self.assertEqual(k.kind(), test_kind)
with self.subTest(name='ID'):
self.assertEqual(k.id(), test_id)
def testStringId(self):
k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
self.assertEqual(k1.string_id(), 'foo')
k2 = key.Key('Subkind', 'bar', parent=k1)
self.assertEqual(k2.string_id(), 'bar')
k3 = key.Key('Subkind', 42, parent=k2)
self.assertEqual(k3.string_id(), None)
k4 = key.Key('Subkind', None, parent=k3)
self.assertEqual(k4.string_id(), None)
def testIntegerId(self):
k1 = key.Key('Kind', 42, app='app1', namespace='ns1')
self.assertEqual(k1.integer_id(), 42)
k2 = key.Key('Subkind', 43, parent=k1)
self.assertEqual(k2.integer_id(), 43)
k3 = key.Key('Subkind', 'foobar', parent=k2)
self.assertEqual(k3.integer_id(), None)
k4 = key.Key('Subkind', None, parent=k3)
self.assertEqual(k4.integer_id(), None)
def testParent(self):
p = key.Key('Kind', 1, app='app1', namespace='ns1')
self.assertEqual(p.parent(), None)
k = key.Key('Subkind', 'foobar', parent=p)
self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
self.assertEqual(k.parent(), p)
k = key.Key(
'Subkind', 'foobar', parent=p, app=p.app(), namespace=p.namespace())
self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
self.assertEqual(k.parent(), p)
def testRoot(self):
p = key.Key('Kind', 1, app='app1', namespace='ns1')
self.assertEqual(p.root(), p)
k = key.Key('Subkind', 'foobar', parent=p)
self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
self.assertEqual(k.root(), p)
k2 = key.Key(
'Subsubkind', 42, parent=k, app=p.app(), namespace=p.namespace())
self.assertEqual(k2.flat(),
('Kind', 1, 'Subkind', 'foobar', 'Subsubkind', 42))
self.assertEqual(k2.root(), p)
def testRepr_Inferior(self):
k = key.Key('Kind', 1, 'Subkind', 'foobar')
self.assertEqual(repr(k), "Key('Kind', 1, 'Subkind', 'foobar')")
self.assertEqual(repr(k), str(k))
def testRepr_Toplevel(self):
k = key.Key('Kind', 1)
self.assertEqual(repr(k), "Key('Kind', 1)")
def testRepr_Incomplete(self):
k = key.Key('Kind', None)
self.assertEqual(repr(k), "Key('Kind', None)")
def testRepr_UnicodeKind(self):
k = key.Key(u'\u1234', 1)
if six.PY2:
self.assertEqual(repr(k), "Key('\\xe1\\x88\\xb4', 1)")
else:
self.assertEqual(repr(k), u"Key('\u1234', 1)")
def testRepr_UnicodeId(self):
k = key.Key('Kind', u'\u1234')
if six.PY2:
self.assertEqual(repr(k), "Key('Kind', '\\xe1\\x88\\xb4')")
else:
self.assertEqual(repr(k), u"Key('Kind', '\u1234')")
def testRepr_App(self):
k = key.Key('Kind', 1, app='foo')
self.assertEqual(repr(k), "Key('Kind', 1, app='foo')")
def testRepr_Namespace(self):
k = key.Key('Kind', 1, namespace='foo')
self.assertEqual(repr(k), "Key('Kind', 1, namespace='foo')")
def testUnicode(self):
flat_input = (u'Kind\u1234', 1, 'Subkind', u'foobar\u4321')
flat = (six.ensure_str(flat_input[0]), flat_input[1], flat_input[2],
six.ensure_str(flat_input[3]))
pairs = tuple((flat[i], flat[i + 1]) for i in range(0, len(flat), 2))
k = key.Key(flat=flat_input)
self.assertEqual(k.pairs(), pairs)
self.assertEqual(k.flat(), flat)
r = k.reference()
serialized = k.serialized()
urlsafe = k.urlsafe()
key.Key(urlsafe=urlsafe.decode('utf8'))
key.Key(serialized=serialized.decode('utf8'))
key.Key(reference=r)
r = entity_pb2.Reference()
r.app = 'ndb-test-app-id'
e = r.path.element.add()
e.type = flat[0]
e.name = flat[3]
k = key.Key(reference=r)
self.assertEqual(k.reference(), r)
def testHash(self):
flat = ['Kind', 1, 'Subkind', 'foobar']
pairs = [(flat[i], flat[i + 1]) for i in range(0, len(flat), 2)]
k = key.Key(flat=flat)
self.assertEqual(hash(k), hash(tuple(pairs)))
def testOrdering(self):
a = key.Key(app='app2', namespace='ns2', flat=('kind1', 1))
b = key.Key(app='app2', namespace='ns1', flat=('kind1', 1))
c = key.Key(app='app1', namespace='ns1', flat=('kind1', 1))
d = key.Key(app='app1', namespace='ns1', flat=('kind1', 2))
e = key.Key(app='app1', namespace='ns1', flat=('kind1', 'e'))
f = key.Key(app='app1', namespace='ns1', flat=('kind1', 'f'))
g = key.Key(app='app1', namespace='ns1', flat=('kind2', 'f', 'x', 1))
h = key.Key(app='app1', namespace='ns1', flat=('kind2', 'f', 'x', 2))
expected = [c, d, e, f, g, h, b, a]
actual = sorted([a, b, c, d, e, f, g, h])
self.assertEqual(actual, expected)
for i in range(len(actual)):
for j in range(len(actual)):
self.assertEqual(actual[i] < actual[j], i < j)
self.assertEqual(actual[i] <= actual[j], i <= j)
self.assertEqual(actual[i] > actual[j], i > j)
self.assertEqual(actual[i] >= actual[j], i >= j)
self.assertEqual(actual[i] == actual[j], i == j)
self.assertEqual(actual[i] != actual[j], i != j)
def testUniqueIncomplete(self):
p0 = None
p1 = key.Key('bar', 1)
for p in p0, p1:
a = key.Key('foo', 0, parent=p)
b = key.Key('foo', '', parent=p)
c = key.Key('foo', None, parent=p)
self.assertEqual(a, b)
self.assertEqual(b, c)
self.assertEqual(c, a)
for x in a, b, c:
self.assertEqual(x.id(), None)
self.assertEqual(x.string_id(), None)
self.assertEqual(x.integer_id(), None)
self.assertEqual(x.pairs()[-1], ('foo', None))
self.assertEqual(x.flat()[-1], None)
self.assertEqual(x.urlsafe(), c.urlsafe())
def testIncomplete(self):
key.Key(flat=['Kind', None])
self.assertRaises(
datastore_errors.BadArgumentError,
key.Key,
flat=['Kind', None, 'Subkind', 1])
self.assertRaises(TypeError, key.Key, flat=['Kind', ()])
def testKindFromModel(self):
class M(model.Model):
pass
class N(model.Model):
@classmethod
def _get_kind(cls):
return 'NN'
k = key.Key(M, 1)
self.assertEqual(k, key.Key('M', 1))
k = key.Key('X', 1, N, 2, 'Y', 3)
self.assertEqual(k, key.Key('X', 1, 'NN', 2, 'Y', 3))
def testKindFromBadValue(self):
self.assertRaises(Exception, key.Key, 42, 42)
def testDeleteHooksCalled(self):
test = self
self.pre_counter = 0
self.post_counter = 0
class HatStand(model.Model):
@classmethod
def _pre_delete_hook(cls, key):
test.pre_counter += 1
if test.pre_counter == 1:
self.assertEqual(self.key, key)
@classmethod
def _post_delete_hook(cls, key, future):
test.post_counter += 1
self.assertEqual(self.key, key)
self.assertIs(future.get_result(), None)
furniture = HatStand()
key = furniture.put()
self.key = key
self.assertEqual(self.pre_counter, 0, 'Pre delete hook called early')
future = key.delete_async()
self.assertEqual(self.pre_counter, 1, 'Pre delete hook not called')
self.assertEqual(self.post_counter, 0, 'Post delete hook called early')
future.get_result()
self.assertEqual(self.post_counter, 1, 'Post delete hook not called')
new_furniture = [HatStand() for _ in range(10)]
keys = [furniture.put() for furniture in new_furniture]
multi_future = model.delete_multi_async(keys)
self.assertEqual(self.pre_counter, 11,
'Pre delete hooks not called on delete_multi')
self.assertEqual(self.post_counter, 1,
'Post delete hooks called early on delete_multi')
for fut, key in zip(multi_future, keys):
self.key = key
fut.get_result()
self.assertEqual(self.post_counter, 11,
'Post delete hooks not called on delete_multi')
def testNoDefaultDeleteCallback(self):
ctx = tasklets.get_context()
ctx.set_cache_policy(False)
class EmptyModel(model.Model):
pass
entity = EmptyModel()
entity.put()
fut = entity.key.delete_async()
self.assertFalse(fut._immediate_callbacks,
'Delete hook queued default no-op.')
def testGetHooksCalled(self):
test = self
self.pre_counter = 0
self.post_counter = 0
class HatStand(model.Model):
@classmethod
def _pre_get_hook(cls, key):
test.pre_counter += 1
if test.pre_counter == 1:
self.assertEqual(key, self.key)
@classmethod
def _post_get_hook(cls, key, future):
test.post_counter += 1
self.assertEqual(key, self.key)
self.assertEqual(future.get_result(), self.entity)
furniture = HatStand()
self.entity = furniture
key = furniture.put()
self.key = key
self.assertEqual(self.pre_counter, 0, 'Pre get hook called early')
future = key.get_async()
self.assertEqual(self.pre_counter, 1, 'Pre get hook not called')
self.assertEqual(self.post_counter, 0, 'Post get hook called early')
future.get_result()
self.assertEqual(self.post_counter, 1, 'Post get hook not called')
new_furniture = [HatStand() for _ in range(10)]
keys = [furniture.put() for furniture in new_furniture]
multi_future = model.get_multi_async(keys)
self.assertEqual(self.pre_counter, 11,
'Pre get hooks not called on get_multi')
self.assertEqual(self.post_counter, 1,
'Post get hooks called early on get_multi')
for fut, key, entity in zip(multi_future, keys, new_furniture):
self.key = key
self.entity = entity
fut.get_result()
self.assertEqual(self.post_counter, 11,
'Post get hooks not called on get_multi')
def testMonkeyPatchHooks(self):
hook_attr_names = ('_pre_get_hook', '_post_get_hook', '_pre_delete_hook',
'_post_delete_hook')
original_hooks = {}
for name in hook_attr_names:
original_hooks[name] = getattr(model.Model, name)
self.pre_get_flag = False
self.post_get_flag = False
self.pre_delete_flag = False
self.post_delete_flag = False
class HatStand(model.Model):
@classmethod
def _pre_get_hook(cls, unused_key):
self.pre_get_flag = True
@classmethod
def _post_get_hook(cls, unused_key, unused_future):
self.post_get_flag = True
@classmethod
def _pre_delete_hook(cls, unused_key):
self.pre_delete_flag = True
@classmethod
def _post_delete_hook(cls, unused_key, unused_future):
self.post_delete_flag = True
for name in hook_attr_names:
hook = getattr(HatStand, name)
setattr(model.Model, name, hook)
try:
key = HatStand().put()
key.get()
self.assertTrue(self.pre_get_flag,
'Pre get hook not called when model is monkey patched')
self.assertTrue(self.post_get_flag,
'Post get hook not called when model is monkey patched')
key.delete()
self.assertTrue(
self.pre_delete_flag,
'Pre delete hook not called when model is monkey patched')
self.assertTrue(
self.post_delete_flag, 'Post delete hook not called when model '
'is monkey patched')
finally:
for name in hook_attr_names:
setattr(model.Model, name, original_hooks[name])
def testPreHooksCannotCancelRPC(self):
class Foo(model.Model):
@classmethod
def _pre_get_hook(cls, unused_key):
raise tasklets.Return()
@classmethod
def _pre_delete_hook(cls, unused_key):
raise tasklets.Return()
entity = Foo()
entity.put()
self.assertRaises(tasklets.Return, entity.key.get)
self.assertRaises(tasklets.Return, entity.key.delete)
def testNoDefaultGetCallback(self):
ctx = tasklets.get_context()
ctx.set_cache_policy(False)
class EmptyModel(model.Model):
pass
entity = EmptyModel()
entity.put()
fut = entity.key.get_async()
self.assertFalse(fut._immediate_callbacks, 'Get hook queued default no-op.')
def testFromOldKey(self):
old_key = datastore_types.Key.from_path('TestKey', 1234)
new_key = key.Key.from_old_key(old_key)
self.assertEqual(str(old_key), six.ensure_text(new_key.urlsafe()))
old_key2 = new_key.to_old_key()
self.assertEqual(old_key, old_key2)
Snapshot = collections.namedtuple('Snapshot', ['snapshot_key', 'created_on'])
class KeyPickleTests(test_utils.NDBTest):
"""Tests for key pickling."""
def setUp(self):
super(KeyPickleTests, self).setUp()
self.keys = [
key.Key(flat=['Kind', 1]),
key.Key(flat=['Kind', 1, 'Subkind', 'foobar']),
key.Key(
flat=['Kind', 1, 'Subkind', 'foobar'],
namespace='ns',
app='a-different-app')
]
self.pkeys = [
[
b"ccopy_reg\n_reconstructor\np0\n(cgoogle.appengine.ext.ndb.key\nKey\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n((dp5\nS'namespace'\np6\nS''\np7\nsS'app'\np8\nS'ndb-test-app-id'\np9\nsS'pairs'\np10\n(lp11\n(S'Kind'\np12\nI1\ntp13\nastp14\nb.",
b"ccopy_reg\n_reconstructor\np0\n(cgoogle.appengine.ext.ndb.key\nKey\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n((dp5\nS'namespace'\np6\nS''\np7\nsS'app'\np8\nS'ndb-test-app-id'\np9\nsS'pairs'\np10\n(lp11\n(S'Kind'\np12\nI1\ntp13\na(S'Subkind'\np14\nS'foobar'\np15\ntp16\nastp17\nb.",
b"ccopy_reg\n_reconstructor\np0\n(cgoogle.appengine.ext.ndb.key\nKey\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n((dp5\nS'namespace'\np6\nS'ns'\np7\nsS'app'\np8\nS'a-different-app'\np9\nsS'pairs'\np9\n(lp10\n(S'Kind'\np11\nI1\ntp12\na(S'Subkind'\np13\nS'foobar'\np14\ntp15\nastp16\nb."
],
[
b"ccopy_reg\n_reconstructor\np0\n(cgoogle.appengine.ext.ndb.key\nKey\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n((dp5\nS'app'\np6\nS'ndb-test-app-id'\np7\nsS'pairs'\np8\n((S'Kind'\np9\nI1\ntp10\ntp11\nsS'namespace'\np12\nS''\np13\nstp14\nb.",
b"ccopy_reg\n_reconstructor\np0\n(cgoogle.appengine.ext.ndb.key\nKey\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n((dp5\nS'app'\np6\nS'ndb-test-app-id'\np7\nsS'pairs'\np8\n((S'Kind'\np9\nI1\ntp10\n(S'Subkind'\np11\nS'foobar'\np12\ntp13\ntp14\nsS'namespace'\np15\nS''\np16\nstp17\nb.",
b"ccopy_reg\n_reconstructor\np0\n(cgoogle.appengine.ext.ndb.key\nKey\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n((dp5\nS'app'\np6\nS'a-different-app'\np7\nsS'pairs'\np8\n((S'Kind'\np9\nI1\ntp10\n(S'Subkind'\np11\nS'foobar'\np12\ntp13\ntp14\nsS'namespace'\np15\nS'ns'\np16\nstp17\nb."
]
]
def _Unpickle(self, s):
if six.PY2:
return pickle.loads(s)
else:
return pickle.loads(s, encoding='bytes')
def testPickleBackwardsCompatibility(self):
for pkey_list in self.pkeys:
for expected, pkey in zip(self.keys, pkey_list):
expected.app()
actual = self._Unpickle(pkey)
self.assertEqual(expected, actual)
def testPickleForwardCompatibility(self):
os.environ['NDB_PY2_UNPICKLE_COMPAT'] = '1'
self.addCleanup(os.unsetenv, 'NDB_PY2_UNPICKLE_COMPAT')
expected = sortedcontainers.SortedSet([
Snapshot(
snapshot_key=key.Key('Kind', 1, 'Subkind', 'foobar', app='test'),
created_on=datetime.datetime(2020, 8, 25, 18, 28, 27, 66651))
])
py2_pickle = b'\x80\x02csortedcontainers.sortedset\nSortedSet\nq\x00c__builtin__\nset\nq\x01]q\x02cgoogle.appengine.ext.ndb.key_test\nSnapshot\nq\x03cgoogle.appengine.ext.ndb.key\nKey\nq\x04}q\x05(U\x05pairsq\x06U\x04Kindq\x07K\x01\x86q\x08U\x07Subkindq\tU\x06foobarq\n\x86q\x0b\x86q\x0cU\tnamespaceq\rU\x00q\x0eU\x03appq\x0fU\x04testq\x10u\x85q\x11\x81q\x12}q\x13(h\x06h\x0ch\rh\x0eh\x0fh\x10u\x85q\x14bcdatetime\ndatetime\nq\x15U\n\x07\xe4\x08\x19\x12\x1c\x1b\x01\x04[q\x16\x85q\x17Rq\x18\x86q\x19\x81q\x1aa\x85q\x1bRq\x1cN\x86q\x1dRq\x1e.'
actual = self._Unpickle(six.ensure_binary(py2_pickle))
self.assertEqual(expected, actual)
def testPy2PickleSetState(self):
pickle_bytes = b'\x80\x02cgoogle.appengine.ext.ndb.key\nKey\nq\x00}q\x01(U\x09namespaceq\x02U\x00q\x03U\x03appq\x04U\x0ds~some-app-idq\x05U\x05pairsq\x06U\x05Classq\x07U\x02idq\x08\x86q\x09\x85q\nu\x85q\x0b\x81q\x0c}q\x0d(h\x02h\x03h\x04h\x05h\x06h\nu\x85q\x0eb.'
expected = key.Key('Class', 'id', app='s~some-app-id')
actual = pickle.loads(pickle_bytes)
self.assertEqual(expected.__getstate__()[0]['pairs'],
actual.__getstate__()[0]['pairs'])
def testConsistentPickleBehaviour(self):
k = key.Key('Kind', 'foo', app='app1', namespace='ns1')
k.__setstate__(
({'pairs': (('Kind', 'foo'),), 'app': 'app1', 'namespace': 'ns1'},)
)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(k, self._Unpickle(pickle.dumps(k, protocol)))
def testPickling(self):
for k in self.keys:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(k, self._Unpickle(pickle.dumps(k, protocol)))
def _bytes2str(bytestring):
"""Helper method for debugging pickle.dumps output."""
return ''.join(_human_readable_byte(x) for x in bytestring)
def _human_readable_byte(c):
if isinstance(c, str):
c = ord(c)
if c == 10:
return '\\n'
if c > 21 and c < 127:
return chr(c)
else:
return '\\x{:02x}'.format(c)
if __name__ == '__main__':
unittest.main()
| 33.423913
| 546
| 0.645807
|
c78474157f2270b92cfc74bfa756cfb2b95f31f4
| 2,169
|
py
|
Python
|
src/process_data.py
|
sidgairo18/deep-inspiration
|
13db9d02cae2a960b3b0ac3a2c95eaadbee8ad6a
|
[
"MIT"
] | 4
|
2019-02-10T12:20:32.000Z
|
2019-02-21T22:16:36.000Z
|
src/process_data.py
|
sidgairo18/deep-inspiration
|
13db9d02cae2a960b3b0ac3a2c95eaadbee8ad6a
|
[
"MIT"
] | null | null | null |
src/process_data.py
|
sidgairo18/deep-inspiration
|
13db9d02cae2a960b3b0ac3a2c95eaadbee8ad6a
|
[
"MIT"
] | null | null | null |
from get_data import read_data
import numpy as np
def process(filename):
quotes = read_data(filename)
print ("Length of quotes", len(quotes))
#quotes appended to a single string.
all_quotes = ""
for q in quotes:
all_quotes += q+"\n"
#Map each character to an integer
chars = tuple(set(all_quotes))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# Encode the text
encoded = np.array([char2int[ch] for ch in all_quotes])
return chars, encoded
def one_hot_encode(arr, n_labels):
    # Initialize the one-hot encoded array
one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)
# Fill the appropriate elements with ones
one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
# Finally reshape it to get back to the original array
one_hot = one_hot.reshape((*arr.shape, n_labels))
return one_hot
# Defining method to make mini-batches for training
def get_batches(arr, batch_size, seq_length):
'''Create a generator that returns batches of size
batch_size x seq_length from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
seq_length: Number of encoded chars in a sequence
'''
batch_size_total = batch_size * seq_length
# total number of batches we can make
n_batches = len(arr)//batch_size_total
# Keep only enough characters to make full batches
arr = arr[:n_batches * batch_size_total]
# Reshape into batch_size rows
arr = arr.reshape((batch_size, -1))
# iterate through the array, one sequence at a time
for n in range(0, arr.shape[1], seq_length):
# The features
x = arr[:, n:n+seq_length]
# The targets, shifted by one
y = np.zeros_like(x)
try:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]
except IndexError:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
yield x, y
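# Hedged usage sketch (not part of the original script): wires process(),
# one_hot_encode() and get_batches() together. The batch size, sequence
# length and the idea of peeking at only the first batch are illustrative
# assumptions, not requirements of the functions above.
def demo_batches(filename, batch_size=8, seq_length=50):
    chars, encoded = process(filename)
    # Take only the first batch from the generator for inspection.
    x, y = next(get_batches(encoded, batch_size, seq_length))
    # Inputs are typically one-hot encoded before being fed to a character model.
    x_one_hot = one_hot_encode(x, len(chars))
    print(x_one_hot.shape, y.shape)  # (batch_size, seq_length, n_chars), (batch_size, seq_length)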
if __name__ == "__main__":
process('../data/quotes_data.txt')
| 28.539474
| 77
| 0.626095
|
43111a4dc0b0c1545d5200e04b753e692d8c9ad0
| 6,939
|
py
|
Python
|
serialtest/sandbox/models.py
|
fsnidely/serialtest
|
2e3fd1dfb6b13ccfecb4418a801d0330490bf841
|
[
"MIT"
] | null | null | null |
serialtest/sandbox/models.py
|
fsnidely/serialtest
|
2e3fd1dfb6b13ccfecb4418a801d0330490bf841
|
[
"MIT"
] | null | null | null |
serialtest/sandbox/models.py
|
fsnidely/serialtest
|
2e3fd1dfb6b13ccfecb4418a801d0330490bf841
|
[
"MIT"
] | null | null | null |
"""Database models for Sandbox App"""
import os
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.html import format_html
STATIC_IMAGES_PATH = 'sandbox/images'
IMAGE_ASSET_STORAGE = FileSystemStorage(
location=os.path.join(settings.STATIC_ROOT, STATIC_IMAGES_PATH),
base_url=os.path.join(settings.STATIC_URL, STATIC_IMAGES_PATH))
class KindManager(models.Manager):
def get_by_natural_key(self, iden):
return self.get(iden=iden)
class Kind(models.Model):
"""Kind Model is the Kind of the Thing."""
objects = KindManager()
iden = models.CharField('Kind', max_length=8, unique=True)
name = models.CharField('Name', max_length=80)
desc = models.CharField('Description', max_length=255, blank=True)
rank = models.IntegerField('Rank', default=0)
class Meta:
ordering = ('rank', 'id',)
def __str__(self):
return self.name
def natural_key(self):
return (self.iden,)
class ThingManager(models.Manager):
def get_by_natural_key(self, iden):
return self.get(iden=iden)
class Thing(models.Model):
"""General collection of objects.
Named 'things' instead of objects as that would get confusing referring to Object.objects, etc.
"""
objects = ThingManager()
iden = models.CharField('Thing', max_length=8, unique=True)
kind = models.ForeignKey(Kind, on_delete=models.CASCADE)
name = models.CharField('Name', max_length=80)
desc = models.CharField('Description', max_length=255)
rank = models.IntegerField('Rank', default=0)
image = models.ImageField('Thing', storage=IMAGE_ASSET_STORAGE, blank=True)
class Meta:
ordering = ('kind__rank', 'rank', 'id',)
def __str__(self):
return self.name
def natural_key(self):
return (self.iden,)
def img_html(self):
"""Image tag for image file."""
if self.image:
return format_html(
'<img src="{0}" alt="{1}" title={1}>',
self.image.url,
self.name)
return self.__str__()
img_html.short_description = 'Thing'
def img_name_html(self):
"""Image tag for image file."""
if self.image:
return format_html(
'{0} {1}',
self.img_html(),
self.name)
return self.__str__()
img_name_html.short_description = 'Thing'
class Product(Thing):
"""Product for things that can be produced.
A child record of Thing.
"""
thing = models.OneToOneField(
Thing, on_delete=models.CASCADE, parent_link=True)
prod_secs = models.IntegerField('Production seconds')
low_level = models.IntegerField('Low-level code', default=-1)
# Tweak the 'thing' field's 'serialize' attribute.
# This overrides the normally 'False' value, which omits
# the 'thing' field from serialization when using
# the natural_key().
Product._meta.get_field('thing').serialize = True
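# Hedged usage sketch (not part of the original module): illustrates what the
# serialize-attribute tweak above is for. The queryset and output format are
# illustrative assumptions only.
def _example_serialize_products():
    """Serialize products with natural keys, keeping the parent-link field."""
    from django.core import serializers
    # With serialize=True on 'thing', each serialized Product record emits that
    # field via Thing.natural_key() instead of omitting it as a parent-link PK.
    return serializers.serialize(
        'json',
        Product.objects.all(),
        use_natural_foreign_keys=True,
        use_natural_primary_keys=True,
    )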
class MaterielManager(models.Manager):
def get_by_natural_key(self, parent, component):
return self.get(
parent__thing__iden=parent, component__thing__iden=component)
class Materiel(models.Model):
"""Materiel Bill-Of-Material records."""
objects = MaterielManager()
parent = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='materiels')
component = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='usedin')
quantity = models.IntegerField()
class Meta:
unique_together = (('parent', 'component'),)
def natural_key(self):
return self.parent.natural_key() + self.component.natural_key()
natural_key.dependencies = ['sandbox.product']
def clean(self):
"""Validate BOM relationship."""
if self.parent and self.component and self.parent == self.component:
raise ValidationError('Parent and component can not be the same.')
def parent_name(self):
return self.parent.thing.name
def component_name(self):
return self.component.thing.name
def __str__(self):
return f'{self.parent_name()} :-- {self.quantity} * {self.component_name()}'
class Product2(Thing):
"""Experimental Product model extending Thing model.
Simple inheritance with implied PK of 'thing_ptr'.
"""
prod_secs = models.IntegerField('Production seconds')
low_level = models.IntegerField('Low-level code', default=-1)
# # Tweak the 'thing' field's 'serialize' attribute.
# Product2._meta.get_field('thing_ptr').serialize = True
class Product3(Thing):
"""Experimental Product model extending Thing model.
Inheritance with a OneToOneField and parent_link=True
"""
thing = models.OneToOneField(
Thing, on_delete=models.CASCADE, parent_link=True)
prod_secs = models.IntegerField('Production seconds')
low_level = models.IntegerField('Low-level code', default=-1)
# # Tweak the 'thing' field's 'serialize' attribute.
# Product3._meta.get_field('thing').serialize = True
class Product4Manager(models.Manager):
"""Old Product model manager."""
def get_by_natural_key(self, iden):
return self.get(thing__iden=iden)
class Product4(models.Model):
"""Old Product model.
Non-inherited instance with a OneToOneField and primary_key=True
(Product4 was my original variation.)
"""
objects = Product4Manager()
thing = models.OneToOneField(
Thing, on_delete=models.CASCADE, primary_key=True)
prod_secs = models.IntegerField('Production seconds')
low_level = models.IntegerField('Low-level code', default=-1)
class Meta:
ordering = ('thing__kind__rank', 'thing__rank')
def __str__(self):
return self.thing.name
def natural_key(self):
return self.thing.natural_key()
natural_key.dependencies = ['sandbox.thing']
# Tweak the 'thing' field's 'serialize' attribute.
# This overrides the normally 'False' value, which omits
# the 'thing' field from serialization when using
# the natural_key().
Product4._meta.get_field('thing').serialize = True
class Product5(models.Model):
"""Experimental Product model extending Thing model.
Non-inherited instance with a ForeignKeyField and primary_key=True
"""
objects = Product4Manager() # Reuse from Product4 model
thing = models.ForeignKey(
Thing, on_delete=models.CASCADE, primary_key=True)
prod_secs = models.IntegerField('Production seconds')
low_level = models.IntegerField('Low-level code', default=-1)
def __str__(self):
return self.thing.name
def natural_key(self):
return self.thing.natural_key()
natural_key.dependencies = ['sandbox.thing']
# # Tweak the 'thing' field's 'serialize' attribute.
# Product5._meta.get_field('thing').serialize = True
| 31.256757
| 99
| 0.686122
|
690a9a3cb85e3ab897f1ee50ae5566b23199e686
| 224
|
py
|
Python
|
setup.py
|
ytkj/tblg-scraper
|
0bd7cd42d308aaa325e9c61203e9d84dcb16a3bb
|
[
"MIT"
] | null | null | null |
setup.py
|
ytkj/tblg-scraper
|
0bd7cd42d308aaa325e9c61203e9d84dcb16a3bb
|
[
"MIT"
] | null | null | null |
setup.py
|
ytkj/tblg-scraper
|
0bd7cd42d308aaa325e9c61203e9d84dcb16a3bb
|
[
"MIT"
] | null | null | null |
import setuptools
install_requires = [
'selenium',
'chromedriver_binary',
'pandas',
]
setuptools.setup(
name='tblg_scraper',
install_requires=install_requires,
packages=setuptools.find_packages(),
)
| 17.230769
| 40
| 0.709821
|
94cdca979026188bf19e2cf3807789016f0f63db
| 407
|
py
|
Python
|
molecool/atom_data.py
|
kanishka-ghosh/molecool
|
bc0b772f029a8a3e3d5319d774c83aa7899f3735
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/atom_data.py
|
kanishka-ghosh/molecool
|
bc0b772f029a8a3e3d5319d774c83aa7899f3735
|
[
"BSD-3-Clause"
] | 1
|
2020-12-18T19:22:50.000Z
|
2020-12-18T19:22:50.000Z
|
molecool/atom_data.py
|
kanishka-ghosh/molecool
|
bc0b772f029a8a3e3d5319d774c83aa7899f3735
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Data used for the rest of the package
"""
atomic_weights = {
'H': 1.00784,
'C': 12.0107,
'N': 14.0067,
'O': 15.999,
'P': 30.973762,
'F': 18.998403,
'Cl': 35.453,
'Br': 79.904,
}
atom_colors = {
'H': 'white',
'C': '#D3D3D3',
'N': '#add8e6',
'O': 'red',
'P': '#FFA500',
'F': '#FFFFE0',
'Cl': '#98FB98',
'Br': '#F4A460',
'S': 'yellow'
}
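# Hedged usage sketch (not part of the original module): a molecular weight
# computed from the atomic_weights table above; the water example is an
# illustrative assumption.
def _example_molecular_weight():
    water = ['H', 'H', 'O']
    return sum(atomic_weights[symbol] for symbol in water)  # ~18.015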
| 15.653846
| 37
| 0.429975
|
33626491bb29ba18c20ae1069249c48bd14d756c
| 2,279
|
py
|
Python
|
venv/Lib/site-packages/flake8/__init__.py
|
dhina016/OnCV
|
2429ba7bc8589e92b2c273af6e8763487e1e05a8
|
[
"Unlicense",
"MIT"
] | 9
|
2021-04-12T16:11:38.000Z
|
2022-03-18T09:03:58.000Z
|
venv/Lib/site-packages/flake8/__init__.py
|
dhina016/OnCV
|
2429ba7bc8589e92b2c273af6e8763487e1e05a8
|
[
"Unlicense",
"MIT"
] | 21
|
2021-04-13T01:17:40.000Z
|
2022-03-11T16:06:50.000Z
|
venv/Lib/site-packages/flake8/__init__.py
|
dhina016/OnCV
|
2429ba7bc8589e92b2c273af6e8763487e1e05a8
|
[
"Unlicense",
"MIT"
] | 1
|
2021-11-15T20:56:46.000Z
|
2021-11-15T20:56:46.000Z
|
"""Top-level module for Flake8.
This module
- initializes logging for the command-line tool
- tracks the version of the package
- provides a way to configure logging for the command-line tool
.. autofunction:: flake8.configure_logging
"""
import logging
import sys
if False: # `typing.TYPE_CHECKING` was introduced in 3.5.2
from typing import Type # `typing.Type` was introduced in 3.5.2
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
__version__ = "3.8.3"
__version_info__ = tuple(
int(i) for i in __version__.split(".") if i.isdigit()
)
# There is nothing lower than logging.DEBUG (10) in the logging library,
# but we want an extra level to avoid being too verbose when using -vv.
_EXTRA_VERBOSE = 5
logging.addLevelName(_EXTRA_VERBOSE, "VERBOSE")
_VERBOSITY_TO_LOG_LEVEL = {
# output more than warnings but not debugging info
1: logging.INFO, # INFO is a numerical level of 20
# output debugging information
2: logging.DEBUG, # DEBUG is a numerical level of 10
# output extra verbose debugging information
3: _EXTRA_VERBOSE,
}
LOG_FORMAT = (
"%(name)-25s %(processName)-11s %(relativeCreated)6d "
"%(levelname)-8s %(message)s"
)
def configure_logging(verbosity, filename=None, logformat=LOG_FORMAT):
"""Configure logging for flake8.
:param int verbosity:
How verbose to be in logging information.
:param str filename:
Name of the file to append log information to.
If ``None`` this will log to ``sys.stderr``.
If the name is "stdout" or "stderr" this will log to the appropriate
stream.
"""
if verbosity <= 0:
return
if verbosity > 3:
verbosity = 3
log_level = _VERBOSITY_TO_LOG_LEVEL[verbosity]
if not filename or filename in ("stderr", "stdout"):
fileobj = getattr(sys, filename or "stderr")
handler_cls = logging.StreamHandler # type: Type[logging.Handler]
else:
fileobj = filename
handler_cls = logging.FileHandler
handler = handler_cls(fileobj)
handler.setFormatter(logging.Formatter(logformat))
LOG.addHandler(handler)
LOG.setLevel(log_level)
LOG.debug(
"Added a %s logging handler to logger root at %s", filename, __name__
)
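# Hedged usage sketch (not part of the original module), showing how a caller
# might wire up verbose logging; the chosen verbosity and stream are
# illustrative assumptions only.
def _example_configure_logging():  # pragma: no cover
    # Verbosity 2 maps to logging.DEBUG via _VERBOSITY_TO_LOG_LEVEL, and the
    # special name "stdout" makes the handler write to sys.stdout.
    configure_logging(2, filename="stdout")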
| 28.848101
| 77
| 0.691531
|
1891e359e26ead1fcd37ba3569d365a0523a7eb7
| 2,613
|
py
|
Python
|
lbr_description/launch/view_robot.launch.py
|
KCL-BMEIS/lbr_fri_ros2_stack
|
0d2cc6e2f6bcfbff860fa87ecb87a64520f55181
|
[
"MIT"
] | 16
|
2021-10-09T11:25:22.000Z
|
2022-02-15T22:04:41.000Z
|
lbr_description/launch/view_robot.launch.py
|
KCL-BMEIS/lbr_fri_ros2_stack
|
0d2cc6e2f6bcfbff860fa87ecb87a64520f55181
|
[
"MIT"
] | 11
|
2021-11-03T16:43:47.000Z
|
2022-03-26T13:23:54.000Z
|
lbr_description/launch/view_robot.launch.py
|
KCL-BMEIS/lbr_fri_ros2_stack
|
0d2cc6e2f6bcfbff860fa87ecb87a64520f55181
|
[
"MIT"
] | 2
|
2022-01-25T04:17:42.000Z
|
2022-01-25T11:07:53.000Z
|
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import Command, FindExecutable, PathJoinSubstitution, LaunchConfiguration
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
# for reference see
# https://github.com/ros-controls/ros2_control_demos/tree/master/ros2_control_demo_description/rrbot_description
def generate_launch_description():
# Launch arguments
launch_args = []
launch_args.append(DeclareLaunchArgument(
name='description_package',
default_value='lbr_description',
description='Description package.'
))
launch_args.append(DeclareLaunchArgument(
name='description_file',
default_value='urdf/med7/med7.urdf.xacro',
description='Path to URDF file, relative to description_package.'
))
launch_args.append(DeclareLaunchArgument(
name='rviz_pkg',
default_value='lbr_description',
description='Package containing rviz_config.'
))
launch_args.append(DeclareLaunchArgument(
name='rviz_config',
default_value='config/config.rviz',
description='Rviz configuration relative to rviz_pkg.'
))
launch_args.append(DeclareLaunchArgument(
name='robot_name',
default_value='lbr',
description='Set robot name.'
))
# Load robot description
robot_description_content = Command(
[
FindExecutable(name="xacro"), " ",
PathJoinSubstitution(
[FindPackageShare(LaunchConfiguration('description_package')), LaunchConfiguration('description_file')]
), " ",
"robot_name:=", LaunchConfiguration('robot_name')
]
)
robot_description = {'robot_description': robot_description_content}
# Create required nodes
joint_state_publisher_node = Node(
package="joint_state_publisher_gui",
executable="joint_state_publisher_gui",
)
robot_state_publisher_node = Node(
package="robot_state_publisher",
executable="robot_state_publisher",
output="both",
parameters=[robot_description],
)
rviz = Node(
package='rviz2',
executable='rviz2',
arguments=['-d', PathJoinSubstitution(
[FindPackageShare(LaunchConfiguration('rviz_pkg')), LaunchConfiguration('rviz_config')]
)]
)
return LaunchDescription(
launch_args +
[
joint_state_publisher_node,
robot_state_publisher_node,
rviz
]
)
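# Hedged usage note (not part of the original file): a launch file like this is
# typically invoked with `ros2 launch`, and the declared arguments can be
# overridden on the command line, e.g.
#
#   ros2 launch lbr_description view_robot.launch.py robot_name:=my_lbr
#
# The package and file names are taken from the repository metadata above; the
# argument override shown here is an illustrative assumption.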
| 30.383721
| 119
| 0.676617
|