Column schema (one record per source file):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
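A minimal sketch of working with records that follow this schema; the `datasets` call pattern is standard, but the local file name `records.jsonl` is a placeholder assumption, since the source does not name the underlying dataset.

```python
# Hypothetical: load rows matching the schema above from a local JSONL export
# and filter on a few of the scalar columns. "records.jsonl" is a placeholder.
from datasets import load_dataset

ds = load_dataset("json", data_files="records.jsonl", split="train")

# Keep small, Apache-2.0 licensed Python files.
subset = ds.filter(
    lambda row: row["ext"] == "py"
    and row["size"] < 50_000
    and "Apache-2.0" in row["max_stars_repo_licenses"]
)

for row in subset.select(range(min(3, len(subset)))):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["avg_line_length"])
```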
---
hexsha 2e6674eabad01f063a87f09e0f523066e3a45b14 | size 2,298 | ext py | lang Python
max_stars: mapping.py, sshleifer/transformers_config_mirror @ 5644483b24212e822eeda898b5693862f914cb23, licenses ["Apache-2.0"], count null, events null / null
max_issues: mapping.py, sshleifer/transformers_config_mirror @ 5644483b24212e822eeda898b5693862f914cb23, licenses ["Apache-2.0"], count null, events null / null
max_forks: mapping.py, sshleifer/transformers_config_mirror @ 5644483b24212e822eeda898b5693862f914cb23, licenses ["Apache-2.0"], count 1, events 2022-02-28T18:10:49.000Z / 2022-02-28T18:10:49.000Z
content:
from transformers.hf_api import HfApi
import os
import json
import sys
from pathlib import Path
MIRROR_DIR = Path('/Users/shleifer/transformers-config-mirror/')
from transformers import MarianConfig
from durbango import tqdm_nice
DEFAULT_UPDATE_DICT = {'max_length': 512}
def bulk_update_local_configs(models, update_dict=DEFAULT_UPDATE_DICT,
save_dir=MIRROR_DIR):
failures = []
for slug in tqdm_nice(models):
assert slug.startswith('opus-mt')
try:
cfg = MarianConfig.from_pretrained(f'Helsinki-NLP/{slug}')
except OSError:
failures.append(slug)
continue
for k,v in update_dict.items():
setattr(cfg, k, v)
# if a new value depends on a cfg value, add code here
# e.g. cfg.decoder_start_token_id = cfg.pad_token_id
dest_dir = (save_dir/'Helsinki-NLP'/ slug)
if not dest_dir.exists():
print(f'making {dest_dir}')
dest_dir.mkdir(exist_ok=True)
cfg.save_pretrained(dest_dir)
assert cfg.from_pretrained(dest_dir).model_type == 'marian'
def update_config(model_identifier, updates):
api = HfApi()
model_list = api.model_list()
model_dict = [
model_dict
for model_dict in model_list
if model_dict.modelId == model_identifier
][0]
model_identifier = "_".join(model_identifier.split("/"))
http = "https://s3.amazonaws.com/"
hf_url = "models.huggingface.co/"
config_path_aws = http + hf_url + model_dict.key
file_name = f"./{model_identifier}_config.json"
bash_command = f"curl {config_path_aws} > {file_name}"
os.system(bash_command)
with open(file_name) as f:
config_json = json.load(f)
bash_command = "rm {}".format(file_name)
os.system(bash_command)
##### HERE YOU SHOULD STATE WHICH PARAMS WILL BE CHANGED #####
config_json.update(updates)
# save config as it was saved before
with open(file_name, "w") as f:
json.dump(config_json, f, indent=2, sort_keys=True)
# upload new config
bash_command = f"s3cmd cp {file_name} s3://{hf_url + model_dict.key}"
os.system(bash_command)
if __name__ == "__main__":
model_identifier = sys.argv[1]
update_config(model_identifier)
avg_line_length 30.236842 | max_line_length 73 | alphanum_fraction 0.659704
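A short usage sketch for the config-mirror helpers above, assuming the `mapping` module is importable; the opus-mt slugs are examples only.

```python
# Hypothetical invocation of the helpers defined in mapping.py above.
from mapping import DEFAULT_UPDATE_DICT, bulk_update_local_configs, update_config

# Rewrite local mirror configs for a couple of example Helsinki-NLP models.
bulk_update_local_configs(["opus-mt-en-de", "opus-mt-de-en"],
                          update_dict={"max_length": 512})

# Or patch a single remote config on S3 (requires s3cmd credentials).
update_config("Helsinki-NLP/opus-mt-en-de", DEFAULT_UPDATE_DICT)
```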
---
hexsha a6b7b7e53274e0900faefa73b666844708ee36ab | size 20,920 | ext py | lang Python
max_stars: tensorflow/python/ops/gradients_test.py, My-Technical-Architect/tensorflow @ 35cf4653e6fe15953e2e565afc5a0fd2ab4d5290, licenses ["Apache-2.0"], count null, events null / null
max_issues: tensorflow/python/ops/gradients_test.py, My-Technical-Architect/tensorflow @ 35cf4653e6fe15953e2e565afc5a0fd2ab4d5290, licenses ["Apache-2.0"], count null, events null / null
max_forks: tensorflow/python/ops/gradients_test.py, My-Technical-Architect/tensorflow @ 35cf4653e6fe15953e2e565afc5a0fd2ab4d5290, licenses ["Apache-2.0"], count null, events null / null
content:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops # pylint: disable=unused-import
from tensorflow.python.ops import functional_ops # pylint: disable=unused-import
from tensorflow.python.ops import gradients
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import state_grad # pylint: disable=unused-import
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients_impl._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients_impl._GatherInputs(to_ops, reached_ops)
between_ops.sort(key=lambda x: -x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op], _OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.stack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.stack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat_v2([t4, t3], 0)
t6 = constant([2.0])
t7 = array_ops.concat_v2([t5, t6], 0)
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.stack([t1, t2])
t4 = array_ops.concat_v2([t3, t3, t3], 0)
t5 = constant([1.0])
t6 = array_ops.concat_v2([t4, t5], 0)
t7 = array_ops.concat_v2([t6, t3], 0)
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups())
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
def testColocateGradientsWithAggregationInMultipleDevices(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
with g.device("/task:1"):
wx = math_ops.matmul(w, x)
with g.device("/task:2"):
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertTrue(w.op.colocation_groups() != gw2.op.colocation_groups())
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default() as g:
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all(x is not None for x in grads))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default():
def _TestOpGrad(_, float_grad, string_grad):
"""Gradient function for TestStringOutput."""
self.assertEquals(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterGradient("TestStringOutput")(_TestOpGrad)
c = constant(1.0)
x, _ = test_ops.test_string_output(c)
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
grads = gradients.gradients(w, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
def testSingletonIndexedSlices(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
dy = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32))
dx, = gradients.gradients(y, x, grad_ys=dy)
# The gradient of tf.identity should pass the value through unchanged.
# A previous version of the code did this only for tf.Tensor, not
# tf.IndexedSlices.
self.assertEqual(dx, dy)
class FunctionGradientsTest(test_util.TensorFlowTestCase):
@classmethod
def XSquarePlusB(cls, x, b):
return x * x + b
@classmethod
def XSquarePlusBGradient(cls, x, b, g):
# Perturb gradients (multiply by 2), so we can test that this was called.
g *= 2.0
return g * 2.0 * x, g
@classmethod
def _PythonGradient(cls, op, grad):
# Perturb gradients (multiply by 3), so we can test that this was called.
grad *= 3.0
return grad * op.inputs[0] * 2.0, grad
@classmethod
def _GetFunc(cls, **kwargs):
return function.Defun(dtypes.float32, dtypes.float32, **
kwargs)(cls.XSquarePlusB)
def _GetFuncGradients(self, f, x_value, b_value):
x = constant_op.constant(x_value, name="x")
b = constant_op.constant(b_value, name="b")
y = f(x, b)
grads = gradients.gradients(y, [x, b])
with self.test_session() as sess:
return sess.run(grads)
def testFunctionGradientsBasic(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc()
# Get gradients (should add SymbolicGradient node for function).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0], grads[0])
self.assertAllEqual([1.0], grads[1])
def testFunctionGradientsComposition(self):
with ops.Graph().as_default():
f = self._GetFunc()
x = constant_op.constant([2.0], name="x")
b1 = constant_op.constant([1.0], name="b1")
b2 = constant_op.constant([1.0], name="b2")
y = f(f(x, b1), b2)
# Build gradient graph (should add SymbolicGradient node for function).
grads = gradients.gradients(y, [x, b1])
with self.test_session() as sess:
self.assertAllEqual([40.0], sess.run(grads)[0])
self.assertAllEqual([10.0], sess.run(grads)[1])
def testFunctionGradientsWithGradFunc(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
f = self._GetFunc(grad_func=grad_func)
# Get gradients (should add SymbolicGradient node for function, which
# uses the grad_func above, which multiplies all gradients by 2).
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 2], grads[0])
self.assertAllEqual([1.0 * 2], grads[1])
def testFunctionGradientWithRegistration(self):
g = ops.Graph()
with g.as_default():
f = self._GetFunc(python_grad_func=self._PythonGradient)
# Get gradients, using the python gradient function. It multiplies the
# gradients by 3.
grads = self._GetFuncGradients(f, [2.0], [1.0])
self.assertAllEqual([4.0 * 3], grads[0])
self.assertAllEqual([1.0 * 3], grads[1])
def testFunctionGradientWithGradFuncAndRegistration(self):
g = ops.Graph()
with g.as_default():
grad_func = function.Defun(dtypes.float32, dtypes.float32,
dtypes.float32)(self.XSquarePlusBGradient)
with self.assertRaisesRegexp(ValueError, "Gradient defined twice"):
f = self._GetFunc(
grad_func=grad_func, python_grad_func=self._PythonGradient)
f.add_to_graph(ops.Graph())
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients_impl._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class HessianTest(test_util.TensorFlowTestCase):
def testHessian1D(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = x^T A x is H = A + A^T.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
x_value = rng.randn(m).astype("float32")
hess_value = mat_value + mat_value.T
with self.test_session(use_gpu=True):
mat = constant_op.constant(mat_value)
x = constant_op.constant(x_value)
x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :])
hess = gradients.hessians(x_mat_x, x)[0]
hess_actual = hess.eval()
self.assertAllClose(hess_value, hess_actual)
def testHessian1D_multi(self):
# Test the computation of the hessian with respect to multiple tensors
m = 4
n = 3
rng = np.random.RandomState([1, 2, 3])
mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
x_values = [rng.randn(m).astype("float32") for _ in range(n)]
hess_values = [mat_value + mat_value.T for mat_value in mat_values]
with self.test_session(use_gpu=True):
mats = [constant_op.constant(mat_value) for mat_value in mat_values]
xs = [constant_op.constant(x_value) for x_value in x_values]
xs_mats_xs = [
math_ops.reduce_sum(x[:, None] * mat * x[None, :])
for x, mat in zip(xs, mats)
]
hessians = gradients.hessians(xs_mats_xs, xs)
hessians_actual = [hess.eval() for hess in hessians]
for hess_value, hess_actual in zip(hess_values, hessians_actual):
self.assertAllClose(hess_value, hess_actual)
def testHessianInvalidDimension(self):
for shape in [(10, 10), None]:
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32, shape)
# Expect a ValueError because the dimensions are wrong
with self.assertRaises(ValueError):
gradients.hessians(x, x)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testIndexedSlicesToTensorList(self):
with self.test_session():
numpy_list = []
dense_list = []
sparse_list = []
for _ in range(3):
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
numpy_list.append(np_val)
dense_list.append(c)
sparse_list.append(c_sparse)
packed_dense = array_ops.stack(dense_list)
packed_sparse = array_ops.stack(sparse_list)
self.assertAllClose(packed_dense.eval(), packed_sparse.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values,
math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32), constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory." in
str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory." in
str(w[0].message))
if __name__ == "__main__":
googletest.main()
avg_line_length 38.526703 | max_line_length 81 | alphanum_fraction 0.658222
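The Hessian tests above rely on the identity that for f(x) = xᵀAx the Hessian is A + Aᵀ; a small NumPy-only finite-difference check of that identity, independent of TensorFlow and added purely for illustration:

```python
# Verify H = A + A^T for the quadratic form f(x) = x^T A x via second-order
# finite differences (exact for a quadratic, up to floating-point error).
import numpy as np

rng = np.random.RandomState(0)
m = 4
A = rng.randn(m, m)
x = rng.randn(m)

def f(v):
    return v @ A @ v

eps = 1e-3
H = np.zeros((m, m))
for i in range(m):
    for j in range(m):
        ei = np.eye(m)[i] * eps
        ej = np.eye(m)[j] * eps
        H[i, j] = (f(x + ei + ej) - f(x + ei) - f(x + ej) + f(x)) / eps**2

assert np.allclose(H, A + A.T, atol=1e-6)
```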
---
hexsha 5a364a5724bebfc7ea1a36b3d2114c42eda00c54 | size 1,240 | ext py | lang Python
max_stars: scripts/redact_cli_py/tests/factories/image_factory.py, ScriptBox21/OCR-Form-Tools @ 2b130424f76cab5d1391a7c90674a9accf2890c3, licenses ["MIT"], count 1, events 2022-01-21T07:02:02.000Z / 2022-01-21T07:02:02.000Z
max_issues: scripts/redact_cli_py/tests/factories/image_factory.py, ScriptBox21/OCR-Form-Tools @ 2b130424f76cab5d1391a7c90674a9accf2890c3, licenses ["MIT"], count 1, events 2022-02-16T16:24:36.000Z / 2022-02-16T16:24:36.000Z
max_forks: scripts/redact_cli_py/tests/factories/image_factory.py, ScriptBox21/OCR-Form-Tools @ 2b130424f76cab5d1391a7c90674a9accf2890c3, licenses ["MIT"], count null, events null / null
content:
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project
# root for license information.
from PIL import Image
class ImageFactory:
@staticmethod
def build() -> Image:
image_path = "testdata/testdata.jpg"
return Image.open(image_path)
@staticmethod
def build_redacted() -> Image:
image_path = "testdata/testdata.redacted.jpg"
return Image.open(image_path)
@staticmethod
def build_partial() -> Image:
image_path = "testdata/testdata-partial.jpg"
return Image.open(image_path)
@staticmethod
def build_redacted_partial() -> Image:
image_path = "testdata/testdata-partial.redacted.jpg"
return Image.open(image_path)
@staticmethod
def build_mode_1() -> Image:
image_path = "testdata/testdata-mode-1.tiff"
return Image.open(image_path)
@staticmethod
def build_redacted_mode_1() -> Image:
image_path = "testdata/testdata-mode-1.redacted.tiff"
return Image.open(image_path)
@staticmethod
def build_rendered_pdf() -> Image:
image_path = "testdata/testdata.pdf.rendered.png"
return Image.open(image_path)
avg_line_length 28.181818 | max_line_length 64 | alphanum_fraction 0.679032
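A hypothetical pytest-style use of the ImageFactory above; the dimension check is illustrative, and the import path assumes scripts/redact_cli_py is the working directory with its testdata files present.

```python
# Illustrative only: compare an original fixture image with its redacted
# counterpart using the factory defined above.
from tests.factories.image_factory import ImageFactory

def test_redacted_image_keeps_dimensions():
    original = ImageFactory.build()
    redacted = ImageFactory.build_redacted()
    assert original.size == redacted.size
```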
---
hexsha 2618a31e6fcf488de2d11ecb31578a8ee9c4b99f | size 12,792 | ext py | lang Python
max_stars: tests/test_fetcher.py, v0ila/Lauwersscan @ 651be7ca94f5365e160a09ccd1cc1a985ac37840, licenses ["Apache-2.0"], count 1, events 2015-11-08T07:33:31.000Z / 2015-11-08T07:33:31.000Z
max_issues: tests/test_fetcher.py, willworks/pyspider @ 9fc2ffa57324d1a42ef767289faa3a04f4d20f2e, licenses ["Apache-2.0"], count null, events null / null
max_forks: tests/test_fetcher.py, willworks/pyspider @ 9fc2ffa57324d1a42ef767289faa3a04f4d20f2e, licenses ["Apache-2.0"], count null, events null / null
content:
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-15 22:10:35
import os
import json
import copy
import time
import umsgpack
import subprocess
import unittest2 as unittest
from multiprocessing import Queue
import logging
import logging.config
logging.config.fileConfig("pyspider/logging.conf")
try:
from six.moves import xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
from pyspider.libs import utils
from pyspider.libs.response import rebuild_response
from pyspider.fetcher.tornado_fetcher import Fetcher
class TestFetcher(unittest.TestCase):
sample_task_http = {
'taskid': 'taskid',
'project': 'project',
'url': '',
'fetch': {
'method': 'GET',
'headers': {
'Cookie': 'a=b',
'a': 'b'
},
'cookies': {
'c': 'd',
},
'timeout': 60,
'save': 'abc',
},
'process': {
'callback': 'callback',
'save': [1, 2, 3],
},
}
@classmethod
def setUpClass(self):
import tests.data_test_webpage
import httpbin
self.httpbin_thread = utils.run_in_subprocess(httpbin.app.run, port=14887)
self.httpbin = 'http://127.0.0.1:14887'
self.inqueue = Queue(10)
self.outqueue = Queue(10)
self.fetcher = Fetcher(self.inqueue, self.outqueue)
self.fetcher.phantomjs_proxy = '127.0.0.1:25555'
self.rpc = xmlrpc_client.ServerProxy('http://localhost:%d' % 24444)
self.xmlrpc_thread = utils.run_in_thread(self.fetcher.xmlrpc_run, port=24444)
self.thread = utils.run_in_thread(self.fetcher.run)
self.proxy_thread = subprocess.Popen(['pyproxy', '--username=binux',
'--password=123456', '--port=14830',
'--debug'], close_fds=True)
self.proxy = '127.0.0.1:14830'
try:
self.phantomjs = subprocess.Popen(['phantomjs',
os.path.join(os.path.dirname(__file__),
'../pyspider/fetcher/phantomjs_fetcher.js'),
'25555'])
except OSError:
self.phantomjs = None
time.sleep(0.5)
@classmethod
def tearDownClass(self):
self.proxy_thread.terminate()
self.proxy_thread.wait()
self.httpbin_thread.terminate()
self.httpbin_thread.join()
if self.phantomjs:
self.phantomjs.kill()
self.phantomjs.wait()
self.rpc._quit()
self.thread.join()
time.sleep(1)
def test_10_http_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertIn('c=d', response.json['headers'].get('Cookie'), response.json)
self.assertIn('a=b', response.json['headers'].get('Cookie'), response.json)
def test_15_http_post(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
request['fetch']['data'] = 'binux'
request['fetch']['cookies'] = {'c': 'd'}
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['form'].get('binux'), '')
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertIn('c=d', response.json['headers'].get('Cookie'), response.json)
self.assertIn('a=b', response.json['headers'].get('Cookie'), response.json)
def test_20_dataurl_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_30_with_queue(self):
request= copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_40_with_rpc(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = umsgpack.unpackb(self.rpc.fetch(request).data)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_50_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
# utf8 encoding 中文
request['fetch']['data'] = "[BASE64-DATA]5Lit5paH[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
self.assertIn(u'中文', response.json['form'], response.json)
def test_55_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
# gbk encoding 中文
request['fetch']['data'] = "[BASE64-DATA]1tDOxA==[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
def test_60_timeout(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['timeout'] = 3
start_time = time.time()
self.inqueue.put(request)
task, result = self.outqueue.get()
end_time = time.time()
self.assertGreater(end_time - start_time, 1.5)
self.assertLess(end_time - start_time, 4.5)
def test_65_418(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/status/418'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 418)
self.assertIn('teapot', response.text)
def test_70_phantomjs_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/get'
request['fetch']['fetch_type'] = 'js'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
data = json.loads(response.doc('pre').text())
self.assertIsNotNone(data, response.content)
self.assertEqual(data['headers'].get('A'), 'b', response.json)
self.assertEqual(data['headers'].get('Cookie'), 'c=d', response.json)
def test_80_phantomjs_timeout(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['fetch_type'] = 'js'
request['fetch']['timeout'] = 3
start_time = time.time()
result = self.fetcher.sync_fetch(request)
end_time = time.time()
self.assertGreater(end_time - start_time, 2)
self.assertLess(end_time - start_time, 5)
def test_90_phantomjs_js_script(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/html'
request['fetch']['fetch_type'] = 'js'
request['fetch']['js_script'] = 'function() { document.write("binux") }'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertIn('binux', result['content'])
def test_a100_phantomjs_sharp_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/pyspider/ajax.html'
request['fetch']['fetch_type'] = 'js'
request['fetch']['headers']['User-Agent'] = 'pyspider-test'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertNotIn('loading', result['content'])
self.assertIn('done', result['content'])
self.assertIn('pyspider-test', result['content'])
def test_a110_dns_error(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'http://www.not-exists-site.com/'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
self.inqueue.put(request)
task, result = self.outqueue.get()
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
def test_a120_http_get_with_proxy_fail(self):
self.fetcher.proxy = self.proxy
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 403, result)
self.fetcher.proxy = None
def test_a130_http_get_with_proxy_ok(self):
self.fetcher.proxy = self.proxy
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get?username=binux&password=123456'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertIn('c=d', response.json['headers'].get('Cookie'), response.json)
self.assertIn('a=b', response.json['headers'].get('Cookie'), response.json)
self.fetcher.proxy = None
def test_a140_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect-to?url=/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.url, self.httpbin+'/get')
def test_a150_too_much_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect/10'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 599, result)
self.assertIn('redirects followed', response.error)
def test_a160_cookie(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/cookies/set?k1=v1&k2=v2'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.cookies, {'a': 'b', 'k1': 'v1', 'k2': 'v2', 'c': 'd'}, result)
avg_line_length 39.36 | max_line_length 96 | alphanum_fraction 0.629925
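A standalone sketch of driving the Fetcher outside the test harness, modeled on the setUpClass pattern above; the minimal task dict and queue sizes are illustrative, not a definitive recipe.

```python
# Hypothetical standalone use of pyspider's Fetcher, mirroring the test setup:
# the fetch loop must be running in a thread before sync_fetch is called.
from multiprocessing import Queue

from pyspider.fetcher.tornado_fetcher import Fetcher
from pyspider.libs import utils
from pyspider.libs.response import rebuild_response

fetcher = Fetcher(Queue(10), Queue(10))
utils.run_in_thread(fetcher.run)  # start the fetch loop, as in setUpClass above

task = {
    'taskid': 'demo',
    'project': 'demo',
    'url': 'data:,hello',
    'fetch': {'method': 'GET', 'timeout': 60, 'save': 'abc'},
    'process': {'callback': 'callback'},
}
response = rebuild_response(fetcher.sync_fetch(task))
print(response.status_code, response.text)  # the data: URL test above expects 200 / 'hello'
```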
---
hexsha 42554d8c327a386075e2a0bf4c15bf1f4bca88d2 | size 3,615 | ext py | lang Python
max_stars: gcp_variant_transforms/libs/vcf_header_definitions_merger.py, tsa87/gcp-variant-transforms @ 1742dcafdc2bc6b7eb3959c4963dc5ed1ac2be55, licenses ["Apache-2.0"], count 113, events 2017-11-09T20:48:36.000Z / 2022-03-24T19:52:31.000Z
max_issues: gcp_variant_transforms/libs/vcf_header_definitions_merger.py, tsa87/gcp-variant-transforms @ 1742dcafdc2bc6b7eb3959c4963dc5ed1ac2be55, licenses ["Apache-2.0"], count 535, events 2017-11-09T15:47:08.000Z / 2022-03-31T17:39:10.000Z
max_forks: gcp_variant_transforms/libs/vcf_header_definitions_merger.py, tsa87/gcp-variant-transforms @ 1742dcafdc2bc6b7eb3959c4963dc5ed1ac2be55, licenses ["Apache-2.0"], count 62, events 2017-11-08T21:16:07.000Z / 2022-01-14T19:12:42.000Z
content:
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VCF Header Definitions Merger class."""
import collections
from collections import namedtuple
from typing import Dict, List # pylint: disable=unused-import
from gcp_variant_transforms.beam_io import vcf_header_io
# `Definition` cherry-picks the attributes from vcf header definitions that
# are critical for checking field compatibilities across VCF files.
Definition = namedtuple('Definition',
[vcf_header_io.VcfParserHeaderKeyConstants.NUM,
vcf_header_io.VcfParserHeaderKeyConstants.TYPE])
class VcfHeaderDefinitions():
"""Container for header definitions."""
def __init__(self, vcf_header=None):
# type: (vcf_header_io.VcfHeader) -> None
"""Initializes a `VcfHeaderDefinitions` object.
Creates two dictionaries (for infos and formats respectively) that map field
id to a dictionary which maps `Definition` to a list of file names.
"""
self._infos = collections.defaultdict(dict)
self._formats = collections.defaultdict(dict)
if not vcf_header:
return
for key, val in vcf_header.infos.items():
definition = Definition(
val[vcf_header_io.VcfParserHeaderKeyConstants.NUM],
val[vcf_header_io.VcfParserHeaderKeyConstants.TYPE])
self._infos[key][definition] = [vcf_header.file_path]
for key, val in vcf_header.formats.items():
definition = Definition(
val[vcf_header_io.VcfParserHeaderKeyConstants.NUM],
val[vcf_header_io.VcfParserHeaderKeyConstants.TYPE])
self._formats[key][definition] = [vcf_header.file_path]
def __eq__(self, other):
return self._infos == other._infos and self._formats == other._formats
@property
def infos(self):
return self._infos
@property
def formats(self):
return self._formats
class DefinitionsMerger():
"""Class for merging two `VcfHeaderDefinitions`s."""
# For the same field definition, save at most `_MAX_NUM_FILE_NAMES` names.
_MAX_NUM_FILE_NAMES = 5
def merge(self, first, second):
# type: (VcfHeaderDefinitions, VcfHeaderDefinitions) -> None
"""Updates `first`'s definitions with values from `second`."""
if (not isinstance(first, VcfHeaderDefinitions) or
not isinstance(second, VcfHeaderDefinitions)):
raise NotImplementedError
self._merge_definitions(first.infos, second.infos)
self._merge_definitions(first.formats, second.formats)
def _merge_definitions(
self,
first, # type: Dict[str, Dict[Definition, List[str]]]
second # type: Dict[str, Dict[Definition, List[str]]]
):
# type: (...) -> None
"""Updates `first` by merging values from `first` and `second`."""
for key, definitions_to_files_map in second.items():
for definition, file_names in definitions_to_files_map.items():
first[key].setdefault(definition, [])
first[key][definition].extend(str(s) for s in file_names)
first[key][definition] = (
first[key][definition][:self._MAX_NUM_FILE_NAMES])
avg_line_length 38.052632 | max_line_length 80 | alphanum_fraction 0.715906
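A minimal sketch of the merge API defined above; empty, default-constructed definition containers keep the snippet self-contained, whereas real inputs would be built from parsed vcf_header_io.VcfHeader objects.

```python
# Illustrative use of DefinitionsMerger; real VcfHeaderDefinitions would be
# constructed from parsed VCF headers rather than default-constructed.
from gcp_variant_transforms.libs.vcf_header_definitions_merger import (
    DefinitionsMerger, VcfHeaderDefinitions)

accumulated = VcfHeaderDefinitions()  # running, merged state
incoming = VcfHeaderDefinitions()     # definitions from one more VCF file

DefinitionsMerger().merge(accumulated, incoming)  # updates `accumulated` in place
print(dict(accumulated.infos), dict(accumulated.formats))
```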
---
hexsha 2e27c87ccd467095efe26edf7488b28f8ddc3bed | size 26,898 | ext py | lang Python
max_stars: sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_02_01_preview/operations/_datastores_operations.py, dubiety/azure-sdk-for-python @ 62ffa839f5d753594cf0fe63668f454a9d87a346, licenses ["MIT"], count 1, events 2022-02-01T18:50:12.000Z / 2022-02-01T18:50:12.000Z
max_issues: sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_02_01_preview/operations/_datastores_operations.py, ellhe-blaster/azure-sdk-for-python @ 82193ba5e81cc5e5e5a5239bba58abe62e86f469, licenses ["MIT"], count null, events null / null
max_forks: sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_02_01_preview/operations/_datastores_operations.py, ellhe-blaster/azure-sdk-for-python @ 82193ba5e81cc5e5e5a5239bba58abe62e86f469, licenses ["MIT"], count null, events null / null
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
skip = kwargs.pop('skip', None) # type: Optional[str]
count = kwargs.pop('count', 30) # type: Optional[int]
is_default = kwargs.pop('is_default', None) # type: Optional[bool]
names = kwargs.pop('names', None) # type: Optional[List[str]]
search_text = kwargs.pop('search_text', None) # type: Optional[str]
order_by = kwargs.pop('order_by', None) # type: Optional[str]
order_by_asc = kwargs.pop('order_by_asc', False) # type: Optional[bool]
api_version = "2022-02-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if skip is not None:
query_parameters['$skip'] = _SERIALIZER.query("skip", skip, 'str')
if count is not None:
query_parameters['count'] = _SERIALIZER.query("count", count, 'int')
if is_default is not None:
query_parameters['isDefault'] = _SERIALIZER.query("is_default", is_default, 'bool')
if names is not None:
query_parameters['names'] = _SERIALIZER.query("names", names, '[str]', div=',')
if search_text is not None:
query_parameters['searchText'] = _SERIALIZER.query("search_text", search_text, 'str')
if order_by is not None:
query_parameters['orderBy'] = _SERIALIZER.query("order_by", order_by, 'str')
if order_by_asc is not None:
query_parameters['orderByAsc'] = _SERIALIZER.query("order_by_asc", order_by_asc, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2022-02-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2022-02-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
skip_validation = kwargs.pop('skip_validation', False) # type: Optional[bool]
api_version = "2022-02-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if skip_validation is not None:
query_parameters['skipValidation'] = _SERIALIZER.query("skip_validation", skip_validation, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_secrets_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2022-02-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}/listSecrets')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class DatastoresOperations(object):
"""DatastoresOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name, # type: str
workspace_name, # type: str
skip=None, # type: Optional[str]
count=30, # type: Optional[int]
is_default=None, # type: Optional[bool]
names=None, # type: Optional[List[str]]
search_text=None, # type: Optional[str]
order_by=None, # type: Optional[str]
order_by_asc=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DatastoreResourceArmPaginatedResult"]
"""List datastores.
List datastores.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param skip: Continuation token for pagination.
:type skip: str
:param count: Maximum number of results to return.
:type count: int
:param is_default: Filter down to the workspace default datastore.
:type is_default: bool
:param names: Names of datastores to return.
:type names: list[str]
:param search_text: Text to search for in the datastore names.
:type search_text: str
:param order_by: Order by property (createdtime | modifiedtime | name).
:type order_by: str
:param order_by_asc: Order by property in ascending order.
:type order_by_asc: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatastoreResourceArmPaginatedResult or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.DatastoreResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatastoreResourceArmPaginatedResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
skip=skip,
count=count,
is_default=is_default,
names=names,
search_text=search_text,
order_by=order_by,
order_by_asc=order_by_asc,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
skip=skip,
count=count,
is_default=is_default,
names=names,
search_text=search_text,
order_by=order_by,
order_by_asc=order_by_asc,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DatastoreResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete datastore.
Delete datastore.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Datastore name.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DatastoreData"
"""Get datastore.
Get datastore.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Datastore name.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatastoreData, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DatastoreData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatastoreData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DatastoreData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
body, # type: "_models.DatastoreData"
skip_validation=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> "_models.DatastoreData"
"""Create or update datastore.
Create or update datastore.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Datastore name.
:type name: str
:param body: Datastore entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.DatastoreData
:param skip_validation: Flag to skip validation.
:type skip_validation: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatastoreData, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DatastoreData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatastoreData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'DatastoreData')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
content_type=content_type,
json=_json,
skip_validation=skip_validation,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DatastoreData', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DatastoreData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}'} # type: ignore
@distributed_trace
def list_secrets(
self,
resource_group_name, # type: str
workspace_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DatastoreSecrets"
"""Get datastore secrets.
Get datastore secrets.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param name: Datastore name.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatastoreSecrets, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DatastoreSecrets
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DatastoreSecrets"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_secrets_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
name=name,
template_url=self.list_secrets.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DatastoreSecrets', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_secrets.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/datastores/{name}/listSecrets'} # type: ignore
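# --- Usage sketch (not part of the generated SDK module) ---
# A minimal, hypothetical example of how the operations above are usually reached
# through a management client. The client class name and the `datastores` attribute
# are assumptions based on common azure-mgmt conventions; the resource group,
# workspace and datastore names are placeholders.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient  # assumed client name

    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    )
    # Page through every datastore in the workspace (the `list` operation above).
    for datastore in client.datastores.list(resource_group_name="<rg>", workspace_name="<ws>"):
        print(datastore.name)
    # Fetch one datastore and its secrets (`get` and `list_secrets` above).
    datastore = client.datastores.get(resource_group_name="<rg>", workspace_name="<ws>", name="<name>")
    secrets = client.datastores.list_secrets(resource_group_name="<rg>", workspace_name="<ws>", name="<name>")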
| 42.425868
| 222
| 0.663618
|
a960b554d7903dcec667324b77548a4988a629b0
| 26,633
|
py
|
Python
|
demo/predictor.py
|
wesleylp/maskrcnn-benchmark
|
16c1787414a88cabdc1835fadd53e9d394cb43a5
|
[
"MIT"
] | null | null | null |
demo/predictor.py
|
wesleylp/maskrcnn-benchmark
|
16c1787414a88cabdc1835fadd53e9d394cb43a5
|
[
"MIT"
] | null | null | null |
demo/predictor.py
|
wesleylp/maskrcnn-benchmark
|
16c1787414a88cabdc1835fadd53e9d394cb43a5
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
from maskrcnn_benchmark.utils import cv2_util
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from torchvision import transforms as T
from torchvision.transforms import functional as F
class Resize(object):
def __init__(self, min_size, max_size):
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image):
size = self.get_size(image.size)
image = F.resize(image, size)
return image
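# Worked example of the sizing rule above (illustrative, not part of the original file):
# with min_size=800 and max_size=1333, a 640x480 image has min_original_size=480 and
# max_original_size=640; 640 / 480 * 800 = 1066.7 <= 1333, so size stays 800. Since
# w > h, get_size returns (oh, ow) = (800, int(800 * 640 / 480)) = (800, 1066): the
# shorter side is scaled to min_size and the aspect ratio is preserved, with max_size
# only kicking in for very elongated images.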
class COCODemo(object):
# COCO categories for pretty print
CATEGORIES = [
"__background",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
def __init__(self,
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=224,
weight_loading=None):
self.cfg = cfg.clone()
self.model = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
self.min_image_size = min_image_size
save_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
if weight_loading:
print('Loading weight from {}.'.format(weight_loading))
_ = checkpointer._load_model(torch.load(weight_loading))
self.transforms = self.build_transform()
mask_threshold = -1 if show_mask_heatmaps else 0.5
self.masker = Masker(threshold=mask_threshold, padding=1)
# used to make colors for each class
self.palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
self.show_mask_heatmaps = show_mask_heatmaps
self.masks_per_dim = masks_per_dim
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
transform = T.Compose([
T.ToPILImage(),
Resize(min_size, max_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
])
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
predictions = self.compute_prediction(image)
top_predictions = self.select_top_predictions(predictions)
result = image.copy()
if self.show_mask_heatmaps:
return self.create_mask_montage(result, top_predictions)
result = self.overlay_boxes(result, top_predictions)
if self.cfg.MODEL.MASK_ON:
result = self.overlay_mask(result, top_predictions)
if self.cfg.MODEL.KEYPOINT_ON:
result = self.overlay_keypoints(result, top_predictions)
result = self.overlay_class_names(result, top_predictions)
return result
def compute_prediction(self, original_image):
"""
Arguments:
original_image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions = self.model(image_list)
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
def select_top_predictions(self, predictions):
"""
Select only predictions which have a `score` > self.confidence_threshold,
and return the predictions in descending order of score.
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def compute_colors_for_labels(self, labels):
"""
Simple function that adds fixed colors depending on the class
"""
colors = labels[:, None] * self.palette
colors = (colors % 255).numpy().astype("uint8")
return colors
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = self.compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(image, tuple(top_left), tuple(bottom_right), tuple(color), 1)
return image
def overlay_mask(self, image, predictions):
"""
Adds the instances contours for each predicted object.
Each label has a different color.
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask` and `labels`.
"""
masks = predictions.get_field("mask").numpy()
labels = predictions.get_field("labels")
colors = self.compute_colors_for_labels(labels).tolist()
for mask, color in zip(masks, colors):
thresh = mask[0, :, :, None]
# use cv2_util (imported above) so this works with both OpenCV 3, which returns
# three values, and OpenCV 4, which returns two
contours, hierarchy = cv2_util.findContours(thresh, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
image = cv2.drawContours(image, contours, -1, color, 3)
composite = image
return composite
def create_mask_montage(self, image, predictions):
"""
Create a montage showing the probability heatmaps for each one of the
detected objects
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask`.
"""
masks = predictions.get_field("mask")
masks_per_dim = self.masks_per_dim
masks = L.interpolate(masks.float(), scale_factor=1 / masks_per_dim).byte()
height, width = masks.shape[-2:]
max_masks = masks_per_dim**2
masks = masks[:max_masks]
# handle case where we have less detections than max_masks
if len(masks) < max_masks:
masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)
masks_padded[:len(masks)] = masks
masks = masks_padded
masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)
result = torch.zeros((masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8)
for y in range(masks_per_dim):
start_y = y * height
end_y = (y + 1) * height
for x in range(masks_per_dim):
start_x = x * width
end_x = (x + 1) * width
result[start_y:end_y, start_x:end_x] = masks[y, x]
return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
def overlay_class_names(self, image, predictions):
"""
Adds detected class names and scores in the positions defined by the
top-left corner of the predicted bounding box
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores` and `labels`.
"""
scores = predictions.get_field("scores").tolist()
labels = predictions.get_field("labels").tolist()
labels = [self.CATEGORIES[i] for i in labels]
boxes = predictions.bbox
template = "{}: {:.2f}"
for box, score, label in zip(boxes, scores, labels):
x, y = box[:2]
s = template.format(label, score)
cv2.putText(image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)
return image
class MosquitoDemo(object):
# Mosquitoes categories for pretty print
CATEGORIES = ['__background', 'tire']
def __init__(
self,
cfg,
confidence_threshold=0.7,
show_mask_heatmaps=False,
masks_per_dim=2,
min_image_size=224,
):
self.cfg = cfg.clone()
self.model = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
self.min_image_size = min_image_size
save_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
self.transforms = self.build_transform()
mask_threshold = -1 if show_mask_heatmaps else 0.5
self.masker = Masker(threshold=mask_threshold, padding=1)
# used to make colors for each class
self.palette = torch.tensor([2**25 - 1, 2**15 - 1, 2**21 - 1])
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
self.show_mask_heatmaps = show_mask_heatmaps
self.masks_per_dim = masks_per_dim
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
transform = T.Compose([
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
])
return transform
def run_on_opencv_image(self, image):
"""
Arguments:
image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
predictions = self.compute_prediction(image)
top_predictions = self.select_top_predictions(predictions)
result = image.copy()
if self.show_mask_heatmaps:
return self.create_mask_montage(result, top_predictions)
result = self.overlay_boxes(result, top_predictions)
if self.cfg.MODEL.MASK_ON:
result = self.overlay_mask(result, top_predictions)
result = self.overlay_class_names(result, top_predictions)
return result
def compute_prediction(self, original_image):
"""
Arguments:
original_image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
with torch.no_grad():
predictions = self.model(image_list)
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
# used for rescale text
img_area = height * width
self.__scale_factor = ((img_area) / (1080 * 1920))**(1 / 2)
if prediction.has_field("mask"):
# if we have masks, paste the masks in the right position
# in the image, as defined by the bounding boxes
masks = prediction.get_field("mask")
# always single image is passed at a time
masks = self.masker([masks], [prediction])[0]
prediction.add_field("mask", masks)
return prediction
def select_top_predictions(self, predictions):
"""
Select only predictions which have a `score` > self.confidence_threshold,
and return the predictions in descending order of score.
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def compute_colors_for_labels(self, labels):
"""
Simple function that adds fixed colors depending on the class
"""
colors = labels[:, None] * self.palette
colors = (colors % 255).numpy().astype("uint8")
return colors
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = self.compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(
image,
tuple(top_left),
tuple(bottom_right), # tuple(color),
(0, 0, 255),
int(3.5 * self.__scale_factor))
return image
def overlay_mask(self, image, predictions):
"""
Adds the instances contours for each predicted object.
Each label has a different color.
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask` and `labels`.
"""
masks = predictions.get_field("mask").numpy()
labels = predictions.get_field("labels")
colors = self.compute_colors_for_labels(labels).tolist()
for mask, color in zip(masks, colors):
thresh = mask[0, :, :, None].astype(np.uint8)
contours, hierarchy = cv2_util.findContours(thresh, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
image = cv2.drawContours(image, contours, -1, color, 3)
composite = image
return composite
def overlay_keypoints(self, image, predictions):
keypoints = predictions.get_field("keypoints")
kps = keypoints.keypoints
scores = keypoints.get_field("logits")
kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()
for region in kps:
image = vis_keypoints(image, region.transpose((1, 0)))
return image
def create_mask_montage(self, image, predictions):
"""
Create a montage showing the probability heatmaps for each one of the
detected objects
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `mask`.
"""
masks = predictions.get_field("mask")
masks_per_dim = self.masks_per_dim
masks = L.interpolate(masks.float(), scale_factor=1 / masks_per_dim).byte()
height, width = masks.shape[-2:]
max_masks = masks_per_dim**2
masks = masks[:max_masks]
# handle case where we have less detections than max_masks
if len(masks) < max_masks:
masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)
masks_padded[:len(masks)] = masks
masks = masks_padded
masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)
result = torch.zeros((masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8)
for y in range(masks_per_dim):
start_y = y * height
end_y = (y + 1) * height
for x in range(masks_per_dim):
start_x = x * width
end_x = (x + 1) * width
result[start_y:end_y, start_x:end_x] = masks[y, x]
return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
def overlay_class_names(self, image, predictions):
"""
Adds detected class names and scores in the positions defined by the
top-left corner of the predicted bounding box
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores` and `labels`.
"""
scores = predictions.get_field("scores").tolist()
labels = predictions.get_field("labels").tolist()
labels = [self.CATEGORIES[i] for i in labels]
boxes = predictions.bbox
template = "{}: {:.2f}"
for box, score, label in zip(boxes, scores, labels):
x, y = box[:2]
s = template.format(label, score)
cv2.putText(image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, self.__scale_factor,
(255, 255, 255), int(3 * self.__scale_factor))
return image
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
"""Visualizes keypoints (adapted from vis_one_image).
kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
"""
dataset_keypoints = PersonKeypoints.NAMES
kp_lines = PersonKeypoints.CONNECTIONS
# Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]
# Perform the drawing on a copy of the image, to allow for blending.
kp_mask = np.copy(img)
# Draw mid shoulder / mid hip first for better visualization.
mid_shoulder = (kps[:2, dataset_keypoints.index('right_shoulder')] +
kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
sc_mid_shoulder = np.minimum(kps[2, dataset_keypoints.index('right_shoulder')],
kps[2, dataset_keypoints.index('left_shoulder')])
mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
sc_mid_hip = np.minimum(kps[2, dataset_keypoints.index('right_hip')],
kps[2, dataset_keypoints.index('left_hip')])
nose_idx = dataset_keypoints.index('nose')
if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
cv2.line(
kp_mask,
tuple(mid_shoulder),
tuple(kps[:2, nose_idx]),
color=colors[len(kp_lines)],
thickness=2,
lineType=cv2.LINE_AA)
if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
cv2.line(
kp_mask,
tuple(mid_shoulder),
tuple(mid_hip),
color=colors[len(kp_lines) + 1],
thickness=2,
lineType=cv2.LINE_AA)
# Draw the keypoints.
for l in range(len(kp_lines)):
i1 = kp_lines[l][0]
i2 = kp_lines[l][1]
p1 = kps[0, i1], kps[1, i1]
p2 = kps[0, i2], kps[1, i2]
if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
cv2.line(kp_mask, p1, p2, color=colors[l], thickness=2, lineType=cv2.LINE_AA)
if kps[2, i1] > kp_thresh:
cv2.circle(kp_mask, p1, radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
if kps[2, i2] > kp_thresh:
cv2.circle(kp_mask, p2, radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
# Blend the keypoints.
return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
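# --- Usage sketch (not part of the original demo/predictor.py) ---
# A minimal example of how COCODemo is typically driven, in the spirit of the webcam
# demo shipped with the repository. The config path and image path are placeholders.
if __name__ == "__main__":
    from maskrcnn_benchmark.config import cfg

    config_file = "configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"  # assumed path
    cfg.merge_from_file(config_file)
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])  # keep the sketch CPU-only

    coco_demo = COCODemo(cfg, confidence_threshold=0.7, min_image_size=800)
    image = cv2.imread("demo.jpg")  # any BGR image read with OpenCV
    composite = coco_demo.run_on_opencv_image(image)
    cv2.imwrite("demo_predictions.jpg", composite)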
| 36.938974
| 98
| 0.597379
|
c9091edfab6f16e50aa346d793f32ddbee2c6350
| 921
|
py
|
Python
|
tests/kyu_8_tests/test_removing_elements.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_8_tests/test_removing_elements.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_8_tests/test_removing_elements.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from katas.kyu_8.removing_elements import remove_every_other
class RemoveEveryOtherTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(remove_every_other([1]), [1])
def test_equal_2(self):
self.assertEqual(remove_every_other([[1, 2, 3, 4, 5]]),
[[1, 2, 3, 4, 5]])
def test_equal_3(self):
self.assertEqual(remove_every_other(['Hello', 'Goodbye']), ['Hello'])
def test_equal_4(self):
self.assertEqual(remove_every_other([1.013, 2398.00]), [1.013])
def test_equal_5(self):
self.assertEqual(
remove_every_other(['Yes', 'No', 'Yes', 'No', 'Yes', 'No']),
['Yes', 'Yes', 'Yes']
)
def test_equal_6(self):
self.assertEqual(remove_every_other(
[[1, 2], [2, 3], [3, 4], [4, 5], [5, 6, 7], [8, 9, 10, 11, 12]]
), [[1, 2], [3, 4], [5, 6, 7]])
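# A minimal reference implementation consistent with the expectations above (a sketch,
# not the kata's actual katas/kyu_8/removing_elements.py): keep the elements at even
# indices, i.e. drop every other element starting from the second one.
def _remove_every_other_sketch(lst):
    return lst[::2]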
| 30.7
| 77
| 0.563518
|
adab5eb0dfcb31313de659f61b965486447dd16e
| 21,054
|
py
|
Python
|
aiida/orm/implementation/django/nodes.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | 1
|
2019-07-31T04:08:13.000Z
|
2019-07-31T04:08:13.000Z
|
aiida/orm/implementation/django/nodes.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/orm/implementation/django/nodes.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Django implementation of the `BackendNode` and `BackendNodeCollection` classes."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# pylint: disable=import-error,no-name-in-module
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction, IntegrityError
from aiida.backends.djsite.db import models
from aiida.common import exceptions
from aiida.common.lang import type_check
from aiida.orm.utils.node import clean_value
from .. import BackendNode, BackendNodeCollection
from . import entities
from . import utils as dj_utils
from .computers import DjangoComputer
from .users import DjangoUser
class DjangoNode(entities.DjangoModelEntity[models.DbNode], BackendNode):
"""Django Node backend entity"""
# pylint: disable=too-many-public-methods
MODEL_CLASS = models.DbNode
LINK_CLASS = models.DbLink
def __init__(self,
backend,
node_type,
user,
computer=None,
process_type=None,
label='',
description='',
ctime=None,
mtime=None):
"""Construct a new `BackendNode` instance wrapping a new `DbNode` instance.
:param backend: the backend
:param node_type: the node type string
:param user: associated `BackendUser`
:param computer: associated `BackendComputer`
:param label: string label
:param description: string description
:param ctime: The creation time as datetime object
:param mtime: The modification time as datetime object
"""
# pylint: disable=too-many-arguments
super(DjangoNode, self).__init__(backend)
arguments = {
'user': user.dbmodel,
'node_type': node_type,
'process_type': process_type,
'label': label,
'description': description,
}
type_check(user, DjangoUser)
if computer:
type_check(computer, DjangoComputer, 'computer is of type {}'.format(type(computer)))
arguments['dbcomputer'] = computer.dbmodel
if ctime:
type_check(ctime, datetime, 'the given ctime is of type {}'.format(type(ctime)))
arguments['ctime'] = ctime
if mtime:
type_check(mtime, datetime, 'the given mtime is of type {}'.format(type(mtime)))
arguments['mtime'] = mtime
self._dbmodel = dj_utils.ModelWrapper(models.DbNode(**arguments))
def clone(self):
"""Return an unstored clone of ourselves.
:return: an unstored `BackendNode` with the exact same attributes and extras as self
"""
arguments = {
'node_type': self._dbmodel.node_type,
'process_type': self._dbmodel.process_type,
'user': self._dbmodel.user,
'dbcomputer': self._dbmodel.dbcomputer,
'label': self._dbmodel.label,
'description': self._dbmodel.description,
}
clone = self.__class__.__new__(self.__class__) # pylint: disable=no-value-for-parameter
clone.__init__(self.backend, self.node_type, self.user)
clone._dbmodel = dj_utils.ModelWrapper(models.DbNode(**arguments)) # pylint: disable=protected-access
return clone
@property
def computer(self):
"""Return the computer of this node.
:return: the computer or None
:rtype: `BackendComputer` or None
"""
try:
return self.backend.computers.from_dbmodel(self._dbmodel.dbcomputer)
except TypeError:
return None
@computer.setter
def computer(self, computer):
"""Set the computer of this node.
:param computer: a `BackendComputer`
"""
type_check(computer, DjangoComputer, allow_none=True)
if computer is not None:
computer = computer.dbmodel
self._dbmodel.dbcomputer = computer
@property
def user(self):
"""Return the user of this node.
:return: the user
:rtype: `BackendUser`
"""
return self.backend.users.from_dbmodel(self._dbmodel.user)
@user.setter
def user(self, user):
"""Set the user of this node.
:param user: a `BackendUser`
"""
type_check(user, DjangoUser)
self._dbmodel.user = user.dbmodel
@property
def attributes(self):
"""Return the complete attributes dictionary.
.. warning:: While the node is unstored, this will return references of the attributes on the database model,
meaning that changes on the returned values (if they are mutable themselves, e.g. a list or dictionary) will
automatically be reflected on the database model as well. As soon as the node is stored, the returned
attributes will be a deep copy and mutations of the database attributes will have to go through the
appropriate set methods. Therefore, once stored, retrieving a deep copy can be a heavy operation. If you
only need the keys or some values, use the iterators `attributes_keys` and `attributes_items`, or the
getters `get_attribute` and `get_attribute_many` instead.
:return: the attributes as a dictionary
"""
return self.dbmodel.attributes
def get_attribute(self, key):
"""Return the value of an attribute.
.. warning:: While the node is unstored, this will return a reference of the attribute on the database model,
meaning that changes on the returned value (if they are mutable themselves, e.g. a list or dictionary) will
automatically be reflected on the database model as well. As soon as the node is stored, the returned
attribute will be a deep copy and mutations of the database attributes will have to go through the
appropriate set methods.
:param key: name of the attribute
:return: the value of the attribute
:raises AttributeError: if the attribute does not exist and no default is specified
"""
try:
return self._dbmodel.attributes[key]
except KeyError as exception:
raise AttributeError('attribute `{}` does not exist'.format(exception))
def get_attribute_many(self, keys):
"""Return the values of multiple attributes.
.. warning:: While the node is unstored, this will return references of the attributes on the database model,
meaning that changes on the returned values (if they are mutable themselves, e.g. a list or dictionary) will
automatically be reflected on the database model as well. As soon as the node is stored, the returned
attributes will be a deep copy and mutations of the database attributes will have to go through the
appropriate set methods. Therefore, once stored, retrieving a deep copy can be a heavy operation. If you
only need the keys or some values, use the iterators `attributes_keys` and `attributes_items`, or the
getters `get_attribute` and `get_attribute_many` instead.
:param keys: a list of attribute names
:return: a list of attribute values
:raises AttributeError: if at least one attribute does not exist
"""
try:
return [self.get_attribute(key) for key in keys]
except KeyError as exception:
raise AttributeError('attribute `{}` does not exist'.format(exception))
def set_attribute(self, key, value):
"""Set an attribute to the given value.
:param key: name of the attribute
:param value: value of the attribute
"""
if self.is_stored:
value = clean_value(value)
self._dbmodel.attributes[key] = value
self._flush_if_stored()
def set_attribute_many(self, attributes):
"""Set multiple attributes.
.. note:: This will override any existing attributes that are present in the new dictionary.
:param attributes: a dictionary with the attributes to set
"""
if self.is_stored:
attributes = {key: clean_value(value) for key, value in attributes.items()}
for key, value in attributes.items():
# We need to use `self.dbmodel` without the underscore, because otherwise the second iteration will refetch
# what is in the database and we lose the initial changes.
self.dbmodel.attributes[key] = value
self._flush_if_stored()
def reset_attributes(self, attributes):
"""Reset the attributes.
.. note:: This will completely clear any existing attributes and replace them with the new dictionary.
:param attributes: a dictionary with the attributes to set
"""
if self.is_stored:
attributes = clean_value(attributes)
self.dbmodel.attributes = attributes
self._flush_if_stored()
def delete_attribute(self, key):
"""Delete an attribute.
:param key: name of the attribute
:raises AttributeError: if the attribute does not exist
"""
try:
self._dbmodel.attributes.pop(key)
except KeyError as exception:
raise AttributeError('attribute `{}` does not exist'.format(exception))
else:
self._flush_if_stored()
def delete_attribute_many(self, keys):
"""Delete multiple attributes.
:param keys: names of the attributes to delete
:raises AttributeError: if at least one of the attributes does not exist
"""
non_existing_keys = [key for key in keys if key not in self._dbmodel.attributes]
if non_existing_keys:
raise AttributeError('attributes `{}` do not exist'.format(', '.join(non_existing_keys)))
for key in keys:
self.dbmodel.attributes.pop(key)
self._flush_if_stored()
def clear_attributes(self):
"""Delete all attributes."""
self._dbmodel.attributes = {}
self._flush_if_stored()
def attributes_items(self):
"""Return an iterator over the attributes.
:return: an iterator with attribute key value pairs
"""
for key, value in self._dbmodel.attributes.items():
yield key, value
def attributes_keys(self):
"""Return an iterator over the attribute keys.
:return: an iterator with attribute keys
"""
for key in self._dbmodel.attributes:
yield key
@property
def extras(self):
"""Return the complete extras dictionary.
.. warning:: While the node is unstored, this will return references of the extras on the database model,
meaning that changes on the returned values (if they are mutable themselves, e.g. a list or dictionary) will
automatically be reflected on the database model as well. As soon as the node is stored, the returned extras
will be a deep copy and mutations of the database extras will have to go through the appropriate set
methods. Therefore, once stored, retrieving a deep copy can be a heavy operation. If you only need the keys
or some values, use the iterators `extras_keys` and `extras_items`, or the getters `get_extra` and
`get_extra_many` instead.
:return: the extras as a dictionary
"""
return self.dbmodel.extras
def get_extra(self, key):
"""Return the value of an extra.
.. warning:: While the node is unstored, this will return a reference of the extra on the database model,
meaning that changes on the returned value (if they are mutable themselves, e.g. a list or dictionary) will
automatically be reflected on the database model as well. As soon as the node is stored, the returned extra
will be a deep copy and mutations of the database extras will have to go through the appropriate set
methods.
:param key: name of the extra
:return: the value of the extra
:raises AttributeError: if the extra does not exist and no default is specified
"""
try:
return self._dbmodel.extras[key]
except KeyError as exception:
raise AttributeError('extra `{}` does not exist'.format(exception))
def get_extra_many(self, keys):
"""Return the values of multiple extras.
.. warning:: While the node is unstored, this will return references of the extras on the database model,
meaning that changes on the returned values (if they are mutable themselves, e.g. a list or dictionary) will
automatically be reflected on the database model as well. As soon as the node is stored, the returned extras
will be a deep copy and mutations of the database extras will have to go through the appropriate set
methods. Therefore, once stored, retrieving a deep copy can be a heavy operation. If you only need the keys
or some values, use the iterators `extras_keys` and `extras_items`, or the getters `get_extra` and
`get_extra_many` instead.
:param keys: a list of extra names
:return: a list of extra values
:raises AttributeError: if at least one extra does not exist
"""
try:
return [self.get_extra(key) for key in keys]
except KeyError as exception:
raise AttributeError('extra `{}` does not exist'.format(exception))
def set_extra(self, key, value):
"""Set an extra to the given value.
:param key: name of the extra
:param value: value of the extra
"""
if self.is_stored:
value = clean_value(value)
self._dbmodel.extras[key] = value
self._flush_if_stored()
def set_extra_many(self, extras):
"""Set multiple extras.
.. note:: This will override any existing extras that are present in the new dictionary.
:param extras: a dictionary with the extras to set
"""
if self.is_stored:
extras = {key: clean_value(value) for key, value in extras.items()}
for key, value in extras.items():
self.dbmodel.extras[key] = value
self._flush_if_stored()
def reset_extras(self, extras):
"""Reset the extras.
.. note:: This will completely clear any existing extras and replace them with the new dictionary.
:param extras: a dictionary with the extras to set
"""
if self.is_stored:
extras = clean_value(extras)
self.dbmodel.extras = extras
self._flush_if_stored()
def delete_extra(self, key):
"""Delete an extra.
:param key: name of the extra
:raises AttributeError: if the extra does not exist
"""
try:
self._dbmodel.extras.pop(key)
except KeyError as exception:
raise AttributeError('extra `{}` does not exist'.format(exception))
else:
self._flush_if_stored()
def delete_extra_many(self, keys):
"""Delete multiple extras.
:param keys: names of the extras to delete
:raises AttributeError: if at least one of the extras does not exist
"""
non_existing_keys = [key for key in keys if key not in self._dbmodel.extras]
if non_existing_keys:
raise AttributeError('extras `{}` do not exist'.format(', '.join(non_existing_keys)))
for key in keys:
self.dbmodel.extras.pop(key)
self._flush_if_stored()
def clear_extras(self):
"""Delete all extras."""
self._dbmodel.extras = {}
self._flush_if_stored()
def extras_items(self):
"""Return an iterator over the extras.
:return: an iterator with extra key value pairs
"""
for key, value in self._dbmodel.extras.items():
yield key, value
def extras_keys(self):
"""Return an iterator over the extra keys.
:return: an iterator with extra keys
"""
for key in self._dbmodel.extras:
yield key
def _flush_if_stored(self):
if self._dbmodel.is_saved():
self._dbmodel.save()
def add_incoming(self, source, link_type, link_label):
"""Add a link of the given type from a given node to ourself.
:param source: the node from which the link is coming
:param link_type: the link type
:param link_label: the link label
:return: True if the proposed link is allowed, False otherwise
:raise aiida.common.ModificationNotAllowed: if either source or target node is not stored
"""
type_check(source, DjangoNode)
if not self.is_stored:
raise exceptions.ModificationNotAllowed('node has to be stored when adding an incoming link')
if not source.is_stored:
raise exceptions.ModificationNotAllowed('source node has to be stored when adding a link from it')
self._add_link(source, link_type, link_label)
def _add_link(self, source, link_type, link_label):
"""Add a link of the given type from a given node to ourself.
:param source: the node from which the link is coming
:param link_type: the link type
:param link_label: the link label
"""
savepoint_id = None
try:
# Transactions are needed here for Postgresql:
# https://docs.djangoproject.com/en/1.5/topics/db/transactions/#handling-exceptions-within-postgresql-transactions
savepoint_id = transaction.savepoint()
self.LINK_CLASS(input_id=source.id, output_id=self.id, label=link_label, type=link_type.value).save()
transaction.savepoint_commit(savepoint_id)
except IntegrityError as exception:
transaction.savepoint_rollback(savepoint_id)
raise exceptions.UniquenessError('failed to create the link: {}'.format(exception))
def clean_values(self):
self._dbmodel.attributes = clean_value(self._dbmodel.attributes)
self._dbmodel.extras = clean_value(self._dbmodel.extras)
def store(self, links=None, with_transaction=True, clean=True):
"""Store the node in the database.
:param links: optional links to add before storing
:param with_transaction: if False, do not use a transaction because the caller will already have opened one.
:param clean: boolean, if True, will clean the attributes and extras before attempting to store
"""
from aiida.common.lang import EmptyContextManager
from aiida.backends.djsite.db.models import suppress_auto_now
if clean:
self.clean_values()
with transaction.atomic() if with_transaction else EmptyContextManager():
with suppress_auto_now([(models.DbNode, ['mtime'])]) if self.mtime else EmptyContextManager():
# We need to save the node model instance itself first such that it has a pk
# that can be used in the foreign keys that will be needed for setting the
# attributes and links
self.dbmodel.save()
if links:
for link_triple in links:
self._add_link(*link_triple)
return self
class DjangoNodeCollection(BackendNodeCollection):
"""The collection of Node entries."""
ENTITY_CLASS = DjangoNode
def get(self, pk):
"""Return a Node entry from the collection with the given id
:param pk: id of the node
"""
try:
return self.ENTITY_CLASS.from_dbmodel(models.DbNode.objects.get(pk=pk), self.backend)
except ObjectDoesNotExist:
raise exceptions.NotExistent("Node with pk '{}' not found".format(pk))
def delete(self, pk):
"""Remove a Node entry from the collection with the given id
:param pk: id of the node to delete
"""
try:
models.DbNode.objects.filter(pk=pk).delete() # pylint: disable=no-member
except ObjectDoesNotExist:
raise exceptions.NotExistent("Node with pk '{}' not found".format(pk))
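# --- Usage sketch (not part of the aiida source) ---
# How the attribute/extra accessors above are normally exercised on a node obtained
# from the backend (the `node` variable and the values are illustrative only):
#
#     node.set_attribute('energy', -1.5)
#     node.set_attribute_many({'units': 'eV', 'converged': True})
#     assert node.get_attribute('energy') == -1.5
#     node.set_extra('tag', 'benchmark')
#     for key, value in node.attributes_items():
#         print(key, value)
#     node.delete_attribute('converged')
#
# Once the node is stored, each setter flushes immediately through _flush_if_stored(),
# so these mutations are persisted without an explicit save().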
| 39.279851
| 126
| 0.636221
|
40da3bbe594a1c569fb8770c41650e84bf159f6b
| 46,601
|
py
|
Python
|
allennlp/tests/semparse/domain_languages/wikitables_language_test.py
|
schmmd/allennlp
|
fbc28cefe03b1ea3ff65300d475d34f5f9629a5c
|
[
"Apache-2.0"
] | 17
|
2019-11-19T19:02:35.000Z
|
2021-11-16T16:19:07.000Z
|
allennlp/tests/semparse/domain_languages/wikitables_language_test.py
|
TalSchuster/allennlp-MultiLang
|
dbb28b939652491d2f633326edccca2cd0e528c8
|
[
"Apache-2.0"
] | 1
|
2021-05-31T11:12:02.000Z
|
2021-06-01T05:34:27.000Z
|
allennlp/tests/semparse/domain_languages/wikitables_language_test.py
|
TalSchuster/allennlp-MultiLang
|
dbb28b939652491d2f633326edccca2cd0e528c8
|
[
"Apache-2.0"
] | 10
|
2019-12-06T11:32:37.000Z
|
2022-01-06T15:39:09.000Z
|
# pylint: disable=no-self-use,invalid-name,too-many-public-methods
from typing import List
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import Token
from allennlp.data.tokenizers import WordTokenizer
from allennlp.semparse.contexts import TableQuestionContext
from allennlp.semparse.domain_languages.domain_language import ExecutionError
from allennlp.semparse.domain_languages.common import Date
from allennlp.semparse.domain_languages.wikitables_language import WikiTablesLanguage
from allennlp.tests.semparse.domain_languages.domain_language_test import check_productions_match
class TestWikiTablesLanguage(AllenNlpTestCase):
# TODO(mattg, pradeep): Add tests for the ActionSpaceWalker as well.
def setUp(self):
super().setUp()
# Adding a bunch of random tokens in here so we get them as constants in the language.
question_tokens = [Token(x) for x in ['what', 'was', 'the', 'last', 'year', '2013', '?',
'quarterfinals', 'a_league', '2010', '8000',
'did_not_qualify', '2001', '2', '23', '2005', '1',
'2002', 'usl_a_league', 'usl_first_division']]
self.table_file = self.FIXTURES_ROOT / 'data' / 'wikitables' / 'sample_table.tagged'
self.table_context = TableQuestionContext.read_from_file(self.table_file, question_tokens)
self.language = WikiTablesLanguage(self.table_context)
def _get_world_with_question_tokens(self, tokens: List[Token]) -> WikiTablesLanguage:
table_context = TableQuestionContext.read_from_file(self.table_file, tokens)
world = WikiTablesLanguage(table_context)
return world
def _get_world_with_question_tokens_and_table_file(self,
tokens: List[Token],
table_file: str) -> WikiTablesLanguage:
table_context = TableQuestionContext.read_from_file(table_file, tokens)
world = WikiTablesLanguage(table_context)
return world
def test_execute_fails_with_unknown_function(self):
logical_form = "(unknown_function all_rows string_column:league)"
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_select(self):
logical_form = "(select_string all_rows string_column:league)"
cell_list = self.language.execute(logical_form)
assert set(cell_list) == {'usl_a_league', 'usl_first_division'}
def test_execute_works_with_select_number(self):
logical_form = "(select_number all_rows number_column:division)"
selected_number = self.language.execute(logical_form)
assert selected_number == 2.0
def test_execute_works_with_argmax(self):
logical_form = "(select_string (argmax all_rows number_column:avg_attendance) string_column:league)"
cell_list = self.language.execute(logical_form)
assert cell_list == ['usl_a_league']
def test_execute_works_with_argmax_on_dates(self):
logical_form = "(select_string (argmax all_rows date_column:year) string_column:league)"
cell_list = self.language.execute(logical_form)
assert cell_list == ['usl_first_division']
def test_execute_works_with_argmin(self):
logical_form = "(select_date (argmin all_rows number_column:avg_attendance) date_column:year)"
cell_list = self.language.execute(logical_form)
assert cell_list == Date(2005, 3, -1)
def test_execute_works_with_argmin_on_dates(self):
logical_form = "(select_string (argmin all_rows date_column:year) string_column:league)"
cell_list = self.language.execute(logical_form)
assert cell_list == ['usl_a_league']
def test_execute_works_with_filter_number_greater(self):
# Selecting cell values from all rows that have attendance greater than the min value of
# attendance.
logical_form = """(select_string (filter_number_greater all_rows number_column:avg_attendance
(min_number all_rows number_column:avg_attendance)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ['usl_a_league']
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_number_greater all_rows number_column:avg_attendance
all_rows) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_number_greater all_rows number_column:avg_attendance
string:usl_first_division) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_date_greater(self):
# Selecting cell values from all rows that have date greater than 2002.
logical_form = """(select_string (filter_date_greater all_rows date_column:year
(date 2002 -1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ['usl_first_division']
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_greater all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_greater_equals(self):
# Counting rows that have attendance greater than or equal to the min value of attendance.
logical_form = """(count (filter_number_greater_equals all_rows number_column:avg_attendance
(min_number all_rows number_column:avg_attendance)))"""
count_result = self.language.execute(logical_form)
assert count_result == 2
# Replacing the filter value with an invalid value.
logical_form = """(count (filter_number_greater all_rows number_column:avg_attendance all_rows))"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
# Replacing the filter value with an invalid value.
logical_form = """(count (filter_number_greater all_rows number_column:avg_attendance
string:usl_a_league))"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_date_greater_equals(self):
# Selecting cell values from all rows that have date greater than or equal to 2005 February
# 1st.
logical_form = """(select_string (filter_date_greater_equals all_rows date_column:year
(date 2005 2 1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ['usl_first_division']
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_greater_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_lesser(self):
# Selecting cell values from all rows that have attendance lesser than the max value of attendance.
logical_form = """(select_string (filter_number_lesser all_rows number_column:avg_attendance
(max_number all_rows number_column:avg_attendance)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ['usl_first_division']
def test_execute_works_with_filter_date_lesser(self):
# Selecting cell values from all rows that have date less than 2005 January
logical_form = """(select_string (filter_date_lesser all_rows date_column:year
(date 2005 1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ["usl_a_league"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_lesser all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_lesser_equals(self):
# Counting rows that have attendance lesser than or equal to 8000.
logical_form = """(count (filter_number_lesser_equals all_rows number_column:avg_attendance 8000))"""
count_result = self.language.execute(logical_form)
assert count_result == 2
def test_execute_works_with_filter_date_lesser_equals(self):
# Selecting cell values from all rows that have date less than or equal to 2001 February 23
logical_form = """(select_string (filter_date_lesser_equals all_rows date_column:year
(date 2001 2 23)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ['usl_a_league']
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_lesser_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_equals(self):
# Counting rows that have attendance equal to 8000.
logical_form = """(count (filter_number_equals all_rows number_column:avg_attendance 8000))"""
count_result = self.language.execute(logical_form)
assert count_result == 0
def test_execute_works_with_filter_date_equals(self):
# Selecting cell values from all rows that have date equal to 2001
logical_form = """(select_string (filter_date_equals all_rows date_column:year
(date 2001 -1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ['usl_a_league']
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_number_not_equals(self):
# Counting rows that have attendance not equal to 8000.
logical_form = """(count (filter_number_not_equals all_rows number_column:avg_attendance 8000))"""
count_result = self.language.execute(logical_form)
assert count_result == 2
def test_execute_works_with_filter_date_not_equals(self):
# Selecting cell values from all rows that have date not equal to 2001
logical_form = """(select_string (filter_date_not_equals all_rows date_column:year
(date 2001 -1 -1)) string_column:league)"""
cell_value_list = self.language.execute(logical_form)
assert cell_value_list == ['usl_first_division']
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_date_not_equals all_rows date_column:year
2005) string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_filter_in(self):
# Selecting "regular season" from rows that have "did not qualify" in "open cup" column.
logical_form = """(select_string (filter_in all_rows string_column:open_cup string:did_not_qualify)
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["4th_western"]
def test_execute_works_with_select_nested_in_filter_in(self):
logical_form = """(filter_in all_rows string_column:regular_season (select_string (first all_rows)
string_column:regular_season))"""
row_list = self.language.execute(logical_form)
assert row_list == self.language.execute("(first all_rows)")
def test_execute_works_with_filter_not_in(self):
# Selecting "regular season" from rows that do not have "did not qualify" in "open cup" column.
logical_form = """(select_string (filter_not_in all_rows string_column:open_cup string:did_not_qualify)
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["5th"]
# Replacing the filter value with an invalid value.
logical_form = """(select_string (filter_not_in all_rows string_column:open_cup 2000)
string_column:regular_season)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_execute_works_with_first(self):
# Selecting "regular season" from the first row.
logical_form = """(select_string (first all_rows) string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["4th_western"]
def test_execute_logs_warning_with_first_on_empty_list(self):
# Selecting "regular season" from the first row where year is greater than 2010.
with self.assertLogs("allennlp.semparse.domain_languages.wikitables_language") as log:
logical_form = """(select_string (first (filter_date_greater all_rows date_column:year
(date 2010 -1 -1)))
string_column:regular_season)"""
self.language.execute(logical_form)
self.assertEqual(log.output,
["WARNING:allennlp.semparse.domain_languages.wikitables_language:"
"Trying to get first row from an empty list"])
def test_execute_works_with_last(self):
# Selecting "regular season" from the last row where year is not equal to 2010.
logical_form = """(select_string (last (filter_date_not_equals all_rows date_column:year
(date 2010 -1 -1)))
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["5th"]
def test_execute_logs_warning_with_last_on_empty_list(self):
# Selecting "regular season" from the last row where year is greater than 2010.
with self.assertLogs("allennlp.semparse.domain_languages.wikitables_language") as log:
logical_form = """(select_string (last (filter_date_greater all_rows date_column:year
(date 2010 -1 -1)))
string_column:regular_season)"""
self.language.execute(logical_form)
self.assertEqual(log.output,
["WARNING:allennlp.semparse.domain_languages.wikitables_language:"
"Trying to get last row from an empty list"])
def test_execute_works_with_previous(self):
# Selecting "regular season" from the row before last where year is not equal to 2010.
logical_form = """(select_string (previous (last (filter_date_not_equals
all_rows date_column:year (date 2010 -1 -1))))
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["4th_western"]
def test_execute_works_with_next(self):
# Selecting "regular season" from the row after first where year is not equal to 2010.
logical_form = """(select_string (next (first (filter_date_not_equals
all_rows date_column:year (date 2010 -1 -1))))
string_column:regular_season)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["5th"]
def test_execute_works_with_max_date(self):
logical_form = """(max_date all_rows date_column:year)"""
cell_list = self.language.execute(logical_form)
assert str(cell_list) == "2005"
def test_execute_works_with_min_date(self):
logical_form = """(min_date all_rows date_column:year)"""
cell_list = self.language.execute(logical_form)
assert str(cell_list) == "2001"
def test_execute_works_with_mode_number(self):
# Most frequent division value.
logical_form = """(mode_number all_rows number_column:division)"""
cell_list = self.language.execute(logical_form)
assert cell_list == 2.0
def test_execute_works_with_mode_string(self):
logical_form = """(mode_string all_rows string_column:league)"""
cell_list = self.language.execute(logical_form)
# Returns the string values with frequency 1 (which is the max frequency)
assert cell_list == ["usl_a_league", "usl_first_division"]
def test_execute_works_with_mode_date(self):
logical_form = """(mode_date all_rows date_column:year)"""
cell_list = self.language.execute(logical_form)
assert str(cell_list) == "2001"
def test_execute_works_with_same_as(self):
# Select the "league" from all the rows that have the same value under "playoffs" as the
# row that has the string "a league" under "league".
logical_form = """(select_string (same_as (filter_in all_rows string_column:league string:a_league)
string_column:playoffs)
string_column:league)"""
cell_list = self.language.execute(logical_form)
assert cell_list == ["usl_a_league", "usl_first_division"]
def test_execute_works_with_sum(self):
# Get total "avg attendance".
logical_form = """(sum all_rows number_column:avg_attendance)"""
sum_value = self.language.execute(logical_form)
assert sum_value == 13197
# Total "avg attendance" where "playoffs" has "quarterfinals"
logical_form = """(sum (filter_in all_rows string_column:playoffs string:quarterfinals)
number_column:avg_attendance)"""
sum_value = self.language.execute(logical_form)
assert sum_value == 13197
def test_execute_works_with_average(self):
# Get average "avg attendance".
logical_form = """(average all_rows number_column:avg_attendance)"""
avg_value = self.language.execute(logical_form)
assert avg_value == 6598.5
# Average "avg attendance" where "playoffs" has "quarterfinals"
logical_form = """(average (filter_in all_rows string_column:playoffs string:quarterfinals)
number_column:avg_attendance)"""
avg_value = self.language.execute(logical_form)
assert avg_value == 6598.5
def test_execute_works_with_diff(self):
# Difference in "avg attendance" between rows with "usl_a_league" and "usl_first_division"
# in "league" columns.
logical_form = """(diff (filter_in all_rows string_column:league string:usl_a_league)
(filter_in all_rows string_column:league string:usl_first_division)
number_column:avg_attendance)"""
avg_value = self.language.execute(logical_form)
assert avg_value == 1141
def test_execute_fails_with_diff_on_non_numerical_columns(self):
logical_form = """(diff (filter_in all_rows string_column:league string:usl_a_league)
(filter_in all_rows string_column:league string:usl_first_division)
string_column:league)"""
with pytest.raises(ExecutionError):
self.language.execute(logical_form)
def test_number_comparison_works(self):
        # TableQuestionContext normalizes all strings according to some rules. We want to ensure
        # that the original numerical values of number cells are being correctly processed here.
tokens = WordTokenizer().tokenize("when was the attendance the highest?")
tagged_file = self.FIXTURES_ROOT / "data" / "corenlp_processed_tables" / "TEST-2.table"
language = self._get_world_with_question_tokens_and_table_file(tokens, tagged_file)
result = language.execute("(select_date (argmax all_rows number_column:attendance) date_column:date)")
assert result == Date(-1, 11, 10)
def test_evaluate_logical_form(self):
logical_form = """(select_string (same_as (filter_in all_rows string_column:league string:a_league)
string_column:playoffs)
string_column:league)"""
assert self.language.evaluate_logical_form(logical_form, ["USL A-League",
"USL First Division"])
def test_evaluate_logical_form_with_invalid_logical_form(self):
logical_form = """(select_string (same_as (filter_in all_rows string_column:league INVALID_CONSTANT)
string_column:playoffs)
string_column:league)"""
assert not self.language.evaluate_logical_form(logical_form, ["USL A-League",
"USL First Division"])
def test_get_nonterminal_productions_all_column_types(self):
# This test is long, but worth it. These are all of the valid actions in the grammar, and
# we want to be sure they are what we expect.
productions = self.language.get_nonterminal_productions()
assert set(productions.keys()) == {
"@start@",
"<List[Row],StringColumn:List[str]>",
"<List[Row],DateColumn:Date>",
"<List[Row],NumberColumn,Number:List[Row]>",
"<List[Row],ComparableColumn:List[Row]>",
"<List[Row],Column:List[Row]>",
"<List[Row],List[Row],NumberColumn:Number>",
"<List[Row],StringColumn,List[str]:List[Row]>",
"<Number,Number,Number:Date>",
"<List[Row],DateColumn,Date:List[Row]>",
"<List[Row],NumberColumn:Number>",
"<List[Row]:List[Row]>",
'<List[Row],StringColumn:List[str]>',
"<List[Row]:Number>",
"List[str]",
"List[Row]",
"Date",
"Number",
"Column",
"StringColumn",
"ComparableColumn",
"NumberColumn",
"DateColumn",
"List[str]",
}
check_productions_match(productions['@start@'],
['Date', 'Number', 'List[str]'])
check_productions_match(productions['<List[Row],StringColumn:List[str]>'],
['select_string', 'mode_string'])
check_productions_match(productions['<List[Row],DateColumn:Date>'],
['select_date', 'max_date', 'min_date', 'mode_date'])
check_productions_match(productions['<List[Row],NumberColumn,Number:List[Row]>'],
['filter_number_equals', 'filter_number_greater',
'filter_number_greater_equals', 'filter_number_lesser',
'filter_number_lesser_equals', 'filter_number_not_equals'])
check_productions_match(productions['<List[Row],ComparableColumn:List[Row]>'],
['argmax', 'argmin'])
check_productions_match(productions['<List[Row],Column:List[Row]>'],
['same_as'])
check_productions_match(productions['<List[Row],List[Row],NumberColumn:Number>'],
['diff'])
check_productions_match(productions['<List[Row],StringColumn,List[str]:List[Row]>'],
['filter_in', 'filter_not_in'])
check_productions_match(productions['<Number,Number,Number:Date>'],
['date'])
check_productions_match(productions['<List[Row],DateColumn,Date:List[Row]>'],
['filter_date_equals', 'filter_date_greater',
'filter_date_greater_equals', 'filter_date_lesser',
'filter_date_lesser_equals', 'filter_date_not_equals'])
check_productions_match(productions['<List[Row],NumberColumn:Number>'],
['average', 'max_number', 'min_number', 'sum',
'select_number', 'mode_number'])
check_productions_match(productions['<List[Row]:List[Row]>'],
['first', 'last', 'next', 'previous'])
check_productions_match(productions['<List[Row]:Number>'],
['count'])
check_productions_match(productions['List[Row]'],
['all_rows',
'[<List[Row],DateColumn,Date:List[Row]>, List[Row], DateColumn, Date]',
'[<List[Row],Column:List[Row]>, List[Row], Column]',
'[<List[Row],ComparableColumn:List[Row]>, List[Row], ComparableColumn]',
'[<List[Row],NumberColumn,Number:List[Row]>, List[Row], NumberColumn, Number]',
'[<List[Row],StringColumn,List[str]:List[Row]>, List[Row], StringColumn, List[str]]', # pylint: disable=line-too-long
'[<List[Row]:List[Row]>, List[Row]]'])
check_productions_match(productions['Date'],
['[<Number,Number,Number:Date>, Number, Number, Number]',
'[<List[Row],DateColumn:Date>, List[Row], DateColumn]'])
# Some of the number productions are instance-specific, and some of them are from the
# grammar.
check_productions_match(productions['Number'],
['2001',
'2002',
'2005',
'2010',
'2013',
'-1',
'1',
'2',
'23',
'8000',
'[<List[Row],NumberColumn:Number>, List[Row], NumberColumn]',
'[<List[Row],List[Row],NumberColumn:Number>, List[Row], List[Row], NumberColumn]',
'[<List[Row]:Number>, List[Row]]'])
        # These are the columns in the table, and are instance-specific.
check_productions_match(productions['Column'],
['string_column:league',
'string_column:playoffs',
'string_column:open_cup',
'string_column:regular_season',
'string_column:division',
'string_column:avg_attendance',
'string_column:year',
'date_column:year',
'number_column:open_cup',
'number_column:regular_season',
'number_column:avg_attendance',
'number_column:division',
'number_column:year'])
check_productions_match(productions['StringColumn'],
['string_column:league',
'string_column:playoffs',
'string_column:open_cup',
'string_column:year',
'string_column:division',
'string_column:avg_attendance',
'string_column:regular_season'])
check_productions_match(productions['ComparableColumn'],
['date_column:year',
'number_column:open_cup',
'number_column:regular_season',
'number_column:avg_attendance',
'number_column:division',
'number_column:year'])
check_productions_match(productions['DateColumn'],
['date_column:year'])
check_productions_match(productions['NumberColumn'],
['number_column:avg_attendance',
'number_column:open_cup',
'number_column:regular_season',
'number_column:division',
'number_column:year'])
# Strings come from the question - any span in the question that shows up as a cell in the
# table is a valid string production.
check_productions_match(productions['List[str]'],
['string:quarterfinals',
'string:did_not_qualify',
'string:a_league',
'string:usl_first_division',
'string:usl_a_league',
'string:1',
'string:2',
'string:2005',
'string:2001',
'[<List[Row],StringColumn:List[str]>, List[Row], StringColumn]'])
def test_world_processes_logical_forms_correctly(self):
logical_form = ("(select_date (filter_in all_rows string_column:league string:usl_a_league)"
" date_column:year)")
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.action_sequence_to_logical_form(action_sequence) == logical_form
def test_world_gets_correct_actions(self):
logical_form = """(select_date (filter_in all_rows string_column:league string:usl_a_league)
date_column:year)"""
expected_sequence = ['@start@ -> Date',
'Date -> [<List[Row],DateColumn:Date>, List[Row], DateColumn]',
'<List[Row],DateColumn:Date> -> select_date',
'List[Row] -> [<List[Row],StringColumn,List[str]:List[Row]>, '
'List[Row], StringColumn, List[str]]', # pylint: disable=bad-continuation
'<List[Row],StringColumn,List[str]:List[Row]> -> filter_in',
'List[Row] -> all_rows',
'StringColumn -> string_column:league',
'List[str] -> string:usl_a_league',
'DateColumn -> date_column:year']
assert self.language.logical_form_to_action_sequence(logical_form) == expected_sequence
def test_world_processes_logical_forms_with_number_correctly(self):
logical_form = ("(select_date (filter_number_greater all_rows number_column:avg_attendance 8000) "
"date_column:year)")
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.action_sequence_to_logical_form(action_sequence) == logical_form
def test_world_processes_logical_forms_with_date_correctly(self):
logical_form = ("(select_date (filter_date_greater all_rows date_column:year (date 2013 -1 -1)) "
"date_column:year)")
action_sequence = self.language.logical_form_to_action_sequence(logical_form)
assert self.language.action_sequence_to_logical_form(action_sequence) == logical_form
def test_get_agenda(self):
tokens = [Token(x) for x in ['what', 'was', 'the', 'difference', 'in', 'attendance',
'between', 'years', '2001', 'and', '2005', '?']]
world = self._get_world_with_question_tokens(tokens)
# "year" column does not match because "years" occurs in the question.
assert set(world.get_agenda()) == {'Number -> 2001',
'Number -> 2005',
'str -> string:2005',
'str -> string:2001',
'<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals',
'<List[Row],List[Row],NumberColumn:Number> -> diff'}
# Conservative agenda does not have strings and numbers because they have multiple types.
assert set(world.get_agenda(conservative=True)) == {
'<List[Row],List[Row],NumberColumn:Number> -> diff',
'<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'total', 'avg.', 'attendance', 'in',
'years', '2001', 'and', '2005', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'Number -> 2001',
'Number -> 2005',
'str -> string:2005',
'str -> string:2001',
'<List[Row],NumberColumn:Number> -> sum',
'<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals',
'StringColumn -> string_column:avg_attendance',
'NumberColumn -> number_column:avg_attendance'}
# Conservative disallows "sum" for the question word "total" too.
assert set(world.get_agenda(conservative=True)) == {
'<List[Row],DateColumn,Date:List[Row]> -> filter_date_equals'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'average', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],NumberColumn:Number> -> average',
'StringColumn -> string_column:avg_attendance',
'NumberColumn -> number_column:avg_attendance'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row],NumberColumn:Number> -> average'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'largest', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],ComparableColumn:List[Row]> -> argmax',
'StringColumn -> string_column:avg_attendance',
'NumberColumn -> number_column:avg_attendance'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row],ComparableColumn:List[Row]> -> argmax'}
tokens = [Token(x) for x in ['when', 'was', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'StringColumn -> string_column:avg_attendance',
'<List[Row],DateColumn:Date> -> select_date',
'NumberColumn -> number_column:avg_attendance'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'<List[Row],DateColumn:Date> -> select_date'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'attendance', 'after', 'the',
'time', 'with', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'StringColumn -> string_column:avg_attendance',
'<List[Row]:List[Row]> -> next',
'NumberColumn -> number_column:avg_attendance'}
# conservative disallows "after" mapping to "next"
assert set(world.get_agenda(conservative=True)) == {'<List[Row],ComparableColumn:List[Row]> -> argmin'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'attendance', 'below', 'the',
'row', 'with', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'StringColumn -> string_column:avg_attendance',
'<List[Row]:List[Row]> -> next',
'NumberColumn -> number_column:avg_attendance'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'<List[Row]:List[Row]> -> next'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'attendance', 'before', 'the',
'time', 'with', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'StringColumn -> string_column:avg_attendance',
'<List[Row]:List[Row]> -> previous',
'NumberColumn -> number_column:avg_attendance'}
# conservative disallows "before" mapping to "previous"
assert set(world.get_agenda(conservative=True)) == {'<List[Row],ComparableColumn:List[Row]> -> argmin'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'attendance', 'above', 'the',
'row', 'with', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'StringColumn -> string_column:avg_attendance',
'<List[Row]:List[Row]> -> previous',
'NumberColumn -> number_column:avg_attendance'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row],ComparableColumn:List[Row]> -> argmin',
'<List[Row]:List[Row]> -> previous'}
tokens = [Token(x) for x in ['when', 'was', 'the', 'avg.', 'attendance', 'same', 'as', 'when',
'the', 'league', 'was', 'usl', 'a', 'league', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'StringColumn -> string_column:avg_attendance',
'NumberColumn -> number_column:avg_attendance',
'StringColumn -> string_column:league',
'str -> string:usl_a_league',
'<List[Row],Column:List[Row]> -> same_as',
'<List[Row],DateColumn:Date> -> select_date'}
assert set(world.get_agenda(conservative=True)) == {'StringColumn -> string_column:league',
'str -> string:usl_a_league',
'<List[Row],Column:List[Row]> -> same_as',
'<List[Row],DateColumn:Date> -> select_date'}
tokens = [Token(x) for x in ['what', 'is', 'the', 'least', 'avg.', 'attendance', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],NumberColumn:Number> -> min_number',
'StringColumn -> string_column:avg_attendance',
'NumberColumn -> number_column:avg_attendance'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row],NumberColumn:Number> -> min_number'}
tokens = [Token(x) for x in ['when', 'did', 'the', 'team', 'not', 'qualify', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],DateColumn:Date> -> select_date', 'str -> string:qualify'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row],DateColumn:Date> -> select_date',
'str -> string:qualify'}
tokens = [Token(x) for x in ['when', 'was', 'the', 'avg.', 'attendance', 'at', 'least',
'7000', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater_equals',
'<List[Row],DateColumn:Date> -> select_date',
'NumberColumn -> number_column:avg_attendance',
'StringColumn -> string_column:avg_attendance',
'Number -> 7000'}
assert set(world.get_agenda(conservative=True)) == {
'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater_equals',
'<List[Row],DateColumn:Date> -> select_date',
'Number -> 7000'}
tokens = [Token(x) for x in ['when', 'was', 'the', 'avg.', 'attendance', 'more', 'than',
'7000', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater',
'<List[Row],DateColumn:Date> -> select_date',
'NumberColumn -> number_column:avg_attendance',
'StringColumn -> string_column:avg_attendance', 'Number -> 7000'}
assert set(world.get_agenda(conservative=True)) == {
'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_greater',
'<List[Row],DateColumn:Date> -> select_date',
'Number -> 7000'}
tokens = [Token(x) for x in ['when', 'was', 'the', 'avg.', 'attendance', 'at', 'most',
'7000', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals',
'<List[Row],DateColumn:Date> -> select_date',
'NumberColumn -> number_column:avg_attendance',
'StringColumn -> string_column:avg_attendance',
'Number -> 7000'}
assert set(world.get_agenda(conservative=True)) == {
'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals',
'<List[Row],DateColumn:Date> -> select_date',
'Number -> 7000'}
tokens = [Token(x) for x in ['when', 'was', 'the', 'avg.', 'attendance', 'no', 'more',
'than', '7000', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {
'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals',
'<List[Row],DateColumn:Date> -> select_date',
'NumberColumn -> number_column:avg_attendance',
'StringColumn -> string_column:avg_attendance',
'Number -> 7000'}
assert set(world.get_agenda(conservative=True)) == {
'<List[Row],NumberColumn,Number:List[Row]> -> filter_number_lesser_equals',
'<List[Row],DateColumn:Date> -> select_date',
'Number -> 7000'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'top', 'year', '?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row]:List[Row]> -> first', 'StringColumn -> string_column:year',
'NumberColumn -> number_column:year',
'DateColumn -> date_column:year'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row]:List[Row]> -> first'}
tokens = [Token(x) for x in ['what', 'was', 'the', 'year', 'in', 'the', 'bottom', 'row',
'?']]
world = self._get_world_with_question_tokens(tokens)
assert set(world.get_agenda()) == {'<List[Row]:List[Row]> -> last', 'StringColumn -> string_column:year',
'NumberColumn -> number_column:year',
'DateColumn -> date_column:year'}
assert set(world.get_agenda(conservative=True)) == {'<List[Row]:List[Row]> -> last'}
| 60.836815
| 151
| 0.57634
|
5a8916e02c01e6939ab63b5423f2689225fe93a9
| 12,569
|
py
|
Python
|
src/adata/gui/text.py
|
txemavs/adata
|
89a6d27fa59dc26ae3036685bb9d8bfb8e983fd9
|
[
"MIT"
] | 1
|
2018-03-24T12:18:08.000Z
|
2018-03-24T12:18:08.000Z
|
src/adata/gui/text.py
|
txemavs/adata
|
89a6d27fa59dc26ae3036685bb9d8bfb8e983fd9
|
[
"MIT"
] | null | null | null |
src/adata/gui/text.py
|
txemavs/adata
|
89a6d27fa59dc26ae3036685bb9d8bfb8e983fd9
|
[
"MIT"
] | null | null | null |
# adata.gui.text
'''
Console emulation using scintilla.
- StyledText is an output text control
- Console includes a prompt
TODO: Hidden time and process name margins
'''
from .basic import *
from wx import stc
import json
import keyword
RGB = wx.Colour
SYS_COLOUR = wx.SystemSettings.GetColour
#self.GetEOLMode()
#See http://proton-ce.sourceforge.net/rc/scintilla/pyframe/www.pyframe.com/stc/markers.html
MARKERS = {
'prompt': (stc.STC_MARK_SHORTARROW, RGB(0, 0, 0), RGB(0, 255, 0) ),
'blue_arrow': (stc.STC_MARK_ARROW, RGB(0, 0, 0), RGB(0, 0, 255) ),
'blue_circle': (stc.STC_MARK_CIRCLE, RGB(0, 0, 0), RGB(0, 0, 255) ),
'red_arrow': (stc.STC_MARK_ARROW, RGB(0, 0, 0), RGB(255, 0, 0) ),
'red_rect': (stc.STC_MARK_SMALLRECT, RGB(0, 0, 0), RGB(255, 0, 0) ),
'red_back': (stc.STC_MARK_BACKGROUND, RGB(255, 255, 0), RGB(32, 0, 0) ),
'green_back': (stc.STC_MARK_BACKGROUND, RGB(255, 255, 0), RGB(0, 32, 0) ),
'blue_back': (stc.STC_MARK_BACKGROUND, RGB(255, 255, 0), RGB(0, 0, 32) ),
'red_circle': (stc.STC_MARK_CIRCLE, RGB(0, 0, 0), RGB(255, 64, 64) ),
'orange_arrow': (stc.STC_MARK_ARROW, RGB(0, 0, 0), RGB(255, 128, 0) ),
'green_arrow': (stc.STC_MARK_ARROW, RGB(0, 0, 0), RGB(32, 255, 32) ),
'dots': (stc.STC_MARK_DOTDOTDOT, )}
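# Illustrative sketch (not part of the original module): each MARKERS entry is a
# tuple of MarkerDefine() arguments (symbol, foreground, background) registered
# against a numeric marker id, which is what StyledText.__init__ does below.
# `ctrl` is a hypothetical stc.StyledTextCtrl instance.
def _example_define_markers(ctrl):
    numbers = {}
    for number, (name, params) in enumerate(MARKERS.items(), start=1):
        ctrl.MarkerDefine(number, *params)  # define symbol and colours for this id
        numbers[name] = number              # remember the name -> marker id mapping
    return numbers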
class StyledText(stc.StyledTextCtrl):
'''
Basic scintilla text control with styles, write and echo methods
See https://docs.wxpython.org/wx.stc.StyledTextCtrl.html
'''
_style_cache = [None]
def index_style(self, style):
        ''' Append to the style cache if necessary and return the index.
'''
if not style in self._style_cache:
self._style_cache.append(style)
self.StyleSetSpec(len(self._style_cache)-1, style)
return self._style_cache.index(style)
def echo(self, text=None, style=None, lf=True, marker=None, icon=None):
        ''' Print a line of text above the prompt, optionally styled and marked.
'''
if text is None: text=""
line = self.MarkerLineFromHandle(self.marker["prompt"])-1
pos = self.GetLineEndPosition(line)
self.InsertText(pos, '%s%s' % (text,'\n' if lf else '') )
if style is not None:
length = len(text)
self.StartStyling(pos=pos, mask=0xFF)
self.SetStyling(length=length, style=self.index_style(style))
mark = None
if icon is not None:
if icon in self.mark_number.keys():
markerNumber = self.mark_number[icon]
else:
markerNumber = 0
mark = self.MarkerAdd(line, markerNumber)
if marker is not None:
self.marker[marker] = self.MarkerAdd(line) if mark is None else mark
#TODO: Modify a previously marked line (EX: Task... [OK-1:21] )
self.EnsureCaretVisible()
def write(self, text):
        ''' Standard file-like write method.
'''
if len(text)>80:
if text[0] in ("(","[","{"):
text = ',\n'.join(text.split(','))
#o = json.loads(text)
#text = json.dumps(o, indent=4, sort_keys=True)
wx.CallAfter(self.echo,
text = text,
lf = False,
)
def __init__(self, *args, **kwargs):
'''Initialize stc.StyledTextCtrl and set marker and style specs.
See stc.StyledTextCtrl documentation.
'''
stc.StyledTextCtrl.__init__(self, *args, **kwargs)
self.SetScrollWidthTracking(True)
self._styles = PlatformStyle()
self._styles.update({
'fore' : '#ffffff',
'back' : '#000002',
'calltip' : '#FFFF00',
'calltipback' : '#004000',
})
self.StyleSetSpec(
stc.STC_STYLE_DEFAULT,
"face:%(mono)s,size:%(size)d,fore:%(fore)s,back:%(back)s" % self._styles
)
self.StyleClearAll()
self.SetSelForeground(True, SYS_COLOUR(wx.SYS_COLOUR_HIGHLIGHTTEXT))
self.SetSelBackground(True, SYS_COLOUR(wx.SYS_COLOUR_HIGHLIGHT))
# Define markers
self.marker = {}
self.mark_number = {}
number = 0
for name, params in MARKERS.items():
number+=1
self.MarkerDefine(number, *params)
self.mark_number[name] = number
class TextEditor(StyledText):
def __init__(self, *args, **kwargs):
'''Initialize stc.StyledTextCtrl and set marker and style specs.
See stc.StyledTextCtrl documentation.
'''
StyledText.__init__(self, *args, **kwargs)
self.SetUseTabs(True)
self.SetTabWidth(4)
self.SetViewWhiteSpace(False)
self.SetLineNumbers(True)
self.StyleSetSpec(stc.STC_STYLE_LINENUMBER, "fore:#000000")
self.SetLexer(stc.STC_LEX_PYTHON)
self.SetKeyWords(0, ' '.join(keyword.kwlist))
try:
self.SetEndAtLastLine(False)
except AttributeError:
pass
#self.Bind(stc.EVT_STC_UPDATEUI, self.OnUpdateUI)
def SetLineNumbers(self, state):
self.lineNumbers = state
if state:
self.SetMarginType(1, stc.STC_MARGIN_NUMBER)
self.SetMarginWidth(1, 40)
else:
            # Leave a small margin so the hidden-lines marker can still be seen
self.SetMarginType(1, 0)
self.SetMarginWidth(1, 10)
class Console(StyledText):
    '''Styled text window (stc.StyledTextCtrl) with an input prompt.
:param args: See ``wx.stc.StyledTextCtrl``
:type args: parameters
'''
__history = []
__history_index = -1
__zoom = 0
__find = ""
__special_keys = [
wx.WXK_LEFT,
wx.WXK_UP,
wx.WXK_RIGHT,
wx.WXK_DOWN,
wx.WXK_RETURN,
wx.WXK_TAB,
wx.WXK_BACK,
wx.WXK_DELETE
]
def __init__(self, *args, **kwargs):
StyledText.__init__(self, *args, **kwargs)
self.SetMarginType(1, stc.STC_MARGIN_SYMBOL)
self.SetMarginWidth(1, 20)
self.SetCaretStyle(2)
self.SetCaretForeground(wx.Colour(0, 255, 0))
self.AddText("\n") # A blank line before prompt, echo always writes here
try:
self.prompt = sys.ps1 # Create a prompt
except AttributeError:
self.prompt = ">>> "
self.AddText(self.prompt)
length = len(self.prompt)
self.StartStyling(pos=0, mask=0xFF)
self.SetStyling(length=length, style=self.index_style("fore:#00ff00,bold"))
self.marker["prompt"] = self.MarkerAdd(1, self.mark_number["prompt"])
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
def GoPrompt(self, text=None):
        '''Move cursor to prompt and optionally set the command text
:param text: Optional command
:type text: string
'''
line = self.MarkerLineFromHandle(self.marker["prompt"])
pos = self.GetLineIndentPosition(line)+len(self.prompt)
self.GotoPos( pos )
if text is not None:
self.DeleteRange(pos, self.GetLength()-pos)
if text!="":
self.AddText(text)
def Enter(self, cmd):
'''Override to send code somewhere
:param cmd: A command line.
:type cmd: string
'''
raise Exception("Not implemented - You must override me")
def OnKeyDown(self, event):
'''Key pressed event handler
        If the current line is not the prompt line, do nothing. Otherwise:
- Avoid prompt symbol modification
- Up / Down to navigate history
- Send code on Enter
:param event: Required event information.
:type event: ``wx.Event``
'''
line_current = self.GetCurrentLine()
line_prompt = self.MarkerLineFromHandle(self.marker["prompt"])
if line_current!=line_prompt:
return event.Skip()
pos_prompt = self.GetLineIndentPosition(line_prompt)
pos = self.GetCurrentPos()
if pos-pos_prompt<len(self.prompt): # U can't touch this
self.GoPrompt()
return
keycode = event.GetKeyCode()
if not keycode in self.__special_keys: # Not interested
return event.Skip()
if keycode in [wx.WXK_LEFT, wx.WXK_BACK]:
if (pos-pos_prompt)==len(self.prompt): # Don't go there
return
if keycode==wx.WXK_RETURN:
# Command
cmd = self.GetLine(line_prompt)[len(self.prompt):].strip()
if cmd=="": return
self.echo()
self.echo(self.prompt+cmd,"fore:#ffff00,bold")
self.GoPrompt('')
self.__history.insert(0, cmd)
self.__history_index = -1
self.Enter(cmd)
return
# Command history
if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
if keycode==wx.WXK_UP and self.__history_index<len(self.__history)-1:
self.__history_index+=1
if keycode==wx.WXK_DOWN and self.__history_index>0:
self.__history_index-=1
if self.__history_index<0: return
self.GoPrompt( self.__history[self.__history_index] )
return
event.Skip()
def ToggleWrapMode(self, event=None):
'''Toggle text wrap mode
:param event: Optional
:type event: ``wx.Event``
:return: self.GetWrapMode
:rtype: boolean
'''
state = not self.GetWrapMode()
self.SetWrapMode( state )
return state
def FontSmaller(self, e=None):
        '''Decrease the font size by one zoom step
:param event: Optional
:type event: ``wx.Event``
:return: zoom level
:rtype: int
'''
self.__zoom -= 1
self.SetZoom( self.__zoom )
return self.__zoom
def FontBigger(self, e=None):
        '''Increase the font size by one zoom step
:param event: Optional
:type event: ``wx.Event``
:return: zoom level
:rtype: int
'''
self.__zoom += 1
self.SetZoom( self.__zoom )
return self.__zoom
def GoPreviousMarker(self, e=None):
'''Go to previous marker
:param event: Optional
:type event: ``wx.Event``
'''
self.GotoLine( self.MarkerPrevious( self.GetCurrentLine()-1, 0xFF ) )
def GoPromptHandler(self, e):
        '''Go to the prompt line and move the caret to its end
:param event: Optional
:type event: ``wx.Event``
:return: line
:rtype: int
'''
line = self.MarkerLineFromHandle( self.marker["prompt"] )
self.GotoLine( line )
self.LineEnd()
return line
def SearchBox(self, e=None):
'''Find text
:param event: Optional
:type event: ``wx.Event``
'''
dialog = wx.TextEntryDialog( None, "Search", "Find", self.__find )
dialog.ShowModal()
value = dialog.GetValue()
if value=="": return
self.__find = value
self.CharRight()
self.SearchAnchor()
self.SearchNext(0xFF, value)
def SearchPreviousHandler(self, e=None):
        '''Search previous occurrence
:param event: Optional
:type event: ``wx.Event``
'''
if self.__find=="": return
self.CharLeft()
self.SearchAnchor()
self.SearchPrev(0xFF, self.__find)
self.EnsureCaretVisible()
def SearchNextHandler(self, e=None):
        '''Search next occurrence
:param event: Optional
:type event: ``wx.Event``
'''
if self.__find=="": return
self.CharRight()
self.SearchAnchor()
self.SearchNext(0xFF, self.__find)
self.EnsureCaretVisible()
def Mark(self, line, icon):
'''Create a new marker
:param line: Line number
:type line: int
:param icon: mark_number dictionary key
:type icon: string
:return: Marker ID
:rtype: int
'''
if not icon in self.mark_number.keys(): return
return self.MarkerAdd(line, self.mark_number[icon])
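# Minimal usage sketch (illustrative, not part of the original module): Console is
# meant to be subclassed, overriding Enter() to route the typed command somewhere.
# EchoConsole is a hypothetical example that simply echoes the command back.
class EchoConsole(Console):
    def Enter(self, cmd):
        # Echo the command back in a coloured style instead of executing it.
        self.echo("You typed: " + cmd, "fore:#00ffff")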
| 27.383442
| 96
| 0.555494
|
f476fc311e33cbf5f3b42266b9aa1dd36765ae2b
| 684
|
py
|
Python
|
api/order/models.py
|
csagar131/ecommerce-backend-api-s
|
7b580ad93677683a238a2ef699f55d85acc83dd0
|
[
"Apache-2.0"
] | null | null | null |
api/order/models.py
|
csagar131/ecommerce-backend-api-s
|
7b580ad93677683a238a2ef699f55d85acc83dd0
|
[
"Apache-2.0"
] | null | null | null |
api/order/models.py
|
csagar131/ecommerce-backend-api-s
|
7b580ad93677683a238a2ef699f55d85acc83dd0
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from api.user.models import CustomUser
from api.product.models import Product
# Create your models here.
class Order(models.Model):
user = models.ForeignKey(CustomUser, on_delete=models.CASCADE, null=True, blank=True)
product_names = models.CharField(max_length=500)
total_product = models.IntegerField(default=0)
transaction_id = models.CharField(max_length=50,default=0)
total_amount = models.CharField(default=0,max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return str(self.user) +"-["+ self.product_names + "]"
| 34.2
| 89
| 0.747076
|
68f040bcc28fc4c5e8523846e8a4b83da4da58ae
| 2,561
|
py
|
Python
|
bbg-firmware/GPS_test.py
|
pattern-of-life/Hardware
|
7b0eb1f2b39b9e152d5b52984553fcaac98d36e8
|
[
"MIT"
] | null | null | null |
bbg-firmware/GPS_test.py
|
pattern-of-life/Hardware
|
7b0eb1f2b39b9e152d5b52984553fcaac98d36e8
|
[
"MIT"
] | null | null | null |
bbg-firmware/GPS_test.py
|
pattern-of-life/Hardware
|
7b0eb1f2b39b9e152d5b52984553fcaac98d36e8
|
[
"MIT"
] | null | null | null |
from GSM import setup_serial, send_command, parse_response, close_serial
from time import sleep
def handle_commands(ser, commands):
for com in commands:
response = send_command(ser, com)
# print(response)
count = 1
while count:
if com == b'AT+HTTPREAD':
print("Last command was: AT+HTTPREAD")
sleep(3)
response = send_command(ser, b'AT+HTTPREAD')
print('Index of ACTION: {}'.format(response[0].find('ACTION:')))
# print(response)
count -= 1
else:
break
for i in response:
if type(i) == str and 'ERROR' in i:
sleep(1)
print("Resending command: {}".format(com))
response = send_command(ser, com)
return response
def parse_gps(word):
""" parse the CGNSINF response
('AT+CGNSINF\r\n+CGNSINF: 1,1,20161011222856.000,47.618717,-122.351538,38.000,0.80,328.3,1,,1.6,2.5,1.9,,11,8,,,38,,\r\n\r\nOK\r\n', 11)
"""
# word = "('AT+CGNSINF\r\n+CGNSINF: 1,1,20161011222856.000,47.618717,-122.351538,38.000,0.80,328.3,1,,1.6,2.5,1.9,,11,8,,,38,,\r\n\r\nOK\r\n', 11)"
split_word = word.split(':')
split_word = split_word[1].split('\r\n')
split_word = split_word[0].split(',', )
sw = split_word
# print("Datetime: {} Lat: {} Lng: {} Alt: {} Speed: {} Course: {}"
# .format(sw[2], sw[3], sw[4], sw[5], sw[6], sw[7]))
return split_word
def read_gps_datetime(datetime_str):
year = datetime_str[:4]
month = datetime_str[4:6]
day = datetime_str[6:8]
hours = datetime_str[8:10]
minutes = datetime_str[10:12]
seconds = datetime_str[12:14]
return '{}/{}/{} {}:{}:{}'.format(
year, month, day, hours, minutes, seconds
)
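# Illustrative sketch (not part of the original script) of how the two helpers above
# combine: parse_gps() splits a raw +CGNSINF sentence into comma-separated fields and
# read_gps_datetime() formats field 2 as a readable timestamp. The sample sentence is
# the one quoted in parse_gps()'s docstring.
def _example_parse_cgnsinf():
    raw = ('AT+CGNSINF\r\n+CGNSINF: 1,1,20161011222856.000,47.618717,'
           '-122.351538,38.000,0.80,328.3,1,,1.6,2.5,1.9,,11,8,,,38,,\r\n\r\nOK\r\n')
    fields = parse_gps(raw)
    return read_gps_datetime(fields[2])  # -> '2016/10/11 22:28:56'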
if __name__ == "__main__":
ser = setup_serial()
commands = []
commands.append(b'AT')
commands.append(b'AT+CBC')
commands.append(b'AT+CGNSPWR?')
commands.append(b'AT+CGNSPWR=1')
commands.append(b'AT+CGNSSEQ?')
commands.append(b'AT+CGNSSEQ=?')
commands.append(b'AT+CGNSSEQ=GGA')
handle_commands(ser, commands)
commands = []
commands.append(b'AT+CGNSINF')
count = 1
while count:
word, bytes_sent = handle_commands(ser, commands)
sw = parse_gps(word)
print("Datetime: {} Lat: {} Lng: {} Alt: {} Speed: {} Course: {}"
.format(sw[2], sw[3], sw[4], sw[5], sw[6], sw[7]))
sleep(10)
count -= 1
close_serial(ser)
| 30.129412
| 151
| 0.560718
|
79e7ad87f07386acac3d88ac3c285842d14e1d97
| 3,290
|
py
|
Python
|
test/test_redis_db_on_docker.py
|
LightStage-Aber/LightStage-Repo
|
92f21b1b8a9f701cac3976a8db7034ecfefc58c7
|
[
"Apache-2.0"
] | 10
|
2015-10-06T00:14:17.000Z
|
2022-02-04T14:03:30.000Z
|
test/test_redis_db_on_docker.py
|
LightStage-Aber/LightStage-Repo
|
92f21b1b8a9f701cac3976a8db7034ecfefc58c7
|
[
"Apache-2.0"
] | 10
|
2017-05-05T11:10:19.000Z
|
2019-06-04T15:30:24.000Z
|
test/test_redis_db_on_docker.py
|
LightStage-Aber/LightStage-Repo
|
92f21b1b8a9f701cac3976a8db7034ecfefc58c7
|
[
"Apache-2.0"
] | 2
|
2016-04-16T13:47:54.000Z
|
2019-10-09T20:16:41.000Z
|
default_path = "../src/"
service_path = "../src/service/"
import sys
sys.path.insert(0, default_path)
sys.path.insert(0, service_path)
import unittest
from db_access import RedisDBOnDocker
class Test_RedisDBOnDocker(unittest.TestCase):
def __init__(self, *args, **kwords):
unittest.TestCase.__init__(self, *args, **kwords)
self.redis = RedisDBOnDocker(ip=None, port=6379, db=10)
def test_redis_server_connection_is_up(self):
""""""
c = self.redis.get_connection()
actual = c.ping()
expected = True
self.assertTrue(actual == expected)
def test_redis_server_connection_db_name_is_correct(self):
""""""
c = self.redis.get_connection()
actual = c.echo("test")
expected = "test"
self.assertTrue(actual == expected)
def test_redis_server_connection_echo(self):
""""""
c = self.redis.get_connection()
actual = c.echo("test")
expected = "test"
self.assertTrue(actual == expected)
def test_redis_server_set(self):
""""""
c = self.redis.get_connection()
self.redis.set(key="test",value="test2")
actual = c.get("test")
expected = "test2"
self.assertTrue(actual == expected)
def test_redis_server_set_get(self):
""""""
c = self.redis.get_connection()
self.redis.set(key="test",value="test2")
actual = self.redis.get("test")
expected = "test2"
self.assertTrue(actual == expected)
def test_redis_server_set_overwrite_get(self):
""""""
c = self.redis.get_connection()
self.redis.set(key="test",value="test2")
self.redis.set(key="test",value="test3")
actual = self.redis.get("test")
expected = "test3"
self.assertTrue(actual == expected)
def test_redis_server_set_series_overwrite(self):
""""""
c = self.redis.get_connection()
self.redis.set_series([("test","test2"),("test","test3")])
actual = self.redis.get("test")
expected = "test3"
self.assertTrue(actual == expected)
def test_redis_server_set_series_dict(self):
""""""
c = self.redis.get_connection()
self.redis.set_series({"test1":"test2","test2":"test3"})
actual = self.redis.get("test2")
expected = "test3"
self.assertTrue(actual == expected)
def test_redis_server_set_series_list_tuples(self):
""""""
c = self.redis.get_connection()
self.redis.set_series([("test1","test2"),("test2","test3")])
actual = self.redis.get("test2")
expected = "test3"
self.assertTrue(actual == expected)
def test_redis_server_set_series_get_series(self):
""""""
c = self.redis.get_connection()
self.redis.set_series([("test1","test2"),("test2","test3")])
actual = self.redis.get_series(["test2","test1"])
expected = ["test3","test2"]
self.assertTrue(actual == expected)
def tearDown(self):
c = self.redis.get_connection()
c.flushdb()
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromModule( sys.modules[__name__] )
unittest.TextTestRunner(verbosity=3).run(suite)
| 29.909091
| 78
| 0.606687
|
76da04ab18927fa8ea6375f6c123415123b6a9e8
| 2,825
|
py
|
Python
|
read_and_train.py
|
iitmcvg/handwritten_ocr
|
b4b1e598aa39293eb3b929e3fa50ac3cab452bce
|
[
"Apache-2.0"
] | 20
|
2017-04-22T18:13:19.000Z
|
2021-11-08T11:20:53.000Z
|
read_and_train.py
|
Jagannathrk2020/OCR-Handwritten-Text
|
b4b1e598aa39293eb3b929e3fa50ac3cab452bce
|
[
"Apache-2.0"
] | 3
|
2016-10-15T05:31:24.000Z
|
2016-11-04T18:51:15.000Z
|
read_and_train.py
|
Jagannathrk2020/OCR-Handwritten-Text
|
b4b1e598aa39293eb3b929e3fa50ac3cab452bce
|
[
"Apache-2.0"
] | 18
|
2016-10-26T06:00:12.000Z
|
2020-09-21T19:10:51.000Z
|
import time
import os
import glob
import tensorflow as tf
import numpy as np
import cv2
import random
# path = 'by_class'
path = 'test'
batch_size=100
t1 = time.time()
file_names=glob.glob(os.path.join(path,'*','train_*','*.[pP][nN][gG]'))
no_of_files=len(file_names)
t2 = time.time()
#print(file_names[0])
print('Time to list files: ', t2-t1)
print('No of files: ',no_of_files)
unique_classes = [int(ele.split('/')[1], base=16) for ele in glob.glob(os.path.join(path,'*/'))]
no_of_classes = len(unique_classes)
labels=[int(ele.split('/')[1], base=16) for ele in file_names]
try:
label_names = [str(chr(i)) for i in labels] #python 3
except:
label_names = [str(unichr(i)) for i in labels] #python 2.7
label_encoding = dict()
for idx in range(len(unique_classes)):
try:
label_encoding[str(chr(unique_classes[idx]))] = idx
except:
label_encoding[str(unichr(unique_classes[idx]))] = idx
print('No of classes: ', no_of_classes)
print('Class encoding: ', label_encoding)
labels_oneHotEncoded = np.zeros((len(file_names),no_of_classes))
for k in range(no_of_files):
labels_oneHotEncoded[k,label_encoding[label_names[k]]]=1
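# Illustrative note (added): each row of labels_oneHotEncoded is all zeros except for a
# single 1 at the sample's class index, e.g. with 3 classes a sample of class index 1
# becomes [0., 1., 0.]. This is the one-hot target format the softmax layer below expects.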
t3 = time.time()
print('Time to list labels: ', t3-t2)
images = []
for i in range(no_of_files):
a=np.array(cv2.imread(file_names[i], 0))
images.append(a.ravel())
images = np.array(images)
t4 = time.time()
print('Time to read images: ',t4-t3)
# Takes about seconds to read test folder on my 4GB PC :-D
# And the code works!!
x = tf.placeholder(tf.float32, shape=[None, 128*128])
W = tf.Variable(tf.zeros([128*128, no_of_classes]))
b = tf.Variable(tf.zeros([no_of_classes]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, shape=[None, no_of_classes])
print('labels : ',labels_oneHotEncoded)
print('column size : ',images[1].shape)
print('no. of images :', len(images))
cv2.namedWindow('Input',0)
images=images*1.0/255.0
print('non zero :',np.count_nonzero(images[0]))
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.001,use_locking=False).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
#print(correct_prediction)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
acc_list = []
for i in range(1000):
rand_idx = random.sample(range(no_of_files), batch_size)
batch_x, batch_y = images[rand_idx], labels_oneHotEncoded[rand_idx]
#Training the NN
sess.run(train_step, feed_dict={x: batch_x, y_: batch_y})
print('Iteration {:} done'.format(i))
acc_list.append(sess.run(accuracy, feed_dict={x: images, y_: labels_oneHotEncoded}))
print(max(acc_list))
# print( W[0],x[0])
| 27.696078
| 96
| 0.703363
|
fd10eb578909ed2008a4c4c95bed49574bdb2509
| 26,873
|
py
|
Python
|
python/ks_api_client/api_client.py
|
ashwinkp/ksapi
|
c348765cefb4d51fd90febcbfa9ff890b67bdc7d
|
[
"Apache-2.0"
] | 7
|
2022-02-05T16:20:37.000Z
|
2022-02-27T16:48:28.000Z
|
python/ks_api_client/api_client.py
|
ashwinkp/ksapi
|
c348765cefb4d51fd90febcbfa9ff890b67bdc7d
|
[
"Apache-2.0"
] | 19
|
2022-02-03T12:40:08.000Z
|
2022-03-30T09:12:46.000Z
|
python/ks_api_client/api_client.py
|
ashwinkp/ksapi
|
c348765cefb4d51fd90febcbfa9ff890b67bdc7d
|
[
"Apache-2.0"
] | 12
|
2021-12-23T06:14:21.000Z
|
2022-03-28T07:47:19.000Z
|
# coding: utf-8
from __future__ import absolute_import
import atexit
import datetime
from dateutil.parser import parse
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from ks_api_client.configuration import Configuration
import ks_api_client.models
from ks_api_client import rest
from ks_api_client.exceptions import ApiValueError, ApiException
class ApiClient(object):
"""
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'KSTradeApi-python/1.0.0/python'
self.client_side_validation = configuration.client_side_validation
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_type=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, _host=None,
_request_auth=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
post_params.extend(self.files_parameters(files))
# auth setting
self.update_params_for_auth(
header_params, query_params, auth_settings,
request_auth=_request_auth)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
except ApiException as e:
e.body = e.body.decode('utf-8') if six.PY3 else e.body
raise e
content_type = response_data.getheader('content-type')
self.last_response = response_data
return_data = response_data
if not _preload_content:
return return_data
if six.PY3 and response_type not in ["file", "bytes"]:
match = None
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
encoding = match.group(1) if match else "utf-8"
response_data.data = response_data.data.decode(encoding)
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `openapi_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.openapi_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
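    # Illustrative note (added): for example, sanitize_for_serialization(
    # {'when': datetime.date(2020, 1, 1), 'ids': [1, 2]}) yields
    # {'when': '2020-01-01', 'ids': [1, 2]}: dates become ISO-8601 strings while
    # primitives and containers are passed through recursively.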
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
data = json.loads(response.data)
if data.get("fault"):
api_exception = ApiException(status = response.status, reason = response.reason, body = response.data)
api_exception.status=data.get('fault').get('code')
api_exception.reason=data.get('fault').get('message')
raise api_exception
return self.__deserialize(data, response_type)
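    # Illustrative note (added): a body such as
    # {"fault": {"code": 1001, "message": "Invalid session"}} is surfaced by
    # deserialize() above as an ApiException whose status and reason are replaced
    # by the fault's code and message.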
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(ks_api_client.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, async_req=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, _host=None,
_request_auth=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
        :param response_type: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
        :type _request_auth: dict, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_request_auth)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _request_auth))
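    # Hedged usage sketch (the endpoint path and response type below are made-up
    # placeholders, not part of this module):
    #   thread = api_client.call_api('/pets', 'GET',
    #                                response_type='list[Pet]', async_req=True)
    #   result = thread.get()   # async_req=True returns an AsyncResult from the pool
    # Without async_req the same call runs synchronously and returns __call_api's
    # result directly (which may include status code and headers unless
    # _return_http_data_only is set).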
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
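    # Example of the collection formats handled above (values are illustrative):
    #   self.parameters_to_tuples({'id': [1, 2, 3]}, {'id': 'csv'})   -> [('id', '1,2,3')]
    #   self.parameters_to_tuples({'id': [1, 2, 3]}, {'id': 'multi'}) -> [('id', 1), ('id', 2), ('id', 3)]
    # 'ssv', 'tsv' and 'pipes' behave like 'csv' with ' ', '\t' and '|' delimiters.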
def files_parameters(self, files=None):
"""Builds form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
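    # The tuples built above follow urllib3's multipart convention: one entry per
    # file, e.g. ('attachment', ('report.csv', b'<bytes>', 'text/csv')).
    # Field name and file name here are illustrative only; empty values are skipped.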
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings,
request_auth=None):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param request_auth: if set, the provided settings will
override the token in the configuration.
"""
if not auth_settings:
return
if request_auth:
self._apply_auth_params(headers, querys, request_auth)
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
self._apply_auth_params(headers, querys, auth_setting)
def _apply_auth_params(self, headers, querys, auth_setting):
"""Updates the request parameters based on a single auth_setting
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_setting: auth settings for the endpoint
"""
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
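    # Shape of an auth_setting entry consumed above (keys come from
    # Configuration.auth_settings() and vary per API; values here are sketches):
    #   {'in': 'header', 'key': 'Authorization', 'value': 'Bearer <token>'}
    #   {'in': 'query',  'key': 'api_key',       'value': '<token>'}
    #   {'in': 'cookie', 'value': 'session=<token>'}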
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return an original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datetime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
has_discriminator = False
if (hasattr(klass, 'get_real_child_model')
and klass.discriminator_value_class_map):
has_discriminator = True
if not klass.openapi_types and has_discriminator is False:
return data
kwargs = {}
if (data is not None and
klass.openapi_types is not None and
isinstance(data, (list, dict))):
for attr, attr_type in six.iteritems(klass.openapi_types):
if klass.attribute_map[attr] in data:
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
instance = klass(**kwargs)
if has_discriminator:
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance
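    # Discriminator sketch (descriptive comment only, mirroring the logic above):
    # when a model exposes get_real_child_model() and a discriminator_value_class_map,
    # the payload is first loaded into the base class, the discriminator field picks
    # the concrete subclass name, and the same data is deserialized again as that subclass.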
| 39.87092
| 114
| 0.552116
|
407b832323caec320984e562c445aead70cd6ba4
| 537
|
py
|
Python
|
output/models/ms_data/schema/sch_t10_a_xsd/sch_t10_b.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/schema/sch_t10_a_xsd/sch_t10_b.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/schema/sch_t10_a_xsd/sch_t10_b.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "ns-a"
@dataclass
class BCt:
class Meta:
name = "b-ct"
att1: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
att2: str = field(
init=False,
default="bar",
metadata={
"type": "Attribute",
"required": True,
}
)
@dataclass
class BE1(BCt):
class Meta:
name = "b-e1"
namespace = "ns-a"
| 16.272727
| 40
| 0.506518
|
4ef98bc73e299015c0ac1283ba4c5121d0b5787b
| 16,189
|
py
|
Python
|
src/oci/waf/models/network_address_list.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waf/models/network_address_list.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waf/models/network_address_list.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class NetworkAddressList(object):
"""
    IP addresses that can be shared between different WebAppFirewallPolicies.
"""
#: A constant which can be used with the lifecycle_state property of a NetworkAddressList.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a NetworkAddressList.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a NetworkAddressList.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a NetworkAddressList.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a NetworkAddressList.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a NetworkAddressList.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the type property of a NetworkAddressList.
#: This constant has a value of "ADDRESSES"
TYPE_ADDRESSES = "ADDRESSES"
#: A constant which can be used with the type property of a NetworkAddressList.
#: This constant has a value of "VCN_ADDRESSES"
TYPE_VCN_ADDRESSES = "VCN_ADDRESSES"
def __init__(self, **kwargs):
"""
        Initializes a new NetworkAddressList object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
        to a service operation then you should favor using a subclass over the base class:
* :class:`~oci.waf.models.NetworkAddressListAddresses`
* :class:`~oci.waf.models.NetworkAddressListVcnAddresses`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this NetworkAddressList.
:type id: str
:param display_name:
The value to assign to the display_name property of this NetworkAddressList.
:type display_name: str
:param compartment_id:
The value to assign to the compartment_id property of this NetworkAddressList.
:type compartment_id: str
:param time_created:
The value to assign to the time_created property of this NetworkAddressList.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this NetworkAddressList.
:type time_updated: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this NetworkAddressList.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this NetworkAddressList.
:type lifecycle_details: str
:param type:
The value to assign to the type property of this NetworkAddressList.
Allowed values for this property are: "ADDRESSES", "VCN_ADDRESSES", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type type: str
:param freeform_tags:
The value to assign to the freeform_tags property of this NetworkAddressList.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this NetworkAddressList.
:type defined_tags: dict(str, dict(str, object))
:param system_tags:
The value to assign to the system_tags property of this NetworkAddressList.
:type system_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'compartment_id': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'lifecycle_state': 'str',
'lifecycle_details': 'str',
'type': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'system_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'compartment_id': 'compartmentId',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'lifecycle_state': 'lifecycleState',
'lifecycle_details': 'lifecycleDetails',
'type': 'type',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'system_tags': 'systemTags'
}
self._id = None
self._display_name = None
self._compartment_id = None
self._time_created = None
self._time_updated = None
self._lifecycle_state = None
self._lifecycle_details = None
self._type = None
self._freeform_tags = None
self._defined_tags = None
self._system_tags = None
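        # Hedged usage sketch (keyword names mirror the swagger_types above; the OCID
        # and display name are made up for illustration):
        #   nal = NetworkAddressList(
        #       display_name='example-list',
        #       compartment_id='ocid1.compartment.oc1..example',
        #       lifecycle_state='ACTIVE',
        #       type='ADDRESSES',
        #   )
        # In practice the service returns one of the concrete subclasses selected by
        # get_subtype() below.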
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['type']
if type == 'ADDRESSES':
return 'NetworkAddressListAddresses'
if type == 'VCN_ADDRESSES':
return 'NetworkAddressListVcnAddresses'
else:
return 'NetworkAddressList'
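    # Example of the dispatch above (the input dict is illustrative):
    #   NetworkAddressList.get_subtype({'type': 'VCN_ADDRESSES'})
    #       -> 'NetworkAddressListVcnAddresses'
    # Unrecognized 'type' values fall back to the base 'NetworkAddressList' class name.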
@property
def id(self):
"""
**[Required]** Gets the id of this NetworkAddressList.
The `OCID`__ of the NetworkAddressList.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The id of this NetworkAddressList.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this NetworkAddressList.
The `OCID`__ of the NetworkAddressList.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param id: The id of this NetworkAddressList.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this NetworkAddressList.
NetworkAddressList display name, can be renamed.
:return: The display_name of this NetworkAddressList.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this NetworkAddressList.
NetworkAddressList display name, can be renamed.
:param display_name: The display_name of this NetworkAddressList.
:type: str
"""
self._display_name = display_name
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this NetworkAddressList.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this NetworkAddressList.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this NetworkAddressList.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this NetworkAddressList.
:type: str
"""
self._compartment_id = compartment_id
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this NetworkAddressList.
The time the NetworkAddressList was created. An RFC3339 formatted datetime string.
:return: The time_created of this NetworkAddressList.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this NetworkAddressList.
The time the NetworkAddressList was created. An RFC3339 formatted datetime string.
:param time_created: The time_created of this NetworkAddressList.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this NetworkAddressList.
The time the NetworkAddressList was updated. An RFC3339 formatted datetime string.
:return: The time_updated of this NetworkAddressList.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this NetworkAddressList.
The time the NetworkAddressList was updated. An RFC3339 formatted datetime string.
:param time_updated: The time_updated of this NetworkAddressList.
:type: datetime
"""
self._time_updated = time_updated
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this NetworkAddressList.
The current state of the NetworkAddressList.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this NetworkAddressList.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this NetworkAddressList.
The current state of the NetworkAddressList.
:param lifecycle_state: The lifecycle_state of this NetworkAddressList.
:type: str
"""
allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this NetworkAddressList.
A message describing the current state in more detail.
For example, can be used to provide actionable information for a resource in FAILED state.
:return: The lifecycle_details of this NetworkAddressList.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this NetworkAddressList.
A message describing the current state in more detail.
For example, can be used to provide actionable information for a resource in FAILED state.
:param lifecycle_details: The lifecycle_details of this NetworkAddressList.
:type: str
"""
self._lifecycle_details = lifecycle_details
@property
def type(self):
"""
**[Required]** Gets the type of this NetworkAddressList.
Type of NetworkAddressList.
Allowed values for this property are: "ADDRESSES", "VCN_ADDRESSES", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The type of this NetworkAddressList.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this NetworkAddressList.
Type of NetworkAddressList.
:param type: The type of this NetworkAddressList.
:type: str
"""
allowed_values = ["ADDRESSES", "VCN_ADDRESSES"]
if not value_allowed_none_or_none_sentinel(type, allowed_values):
type = 'UNKNOWN_ENUM_VALUE'
self._type = type
@property
def freeform_tags(self):
"""
**[Required]** Gets the freeform_tags of this NetworkAddressList.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this NetworkAddressList.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this NetworkAddressList.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this NetworkAddressList.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
**[Required]** Gets the defined_tags of this NetworkAddressList.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this NetworkAddressList.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this NetworkAddressList.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this NetworkAddressList.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def system_tags(self):
"""
**[Required]** Gets the system_tags of this NetworkAddressList.
Usage of system tag keys. These predefined keys are scoped to namespaces.
Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
:return: The system_tags of this NetworkAddressList.
:rtype: dict(str, dict(str, object))
"""
return self._system_tags
@system_tags.setter
def system_tags(self, system_tags):
"""
Sets the system_tags of this NetworkAddressList.
Usage of system tag keys. These predefined keys are scoped to namespaces.
Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
:param system_tags: The system_tags of this NetworkAddressList.
:type: dict(str, dict(str, object))
"""
self._system_tags = system_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 34.740343
| 245
| 0.651801
|
8c8f870d69958fbb5872e8dd3267d494e0a934bf
| 9,757
|
py
|
Python
|
tests/make_test.py
|
dengmingcong/httprunner
|
379c758f0ab5d828156ba706c7dacdbf4d7aaba0
|
[
"Apache-2.0"
] | null | null | null |
tests/make_test.py
|
dengmingcong/httprunner
|
379c758f0ab5d828156ba706c7dacdbf4d7aaba0
|
[
"Apache-2.0"
] | 1
|
2022-03-09T03:05:23.000Z
|
2022-03-09T03:05:23.000Z
|
tests/make_test.py
|
dengmingcong/httprunner
|
379c758f0ab5d828156ba706c7dacdbf4d7aaba0
|
[
"Apache-2.0"
] | 1
|
2022-03-02T03:13:27.000Z
|
2022-03-02T03:13:27.000Z
|
import os
import unittest
import pytest
from httprunner import loader
from httprunner.make import (
main_make,
convert_testcase_path,
pytest_files_made_cache_mapping,
make_config_chain_style,
make_teststep_chain_style,
pytest_files_run_set,
ensure_file_abs_path_valid,
)
class TestMake(unittest.TestCase):
def setUp(self) -> None:
pytest_files_made_cache_mapping.clear()
pytest_files_run_set.clear()
loader.project_meta = None
@pytest.mark.skip
def test_make_testcase(self):
path = ["examples/postman_echo/request_methods/request_with_variables.yml"]
testcase_python_list = main_make(path)
self.assertEqual(
testcase_python_list[0],
os.path.join(
os.getcwd(),
os.path.join(
"examples",
"postman_echo",
"request_methods",
"request_with_variables_test.py",
),
),
)
@pytest.mark.skip
def test_make_testcase_with_ref(self):
path = [
"examples/postman_echo/request_methods/request_with_testcase_reference.yml"
]
testcase_python_list = main_make(path)
self.assertEqual(len(testcase_python_list), 1)
self.assertIn(
os.path.join(
os.getcwd(),
os.path.join(
"examples",
"postman_echo",
"request_methods",
"request_with_testcase_reference_test.py",
),
),
testcase_python_list,
)
with open(
os.path.join(
"examples",
"postman_echo",
"request_methods",
"request_with_testcase_reference_test.py",
)
) as f:
content = f.read()
self.assertIn(
"""
from request_methods.request_with_functions_test import (
TestCaseRequestWithFunctions as RequestWithFunctions,
)
""",
content,
)
self.assertIn(
".call(RequestWithFunctions)", content,
)
def test_make_testcase_folder(self):
path = ["examples/postman_echo/request_methods/"]
testcase_python_list = main_make(path)
self.assertIn(
os.path.join(
os.getcwd(),
os.path.join(
"examples",
"postman_echo",
"request_methods",
"request_with_functions_test.py",
),
),
testcase_python_list,
)
def test_ensure_file_path_valid(self):
self.assertEqual(
ensure_file_abs_path_valid(
os.path.join(os.getcwd(), "tests", "data", "a-b.c", "2 3.yml")
),
os.path.join(os.getcwd(), "tests", "data", "a_b_c", "T2_3.yml"),
)
loader.project_meta = None
self.assertEqual(
ensure_file_abs_path_valid(
os.path.join(os.getcwd(), "examples", "postman_echo", "request_methods")
),
os.path.join(os.getcwd(), "examples", "postman_echo", "request_methods"),
)
loader.project_meta = None
self.assertEqual(
ensure_file_abs_path_valid(os.path.join(os.getcwd(), "README.md")),
os.path.join(os.getcwd(), "README.md"),
)
loader.project_meta = None
self.assertEqual(
ensure_file_abs_path_valid(os.getcwd()), os.getcwd(),
)
loader.project_meta = None
self.assertEqual(
ensure_file_abs_path_valid(
os.path.join(os.getcwd(), "tests", "data", ".csv")
),
os.path.join(os.getcwd(), "tests", "data", ".csv"),
)
def test_convert_testcase_path(self):
self.assertEqual(
convert_testcase_path(
os.path.join(os.getcwd(), "tests", "data", "a-b.c", "2 3.yml")
),
(
os.path.join(os.getcwd(), "tests", "data", "a_b_c", "T2_3_test.py"),
"T23",
),
)
self.assertEqual(
convert_testcase_path(
os.path.join(os.getcwd(), "tests", "data", "a-b.c", "中文case.yml")
),
(
os.path.join(
os.getcwd(),
os.path.join("tests", "data", "a_b_c", "中文case_test.py"),
),
"中文Case",
),
)
@pytest.mark.skip
def test_make_testsuite(self):
path = ["examples/postman_echo/request_methods/demo_testsuite.yml"]
testcase_python_list = main_make(path)
self.assertEqual(len(testcase_python_list), 2)
self.assertIn(
os.path.join(
os.getcwd(),
os.path.join(
"examples",
"postman_echo",
"request_methods",
"demo_testsuite_yml",
"request_with_functions_test.py",
),
),
testcase_python_list,
)
self.assertIn(
os.path.join(
os.getcwd(),
os.path.join(
"examples",
"postman_echo",
"request_methods",
"demo_testsuite_yml",
"request_with_testcase_reference_test.py",
),
),
testcase_python_list,
)
@pytest.mark.skip
def test_make_config_chain_style(self):
config = {
"name": "request methods testcase: validate with functions",
"variables": {"foo1": "bar1", "foo2": 22},
"base_url": "https://postman_echo.com",
"verify": False,
"path": "examples/postman_echo/request_methods/validate_with_functions_test.py",
}
self.assertEqual(
make_config_chain_style(config),
"""
Config("request methods testcase: validate with functions")
.variables(**{'foo1': 'bar1', 'foo2': 22})
.base_url("https://postman_echo.com").verify(False)""",
)
@pytest.mark.skip
def test_make_teststep_chain_style(self):
step = {
"name": "get with params",
"variables": {"foo1": "bar1", "foo2": 123, "sum_v": "${sum_two(1, 2)}", },
"request": {
"method": "GET",
"url": "/get",
"params": {"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"},
"headers": {"User-Agent": "HttpRunner/${get_httprunner_version()}"},
},
"testcase": "CLS_LB(TestCaseDemo)CLS_RB",
"extract": {
"session_foo1": "body.args.foo1",
"session_foo2": "body.args.foo2",
},
"validate": [
{"eq": ["status_code", 200]},
{"eq": ["body.args.sum_v", "3"]},
],
}
teststep_chain_style = make_teststep_chain_style(step)
self.assertEqual(
teststep_chain_style,
"""
Step(RunRequest("get with params")
.with_variables(**{'foo1': 'bar1', 'foo2': 123, 'sum_v': '${sum_two(1, 2)}'})
.get("/get")
.with_params(**{'foo1': '$foo1', 'foo2': '$foo2', 'sum_v': '$sum_v'})
.with_headers(**{'User-Agent': 'HttpRunner/${get_httprunner_version()}'})
.extract()
.with_jmespath('body.args.foo1', 'session_foo1')
.with_jmespath('body.args.foo2', 'session_foo2')
.validate().assert_equal("status_code", 200).assert_equal("body.args.sum_v", "3"))""",
)
@pytest.mark.skip
def test_make_requests_with_json_chain_style(self):
step = {
"name": "get with params",
"variables": {
"foo1": "bar1",
"foo2": 123,
"sum_v": "${sum_two(1, 2)}",
"myjson": {"name": "user", "password": "123456"},
},
"request": {
"method": "GET",
"url": "/get",
"params": {"foo1": "$foo1", "foo2": "$foo2", "sum_v": "$sum_v"},
"headers": {"User-Agent": "HttpRunner/${get_httprunner_version()}"},
"json": "$myjson",
},
"testcase": "CLS_LB(TestCaseDemo)CLS_RB",
"extract": {
"session_foo1": "body.args.foo1",
"session_foo2": "body.args.foo2",
},
"validate": [
{"eq": ["status_code", 200]},
{"eq": ["body.args.sum_v", "3"]},
],
}
teststep_chain_style = make_teststep_chain_style(step)
self.assertEqual(
teststep_chain_style,
"""
Step(RunRequest("get with params")
.with_variables(
**{'foo1': 'bar1', 'foo2': 123, 'sum_v': '${sum_two(1, 2)}',
'myjson': {'name': 'user', 'password': '123456'}})
.get("/get")
.with_params(**{'foo1': '$foo1', 'foo2': '$foo2', 'sum_v': '$sum_v'})
.with_headers(**{'User-Agent': 'HttpRunner/${get_httprunner_version()}'})
.with_json("$myjson")
.extract()
.with_jmespath('body.args.foo1', 'session_foo1')
.with_jmespath('body.args.foo2', 'session_foo2')
.validate()
.assert_equal("status_code", 200)
.assert_equal("body.args.sum_v", "3"))""",
)
| 34.477032
| 98
| 0.489495
|
4c3f98a026c580ff50fbfdbd5b284d8ac5269b29
| 2,481
|
py
|
Python
|
rotkehlchen/tests/unit/test_graph.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/tests/unit/test_graph.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/tests/unit/test_graph.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
from contextlib import ExitStack
from unittest.mock import MagicMock, patch
import pytest
from rotkehlchen.chain.ethereum.graph import Graph, format_query_indentation
from rotkehlchen.constants.timing import QUERY_RETRY_TIMES
from rotkehlchen.errors.misc import RemoteError
TEST_URL_1 = 'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2'
TEST_QUERY_1 = (
"""
tokenDayDatas
(
first: $limit,
) {{
date
token {{
id
}}
priceUSD
}}}}
"""
)
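# Note on the doubled braces in TEST_QUERY_1: the query string is passed through
# str.format() in the tests below, so '{{' / '}}' are escapes that produce literal
# '{' / '}' in the final GraphQL text, while '$limit' is substituted later by
# Graph.query(). A minimal illustration (not part of the original tests):
#   assert '{{ id }}'.format() == '{ id }'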
def test_exception_retries():
"""Test an exception raised by Client.execute() triggers the retry logic.
"""
graph = Graph(TEST_URL_1)
param_types = {'$limit': 'Int!'}
param_values = {'limit': 1}
querystr = format_query_indentation(TEST_QUERY_1.format())
client = MagicMock()
client.execute.side_effect = Exception("any message")
backoff_factor_patch = patch(
'rotkehlchen.chain.ethereum.graph.RETRY_BACKOFF_FACTOR',
new=0,
)
client_patch = patch.object(graph, 'client', new=client)
with ExitStack() as stack:
stack.enter_context(backoff_factor_patch)
stack.enter_context(client_patch)
with pytest.raises(RemoteError) as e:
graph.query(
querystr=querystr,
param_types=param_types,
param_values=param_values,
)
assert client.execute.call_count == QUERY_RETRY_TIMES
assert 'No retries left' in str(e.value)
def test_success_result():
"""Test a successful response returns result as expected and does not
triggers the retry logic.
"""
expected_result = {"schema": [{"data1"}, {"data2"}]}
graph = Graph(TEST_URL_1)
param_types = {'$limit': 'Int!'}
param_values = {'limit': 1}
querystr = format_query_indentation(TEST_QUERY_1.format())
client = MagicMock()
client.execute.return_value = expected_result
backoff_factor_patch = patch(
'rotkehlchen.chain.ethereum.graph.RETRY_BACKOFF_FACTOR',
        new=0,  # patch the constant itself, matching test_exception_retries above
)
client_patch = patch.object(graph, 'client', new=client)
with ExitStack() as stack:
stack.enter_context(backoff_factor_patch)
stack.enter_context(client_patch)
result = graph.query(
querystr=querystr,
param_types=param_types,
param_values=param_values,
)
assert client.execute.call_count == 1
assert result == expected_result
| 27.876404
| 77
| 0.657396
|
d26aebab8dc1102e1573a57f48900fb81e0ba444
| 22,723
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/physics/force_region/C5959809_ForceRegion_RotationalOffset.py
|
fromasmtodisasm/o3de
|
0d728a76778cb0ca88caa5c07f17162fac668b2a
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
AutomatedTesting/Gem/PythonTests/physics/force_region/C5959809_ForceRegion_RotationalOffset.py
|
fromasmtodisasm/o3de
|
0d728a76778cb0ca88caa5c07f17162fac668b2a
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
AutomatedTesting/Gem/PythonTests/physics/force_region/C5959809_ForceRegion_RotationalOffset.py
|
fromasmtodisasm/o3de
|
0d728a76778cb0ca88caa5c07f17162fac668b2a
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test case ID : C5959809
# Test Case Title : Verify Force Region Rotational Offset
# fmt:off
class Tests:
# General tests
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
test_completed = ("The test successfully completed", "The test timed out")
# ***** Entities found *****
# Force Regions
force_region_x_found = ("Force Region for X axis test was found", "Force Region for X axis test was NOT found")
force_region_y_found = ("Force Region for Y axis test was found", "Force Region for Y axis test was NOT found")
force_region_z_found = ("Force Region for Z axis test was found", "Force Region for Z axis test was NOT found")
# Force Region Pass Boxes
force_region_pass_box_x_found = ("Force Region Pass Box for X axis test was found", "Force Region Pass Box for X axis test was NOT found")
force_region_pass_box_y_found = ("Force Region Pass Box for Y axis test was found", "Force Region Pass Box for Y axis test was NOT found")
force_region_pass_box_z_found = ("Force Region Pass Box for Z axis test was found", "Force Region Pass Box for Z axis test was NOT found")
# External Pass Boxes
external_pass_box_x_found = ("External Pass Box for X axis test was found", "External Pass Box for X axis test was NOT found")
external_pass_box_y_found = ("External Pass Box for Y axis test was found", "External Pass Box for Y axis test was NOT found")
external_pass_box_z_found = ("External Pass Box for Z axis test was found", "External Pass Box for Z axis test was NOT found")
# Force Region Fail Boxes
force_region_fail_box_x_found = ("Force Region Fail Box for X axis test was found", "Force Region Fail Box for X axis test was NOT found")
force_region_fail_box_y_found = ("Force Region Fail Box for Y axis test was found", "Force Region Fail Box for Y axis test was NOT found")
force_region_fail_box_z_found = ("Force Region Fail Box for Z axis test was found", "Force Region Fail Box for Z axis test was NOT found")
# External Fail Boxes
external_fail_box_x_found = ("External Fail Box for X axis test was found", "External Fail Box for X axis test was NOT found")
external_fail_box_y_found = ("External Fail Box for Y axis test was found", "External Fail Box for Y axis test was NOT found")
external_fail_box_z_found = ("External Fail Box for Z axis test was found", "External Fail Box for Z axis test was NOT found")
# Pass spheres
sphere_pass_x_found = ("Pass Sphere for X axis test was found", "Pass Sphere for X axis test was NOT found")
sphere_pass_y_found = ("Pass Sphere for Y axis test was found", "Pass Sphere for Y axis test was NOT found")
sphere_pass_z_found = ("Pass Sphere for Z axis test was found", "Pass Sphere for Z axis test was NOT found")
# Bounce Spheres
sphere_bounce_x_found = ("Bounce Sphere for X axis test was found", "Bounce Sphere for X axis test was NOT found")
sphere_bounce_y_found = ("Bounce Sphere for Y axis test was found", "Bounce Sphere for Y axis test was NOT found")
sphere_bounce_z_found = ("Bounce Sphere for Z axis test was found", "Bounce Sphere for Z axis test was NOT found")
# ****** Entities' results ******
# Force Regions
force_region_x_mag_result = ("Force Region for X axis magnitude exerted was as expected", "Force Region for X axis magnitude exerted was NOT as expected")
force_region_y_mag_result = ("Force Region for Y axis magnitude exerted was as expected", "Force Region for Y axis magnitude exerted was NOT as expected")
force_region_z_mag_result = ("Force Region for Z axis magnitude exerted was as expected", "Force Region for Z axis magnitude exerted was NOT as expected")
force_region_x_norm_result = ("Force Region for X axis normal exerted was as expected", "Force Region for X axis normal exerted was NOT as expected")
force_region_y_norm_result = ("Force Region for Y axis normal exerted was as expected", "Force Region for Y axis normal exerted was NOT as expected")
force_region_z_norm_result = ("Force Region for Z axis normal exerted was as expected", "Force Region for Z axis normal exerted was NOT as expected")
# Force Region Pass Boxes
force_region_pass_box_x_result = ("Force Region Pass Box for X axis collided with exactly one sphere", "Force Region Pass Box for X axis DID NOT collide with exactly one sphere")
force_region_pass_box_y_result = ("Force Region Pass Box for Y axis collided with exactly one sphere", "Force Region Pass Box for Y axis DID NOT collide with exactly one sphere")
force_region_pass_box_z_result = ("Force Region Pass Box for Z axis collided with exactly one sphere", "Force Region Pass Box for Z axis DID NOT collide with exactly one sphere")
# External Pass Boxes
external_pass_box_x_result = ("External Pass Box for X axis collided with exactly one sphere", "External Pass Box for X axis DID NOT collide with exactly one sphere")
external_pass_box_y_result = ("External Pass Box for Y axis collided with exactly one sphere", "External Pass Box for Y axis DID NOT collide with exactly one sphere")
external_pass_box_z_result = ("External Pass Box for Z axis collided with exactly one sphere", "External Pass Box for Z axis DID NOT collide with exactly one sphere")
# Force Region Fail Boxes
force_region_fail_box_x_result = ("Force Region Fail Box for X axis collided with no spheres", "Force Region Fail Box for X axis DID collide with a sphere")
force_region_fail_box_y_result = ("Force Region Fail Box for Y axis collided with no spheres", "Force Region Fail Box for Y axis DID collide with a sphere")
force_region_fail_box_z_result = ("Force Region Fail Box for Z axis collided with no spheres", "Force Region Fail Box for Z axis DID collide with a sphere")
# External Fail Boxes
external_fail_box_x_result = ("External Fail Box for X axis collided with no spheres", "External Fail Box for X axis DID collide with a sphere")
external_fail_box_y_result = ("External Fail Box for Y axis collided with no spheres", "External Fail Box for Y axis DID collide with a sphere")
external_fail_box_z_result = ("External Fail Box for Z axis collided with no spheres", "External Fail Box for Z axis DID collide with a sphere")
# Pass spheres
sphere_pass_x_result = ("Pass Sphere for X axis collided with expected Box", "Pass Sphere for X axis DID NOT collide with expected Box")
sphere_pass_y_result = ("Pass Sphere for Y axis collided with expected Box", "Pass Sphere for Y axis DID NOT collide with expected Box")
sphere_pass_z_result = ("Pass Sphere for Z axis collided with expected Box", "Pass Sphere for Z axis DID NOT collide with expected Box")
# Bounce Spheres
sphere_bounce_x_result = ("Bounce Sphere for X axis collided with expected Box", "Bounce Sphere for X axis DID NOT collide with expected Box")
sphere_bounce_y_result = ("Bounce Sphere for Y axis collided with expected Box", "Bounce Sphere for Y axis DID NOT collide with expected Box")
sphere_bounce_z_result = ("Bounce Sphere for Z axis collided with expected Box", "Bounce Sphere for Z axis DID NOT collide with expected Box")
# fmt:on
@staticmethod
# Test tuple accessor via string
def get_test(test_name):
if test_name in Tests.__dict__:
return Tests.__dict__[test_name]
else:
return None
def C5959809_ForceRegion_RotationalOffset():
"""
Summary:
    Force Region rotational offset is tested for each of the 3 axes (X, Y, and Z). Each axis's test has one
    ForceRegion, two spheres and four boxes. By monitoring which box each sphere collides with we can validate the
    integrity of the ForceRegion's rotational offset.
Level Description:
Each axis's test has the following entities:
    one force region - set for point force and with its collider rotationally offset (on the axis under test).
two spheres - one positioned near the transform of the force region, one positioned near the [offset] collider for
the force region
four boxes - One box is positioned inside the force region's transform, one inside the force region's [offset]
collider. The other two boxes are positioned behind the two spheres (relative to the direction they will be
initially traveling)
Expected Behavior:
    All three axes' tests run in parallel. When the tests begin, the spheres should move toward their expected
force regions. The spheres positioned to collide with their region's [offset] collider should be forced backwards
before entering the force region and collide with the box behind it. The spheres positioned by their force region's
transforms should pass straight into the transform and collide with the box inside the transform.
The boxes inside the Force Regions' [offset] colliders and the boxes behind the spheres set to move into the Force
Regions' transforms should not register any collisions.
Steps:
1) Open level and enter game mode
2) Set up tests and variables
3) Wait for test results (or time out)
(Report results)
4) Exit game mode and close the editor
:return: None
"""
import os
import sys
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import azlmbr.legacy.general as general
import azlmbr.math as azmath
import azlmbr.bus
import azlmbr
# Constants
CLOSE_ENOUGH = 0.01 # Close enough threshold for comparing floats
TIME_OUT = 2.0 # Time out (in seconds) until test is aborted
FORCE_MAGNITUDE = 1000.0 # Point force magnitude for Force Regions
SPEED = 3.0 # Initial speed (in m/s) of the moving spheres.
# Full list for all spheres. Used for EntityId look up in event handlers
all_spheres = []
# Entity base class handles very general entity initialization
# Should be treated as a "virtual" class and all implementing child
# classes should implement a "self.result()" function referenced in EntityBase::report(self)
class EntityBase:
def __init__(self, name):
# type: (str) -> None
self.name = name
self.print_list = []
self.id = general.find_game_entity(name)
found_test = Tests.get_test(name + "_found")
Report.critical_result(found_test, self.id.IsValid())
# Reports this entity's result. Implicitly calls "get" on result.
# Subclasses implement their own definition of a successful result
def report(self):
# type: () -> None
result_test = Tests.get_test(self.name + "_result")
Report.result(result_test, self.result())
# Prints the print queue (with decorated header) if not empty
def print_log(self):
# type: () -> None
if self.print_list:
Report.info("*********** {} **********".format(self))
for line in self.print_list:
Report.info(line)
Report.info("")
# Quick string cast, returns entity name
def __str__(self):
# type: () -> str
return self.name
# ForceRegion handles all the data and behavior associated with a ForceRegion (for this test)
# They simply wait for a Sphere to collide with them. On collision they store the calculated force
# magnitude for verification.
class ForceRegion(EntityBase):
def __init__(self, name, magnitude):
# type: (str, float) -> None
EntityBase.__init__(self, name)
self.expected_magnitude = magnitude
self.actual_magnitude = None
self.expected_normal = None
self.actual_normal = None
# Set point force Magnitude
azlmbr.physics.ForcePointRequestBus(azlmbr.bus.Event, "SetMagnitude", self.id, magnitude)
# Set up handler
self.handler = azlmbr.physics.ForceRegionNotificationBusHandler()
self.handler.connect(None)
self.handler.add_callback("OnCalculateNetForce", self.on_calc_force)
# Callback function for OnCalculateNetForce event
def on_calc_force(self, args):
# type: ([EntityId, EntityId, azmath.Vector3, float]) -> None
if self.id.Equal(args[0]) and self.actual_magnitude is None:
for sphere in all_spheres:
if sphere.id.Equal(args[1]):
# Log event in print queue (for me and for the sphere)
self.print_list.append("Exerting force on {}:".format(sphere))
sphere.print_list.append("Force exerted by {}".format(self))
# Save calculated data to be compared later
self.actual_normal = args[2]
self.actual_magnitude = args[3]
pos = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", self.id)
sphere_pos = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", sphere.id)
self.expected_normal = sphere_pos.Subtract(pos).GetNormalizedSafe()
# Add expected/actual to print queue
self.print_list.append("Force Vector: ")
self.print_list.append(
" Expected: ({:.2f}, {:.2f}, {:.2f})".format(
self.expected_normal.x, self.expected_normal.y, self.expected_normal.z
)
)
self.print_list.append(
" Actual: ({:.2f}, {:.2f}, {:.2f})".format(
self.actual_normal.x, self.actual_normal.y, self.actual_normal.z
)
)
self.print_list.append("Force Magnitude: ")
self.print_list.append(" Expected: {}".format(self.expected_magnitude))
self.print_list.append(" Actual: {:.2f}".format(self.actual_magnitude))
# EntityBase::report() overload.
# Force regions have 2 test tuples to report on
def report(self):
magnitude_test = Tests.get_test(self.name + "_mag_result")
normal_test = Tests.get_test(self.name + "_norm_result")
Report.result(magnitude_test, self.magnitude_result())
Report.result(normal_test, self.normal_result())
# Test result calculations
# Used in EntityBase for reporting results
def result(self):
# type: () -> bool
return self.magnitude_result() and self.normal_result()
def magnitude_result(self):
# type: () -> bool
return (
self.actual_magnitude is not None
and abs(self.actual_magnitude - self.expected_magnitude) < CLOSE_ENOUGH
)
def normal_result(self):
# type: () -> bool
return (
self.actual_normal is not None
and self.expected_normal is not None
and self.expected_normal.IsClose(self.actual_normal, CLOSE_ENOUGH)
)
# Spheres are the objects that test the force regions. They store an expected collision entity and an
# actual collision entity
class Sphere(EntityBase):
def __init__(self, name, initial_velocity, expected_collision):
# type: (str, azmath.Vector3, EntityBase) -> None
EntityBase.__init__(self, name)
self.initial_velocity = initial_velocity
azlmbr.physics.RigidBodyRequestBus(azlmbr.bus.Event, "SetLinearVelocity", self.id, initial_velocity)
self.print_list.append(
"Initial velocity: ({:.2f}, {:.2f}, {:.2f})".format(
initial_velocity.x, initial_velocity.y, initial_velocity.z
)
)
self.expected_collision = expected_collision
self.print_list.append("Expected Collision: {}".format(expected_collision))
self.actual_collision = None
self.active = True
self.force_normal = None
# Registers a collision with this sphere. Saves a reference to the colliding entity for processing later.
# Deactivate self after collision is registered.
def collide(self, collision_entity):
# type: (EntityBase) -> None
# Log the event
self.print_list.append("Collided with {}".format(collision_entity))
self.actual_collision = collision_entity
# Deactivate self
azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity",
self.id)
self.active = False
# Calculates result
# Used in EntityBase for reporting results
def result(self):
if self.actual_collision is None:
return False
else:
return self.expected_collision.id.Equal(self.actual_collision.id)
# Box entities wait for a collision with a sphere as a means of validation the force region's offset
# worked according to plan.
class Box(EntityBase):
def __init__(self, name, expected_sphere_collisions):
# type: (str, int) -> None
EntityBase.__init__(self, name)
self.spheres_collided = 0
self.expected_sphere_collisions = expected_sphere_collisions
# Set up handler
self.handler = azlmbr.physics.CollisionNotificationBusHandler()
self.handler.connect(self.id)
self.handler.add_callback("OnCollisionBegin", self.on_collision_begin)
# Callback function for OnCollisionBegin event
def on_collision_begin(self, args):
for sphere in all_spheres:
if sphere.id.Equal(args[0]):
# Log event
self.print_list.append("Collided with {}".format(sphere))
# Register collision with sphere
sphere.collide(self)
self.spheres_collided += 1 # Count collisions for validation later
break
# Calculates test result
# Used in EntityBase for reporting results
def result(self):
return self.spheres_collided == self.expected_sphere_collisions
# Manages the entities required to run the test for one axis (X, Y, or Z)
class AxisTest:
def __init__(self, axis, init_velocity):
# type: (str, azmath.Vector3) -> None
self.name = axis + " axis test"
self.force_region = ForceRegion("force_region_" + axis, FORCE_MAGNITUDE)
self.spheres = [
Sphere("sphere_pass_" + axis, init_velocity, Box("force_region_pass_box_" + axis, 1)),
Sphere("sphere_bounce_" + axis, init_velocity, Box("external_pass_box_" + axis, 1)),
]
self.boxes = [
Box("external_fail_box_" + axis, 0),
Box("force_region_fail_box_" + axis, 0)
] + [
sphere.expected_collision for sphere in self.spheres
# Gets the Boxes passed to spheres on init
]
# Full list for all entities this test is responsible for
self.all_entities = self.boxes + self.spheres + [self.force_region]
# Add spheres to global "lookup" list
all_spheres.extend(self.spheres)
# Checks for all entities' test passing conditions
def passed(self):
return all([e.result() for e in self.all_entities])
# Returns true when this test has completed (i.e. when the spheres have collided and are deactivated)
def completed(self):
return all([not sphere.active for sphere in self.spheres])
# Reports results for all entities in this test
def report(self):
Report.info("::::::::::::::::::::::::::::: {} Results :::::::::::::::::::::::::::::".format(self.name))
for entity in self.all_entities:
entity.report()
# Prints the logs for all entities in this test
def print_log(self):
Report.info("::::::::::::::::::::::::::::: {} Log :::::::::::::::::::::::::::::".format(self.name))
for entity in self.all_entities:
entity.print_log()
# *********** Execution Code ***********
# 1) Open level
helper.init_idle()
helper.open_level("Physics", "C5959809_ForceRegion_RotationalOffset")
helper.enter_game_mode(Tests.enter_game_mode)
# 2) Variable set up
# Initial velocities for the three different directions spheres will be moving
x_vel = azmath.Vector3(SPEED, 0.0, 0.0)
y_vel = azmath.Vector3(0.0, SPEED, 0.0)
z_vel = azmath.Vector3(0.0, 0.0, SPEED)
# The three tests, one for each axis
axis_tests = [AxisTest("x", x_vel), AxisTest("y", y_vel), AxisTest("z", z_vel)]
# 3) Wait for test results or time out
Report.result(
Tests.test_completed, helper.wait_for_condition(
lambda: all([test.completed() for test in axis_tests]), TIME_OUT
)
)
# Report results
for test in axis_tests:
test.report()
# Print entity print queues for each failed test
for test in axis_tests:
if not test.passed():
test.print_log()
# 4) Exit game mode and close editor
helper.exit_game_mode(Tests.exit_game_mode)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(C5959809_ForceRegion_RotationalOffset)
| 56.106173
| 182
| 0.639088
|
e04d3eaf89e31bd89c738900e099ce1858371baa
| 567
|
py
|
Python
|
lifelib/tests/data/generate_testdata.py
|
fumitoh/lifelib
|
01b6fec4453b309808c1c7ca6867c7dce50668dc
|
[
"MIT"
] | 77
|
2018-03-02T05:21:43.000Z
|
2022-03-26T20:29:59.000Z
|
lifelib/tests/data/generate_testdata.py
|
dayeoni-1376/lifelib
|
e65ba42843e8ae5f00ea795a8bb29ccd6e99ba54
|
[
"MIT"
] | 10
|
2018-02-17T03:07:20.000Z
|
2021-11-15T13:40:15.000Z
|
lifelib/tests/data/generate_testdata.py
|
dayeoni-1376/lifelib
|
e65ba42843e8ae5f00ea795a8bb29ccd6e99ba54
|
[
"MIT"
] | 24
|
2018-03-12T20:01:06.000Z
|
2022-03-07T06:06:18.000Z
|
import sys, os, pickle
from lifelib.projects.simplelife.scripts import simplelife
from lifelib.tests.data import round_signif
filepath = os.path.join(os.path.dirname(__file__), 'data_simplelife')
if '' not in sys.path:
sys.path.insert(0, '')
def generate_data(model):
data = []
proj = model.Projection
for i in range(10, 301, 10):
data.append(round_signif(proj(i).PV_NetCashflow(0), 10))
with open(filepath, 'wb') as file:
pickle.dump(data, file, protocol=4)
if __name__ == '__main__':
generate_data(simplelife.build())
| 24.652174
| 69
| 0.691358
|
f124f35980941071775aa588d64a86cb89301ccd
| 125,039
|
py
|
Python
|
kiali_qe/tests/__init__.py
|
prachiyadav/kiali-qe-python
|
86b5b491b5560f8afd7d07d6ced093c3531d47c0
|
[
"Apache-2.0"
] | 5
|
2019-02-28T16:53:23.000Z
|
2022-03-03T19:14:45.000Z
|
kiali_qe/tests/__init__.py
|
prachiyadav/kiali-qe-python
|
86b5b491b5560f8afd7d07d6ced093c3531d47c0
|
[
"Apache-2.0"
] | 321
|
2018-04-20T13:51:06.000Z
|
2022-01-27T08:56:46.000Z
|
kiali_qe/tests/__init__.py
|
prachiyadav/kiali-qe-python
|
86b5b491b5560f8afd7d07d6ced093c3531d47c0
|
[
"Apache-2.0"
] | 37
|
2018-04-17T06:29:42.000Z
|
2022-03-01T17:11:17.000Z
|
import random
import re
import time
import math
from kiali_qe.components import (
BreadCrumb,
wait_to_spinner_disappear,
ListViewAbstract
)
from kiali_qe.components.enums import (
ServicesPageFilter,
IstioConfigPageFilter,
WorkloadsPageFilter,
ApplicationsPageFilter,
OverviewPageFilter,
OverviewViewType,
IstioConfigObjectType as OBJECT_TYPE,
IstioConfigValidation,
MainMenuEnum as MENU,
MetricsSource,
MetricsHistograms,
InboundMetricsFilter,
OutboundMetricsFilter,
TimeIntervalUIText,
MetricsTimeInterval,
GraphRefreshInterval,
OverviewPageType,
RoutingWizardType,
ApplicationsPageSort,
OverviewPageSort,
WorkloadsPageSort,
ServicesPageSort,
IstioConfigPageSort,
RoutingWizardTLS,
RoutingWizardLoadBalancer,
TrafficType,
OverviewInjectionLinks,
OverviewGraphTypeLink,
OverviewTrafficLinks,
TailLines,
TLSMutualValues,
IstioConfigObjectType,
AuthPolicyType,
AuthPolicyActionType,
LabelOperation,
VersionLabel,
AppLabel,
IstioSidecar,
OverviewHealth,
OverviewMTSLStatus,
MeshWideTLSType
)
from kiali_qe.components.error_codes import (
KIA0201,
KIA0301,
KIA0205,
KIA0501,
KIA0401,
KIA0204,
KIA0206
)
from kiali_qe.rest.kiali_api import ISTIO_CONFIG_TYPES
from kiali_qe.rest.openshift_api import APP_NAME_REGEX
from kiali_qe.utils import (
is_equal,
is_sublist,
word_in_text,
get_url,
get_yaml_path,
remove_from_list,
dict_contains
)
from kiali_qe.utils.log import logger
from kiali_qe.utils.command_exec import oc_apply, oc_delete
from time import sleep
from selenium.webdriver.common.keys import Keys
from kiali_qe.pages import (
ServicesPage,
IstioConfigPage,
WorkloadsPage,
ApplicationsPage,
OverviewPage,
DistributedTracingPage,
GraphPage
)
class AbstractListPageTest(object):
FILTER_ENUM = None
SORT_ENUM = None
SELECT_ITEM = ListViewAbstract.ITEMS + '//a[text()="{}"]'
SELECT_ITEM_WITH_NAMESPACE = SELECT_ITEM + '/../../td[contains(text(), "{}")]/..//a'
def __init__(self, kiali_client, openshift_client, page):
self.kiali_client = kiali_client
self.openshift_client = openshift_client
self.page = page
def _namespaces_ui(self):
return self.page.namespace.items
def get_mesh_wide_tls(self):
return self.page.content.get_mesh_wide_tls()
def assert_all_items(self, namespaces=[], filters=[], force_clear_all=True,
label_operation=LabelOperation.OR):
"""
Apply the supplied filters in the UI and assert the content against REST and OC.
Parameters
----------
namespaces: list of namespace names
filters : list
A list of filters; each filter is a dict, e.g.
filter = {'name': 'Namespace', 'value': 'bookinfo'}
Take the filter name from the predefined enum.
force_clear_all : boolean
Default True.
If True, all existing applied filters are removed first;
otherwise the new filters are merged with the already applied ones.
In both cases the final outcome is the same.
"""
raise NotImplementedError('This method should be implemented on sub class')
def get_additional_filters(self, namespaces, current_filters):
raise NotImplementedError('This method should be implemented on sub class')
def open(self, name, namespace=None, force_refresh=False):
# TODO: extra wait added to work around unstable performance
self.browser.send_keys_to_focused_element(Keys.ESCAPE)
sleep(0.5)
wait_to_spinner_disappear(self.browser)
if namespace is not None:
self.browser.click(self.browser.element(
self.SELECT_ITEM_WITH_NAMESPACE.format(name, namespace), parent=self))
else:
self.browser.click(self.browser.element(self.SELECT_ITEM.format(name), parent=self))
if force_refresh:
self.page.page_refresh()
wait_to_spinner_disappear(self.browser)
def sidecar_presents(self, sidecar_filter, item_sidecar):
if item_sidecar:
return sidecar_filter == IstioSidecar.PRESENT.text
else:
return sidecar_filter == IstioSidecar.NOT_PRESENT.text
def health_equals(self, health_filter, health):
return health and health_filter == health.text
def is_in_details_page(self, name, namespace):
breadcrumb = BreadCrumb(self.page)
if len(breadcrumb.locations) < 3:
return False
menu_location = breadcrumb.locations[0]
if menu_location != self.page.PAGE_MENU:
return False
namespace_location = breadcrumb.locations[1]
if namespace_location != "Namespace: " + namespace:
return False
object_location = breadcrumb.active_location
if object_location != "{}".format(name):
return False
return True
def apply_namespaces(self, namespaces, force_clear_all=False):
"""
Apply the supplied namespaces in the UI and assert that the applied namespaces match.
Parameters
----------
namespaces : list
A list of namespace names.
force_clear_all : boolean
Default False.
If True, all existing applied namespaces are removed first.
"""
logger.debug('Setting namespace filter: {}'.format(namespaces))
_pre_filters = []
# clear all filters
if force_clear_all:
self.page.namespace.clear_all()
assert len(self.page.namespace.checked_items) == 0
else:
_pre_filters.extend(self.page.namespace.checked_items)
if not namespaces:
self.page.namespace.select_all()
else:
# apply namespaces
for _filter in namespaces:
if _filter not in _pre_filters:
self.page.namespace.check(_filter)
if _filter in _pre_filters:
_pre_filters.remove(_filter)
# remove filters not in list
for _filter in _pre_filters:
self.page.namespace.uncheck(_filter)
self.assert_applied_namespaces(namespaces)
def apply_filters(self, filters, force_clear_all=True):
"""
Apply the supplied filters in the UI and assert that the applied filters match.
Parameters
----------
filters : list
A list of filters; each filter is a dict, e.g.
filter = {'name': 'Health', 'value': 'Healthy'}
Take the filter name from the predefined enum.
force_clear_all : boolean
Default True.
If True, all existing applied filters are removed first;
otherwise the new filters are merged with the already applied ones.
In both cases the final outcome is the same.
"""
logger.debug('Setting filters: {}'.format(filters))
_pre_filters = []
# clear all filters
if force_clear_all:
self.page.filter.clear_all()
assert len(self.page.filter.active_filters) == 0
else:
_pre_filters.extend(self.page.filter.active_filters)
# apply filter
for _filter in filters:
if _filter not in _pre_filters:
self.page.filter.apply(filter_name=_filter['name'], value=_filter['value'])
if _filter in _pre_filters:
_pre_filters.remove(_filter)
# remove filters not in list
for _filter in _pre_filters:
self.page.filter.remove(filter_name=_filter['name'], value=_filter['value'])
self.assert_applied_filters(filters)
self.browser.send_keys_to_focused_element(Keys.ESCAPE)
sleep(0.2)
def apply_label_operation(self, label_operation):
assert self.page.filter._label_operation.is_displayed, 'Label Operation is not displayed'
self.page.filter._label_operation.select(label_operation)
def assert_filter_options(self):
# test available options
options_defined = [item.text for item in self.FILTER_ENUM]
options_listed = self.page.filter.filters
logger.debug('Options[defined:{}, listed:{}]'.format(options_defined, options_listed))
assert is_equal(options_defined, options_listed), \
'Defined: {} Listed: {}'.format(options_defined, options_listed)
def assert_applied_filters(self, filters):
# validate applied filters
wait_to_spinner_disappear(self.browser)
_active_filters = self.page.filter.active_filters
logger.debug('Filters[applied:{}, active:{}]'.format(filters, _active_filters))
assert is_equal(filters, _active_filters), \
'Defined: {} Listed: {}'.format(filters, _active_filters)
def assert_applied_namespaces(self, filters):
# validate applied namespaces
_active_filters = self.page.namespace.checked_items
logger.debug('Filters[applied:{}, active:{}]'.format(filters, _active_filters))
assert is_equal(filters, _active_filters), \
'Defined: {} Listed: {}'.format(filters, _active_filters)
def assert_namespaces(self):
namespaces_ui = self._namespaces_ui()
namespaces_rest = self.kiali_client.namespace_list()
namespaces_oc = self.openshift_client.namespace_list()
logger.debug('Namespaces UI:{}'.format(namespaces_ui))
logger.debug('Namespaces REST:{}'.format(namespaces_rest))
logger.debug('Namespaces OC:{}'.format(namespaces_oc))
assert is_equal(namespaces_ui, namespaces_rest)
assert is_sublist(namespaces_rest, namespaces_oc)
def assert_filter_feature_random(self):
# clear filters if any
# TODO: should the test fail if a filter is already applied before the test?
logger.debug('Filters before test:{}'.format(self.page.filter.active_filters))
self.page.filter.clear_all()
# get namespaces
namespaces_ui = self._namespaces_ui()
# apply a namespace filter
# generate random filters list
_defined_filters = []
# random namespace filters
assert len(namespaces_ui) > 0
if len(namespaces_ui) > 3:
_random_namespaces = random.sample(namespaces_ui, 3)
else:
_random_namespaces = namespaces_ui
# add additional filters
logger.debug('Adding additional filters')
_defined_filters.extend(self.get_additional_filters(_random_namespaces, _defined_filters))
logger.debug('Defined filters with additional filters:{}'.format(_defined_filters))
# apply filters test
_applied_filters = []
for _defined_filter in _defined_filters:
# add it in to applied list
_applied_filters.append(_defined_filter)
# apply filter and check the contents
self.assert_all_items(namespaces=_random_namespaces,
filters=_applied_filters,
force_clear_all=False)
# remove filters test
for _defined_filter in _defined_filters:
# remove it from our list
_applied_filters.remove(_defined_filter)
# apply filter and check the contents
self.assert_all_items(namespaces=_random_namespaces,
filters=_applied_filters,
force_clear_all=False)
# test remove all
if len(_applied_filters) == 2:
self.assert_all_items(namespaces=[], filters=[], force_clear_all=True)
break
def sort(self, sort_options=[]):
"""
Sorts the listed items.
Parameters
----------
sort_options : list of 2 values: [option, is_ascending]
option: SortEnum item, the sorting option to select
is_ascending: boolean, True for ascending, False for descending
"""
logger.debug('Sorting by: {}'.format(sort_options))
if len(sort_options) == 2:
self.page.sort.select(sort_options[0], sort_options[1])
def assert_sort_options(self):
# test available options
options_defined = [item.text for item in self.SORT_ENUM]
options_listed = self.page.sort.options
logger.debug('Options[defined:{}, listed:{}]'.format(options_defined, options_listed))
assert is_equal(options_defined, options_listed), \
'Defined: {} Listed: {}'.format(options_defined, options_listed)
def assert_metrics_options(self, metrics_page, check_grafana=False):
metrics_page.open()
self._assert_metrics_settings(metrics_page)
self._assert_metrics_destination(metrics_page)
self._assert_metrics_duration(metrics_page)
self._assert_metrics_interval(metrics_page)
if check_grafana:
self._assert_grafana_link(metrics_page)
def _assert_metrics_settings(self, metrics_page):
# test available filters
options_defined = [item.text for item in (
InboundMetricsFilter if "Inbound" in metrics_page.tab_name
else OutboundMetricsFilter)]
for item in MetricsHistograms:
options_defined.append(item.text)
options_listed = metrics_page.filter.items
logger.debug('Filter options[defined:{}, listed:{}]'
.format(options_defined, options_listed))
assert is_sublist(options_defined, options_listed), \
('Filter Options mismatch: defined:{}, listed:{}'
.format(options_defined, options_listed))
# enable/disable each filter, using the defined options
for filter_name in options_defined:
self._filter_test(metrics_page, filter_name)
def _filter_test(self, page, filter_name, uncheck=True):
# TODO: 'Quantile 0.nnn' item text spans 2 lines, so skip it
if "Quantile" in str(filter_name):
return
# test filter checked
page.filter.check(filter_name)
assert page.filter.is_checked(filter_name) is True
if uncheck:
# test filter unchecked
page.filter.uncheck(filter_name)
assert page.filter.is_checked(filter_name) is False
def _assert_metrics_destination(self, metrics_page):
self._assert_metrics_options(metrics_page, MetricsSource, 'destination')
def _assert_metrics_duration(self, metrics_page):
self._assert_metrics_options(metrics_page, MetricsTimeInterval, 'duration')
def _assert_metrics_interval(self, metrics_page):
self._assert_metrics_options(metrics_page, GraphRefreshInterval, 'interval')
def _assert_metrics_options(self, metrics_page, enum, attr_name):
options_defined = [item.text for item in enum]
attr = getattr(metrics_page, attr_name)
options_listed = attr.options
logger.debug('Options[defined:{}, listed:{}]'.format(options_defined, options_listed))
assert is_equal(options_defined, options_listed), \
('Options mismatch: defined:{}, listed:{}'.format(options_defined, options_listed))
def _assert_grafana_link(self, metrics_page):
_response = self.kiali_client.get_response('getStatus')
_products = _response['externalServices']
assert metrics_page.view_in_grafana
assert get_url(_products, 'Grafana') in metrics_page.view_in_grafana
def is_host_link(self, link_name):
self.browser.click(self.browser.element(locator=self.page.content.CONFIG_TAB_OVERVIEW,
parent=self.page.content.CONFIG_TABS_PARENT))
return len(self.browser.elements(
'.//div[@id="subsets"]//a[contains(text(), "{}")]'.format(
link_name),
parent=self.page.content.locator)) > 0
def assert_breadcrumb_menu(self, name, namespace):
breadcrumb = self.load_details_page(name, namespace, force_refresh=False, load_only=True)
menu_location = breadcrumb.locations[0]
assert menu_location == self.page.PAGE_MENU
breadcrumb.click_location(menu_location)
self.assert_applied_namespaces(filters=[namespace])
def assert_breadcrumb_namespace(self, name, namespace):
breadcrumb = self.load_details_page(name, namespace, force_refresh=False, load_only=True)
namespace_location = breadcrumb.locations[1]
assert namespace_location == "Namespace: " + namespace
breadcrumb.click_location(namespace_location)
self.assert_applied_namespaces(filters=[namespace])
def assert_breadcrumb_object(self, name, namespace):
breadcrumb = self.load_details_page(name, namespace, force_refresh=False, load_only=True)
object_location = breadcrumb.active_location
assert object_location == "{}".format(name)
def assert_traces_tab(self, traces_tab):
traces_tab.open()
self.assert_traces_tab_content(traces_tab)
def assert_traces_tab_content(self, traces_tab):
assert not traces_tab.traces.is_oc_login_displayed, "OC Login should not be displayed"
if not traces_tab.traces.has_no_results:
assert traces_tab.traces.has_results
def assert_logs_tab(self, logs_tab, all_pods=[]):
_filter = "GET"
logs_tab.open()
if len(all_pods) == 0:
assert 'No logs for Workload' in \
self.browser.text(locator='//h5[contains(@class, "pf-c-title")]', parent=self)
return
assert is_equal(all_pods, logs_tab.pods.options)
assert is_equal([item.text for item in TailLines],
logs_tab.tail_lines.options)
_interval_options = [item.text for item in TimeIntervalUIText]
_interval_options.append('Custom')
assert is_equal(_interval_options,
logs_tab.duration.options)
assert is_equal([item.text for item in GraphRefreshInterval],
logs_tab.interval.options)
logs_tab.log_hide.fill(_filter)
self.browser.click(logs_tab.refresh)
wait_to_spinner_disappear(self.browser)
assert _filter not in logs_tab.logs_textarea.text
def assert_traffic(self, name, traffic_tab, self_object_type, traffic_object_type):
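# For the first non-istio entry of the requested traffic type, drill into its
# own traffic view and check that the reverse entry for this object reports
# the same status and a comparable RPS / success rate (abs tolerance 2.0).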
bound_traffic = traffic_tab.traffic_items()
for bound_item in bound_traffic:
if bound_item.object_type == traffic_object_type:
# skip istio traffic
if "istio" in bound_item.name:
continue
outbound_traffic = traffic_tab.click_on(
object_type=traffic_object_type, name=bound_item.name)
found = False
for outbound_item in outbound_traffic:
if (outbound_item.name == name
and outbound_item.object_type == self_object_type
and outbound_item.request_type == bound_item.request_type
and outbound_item.bound_traffic_type != bound_item.bound_traffic_type):
found = True
assert bound_item.status == outbound_item.status, \
"Inbound Status {} is not equal to Outbound Status {} for {}".format(
bound_item.status, outbound_item.status, name)
assert math.isclose(bound_item.rps, outbound_item.rps, abs_tol=2.0), \
"Inbound RPS {} is not equal to Outbound RPS {} for {}".format(
bound_item.rps,
outbound_item.rps,
name)
assert math.isclose(bound_item.success_rate,
outbound_item.success_rate,
abs_tol=2.0), \
"Inbound Rate {} is not equal to Outbound Rate {} for {}".format(
bound_item.success_rate, outbound_item.success_rate, name)
if not found:
assert found, "{} {} {} not found in {}".format(name,
self_object_type,
bound_item.request_type,
outbound_traffic)
# check only the first item
break
def assert_graph_overview(self, name, namespace):
logger.debug('Asserting Graph Overview for: {}, in namespace: {}'.format(
name, namespace))
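# Open the full graph from the details page and verify the side panel shows
# the expected application / workload / service entries for this page type,
# then spot-check the Traces tab and navigate back.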
self.load_details_page(name, namespace, force_refresh=False, load_only=True)
self.page.content.graph_menu.select('Show full graph')
graph_page = GraphPage(self.browser)
side_panel = graph_page.side_panel
assert not side_panel.get_namespace()
if self.page.PAGE_MENU == MENU.APPLICATIONS.text:
assert not side_panel.get_workload()
assert side_panel.get_service()
if side_panel.get_application():
assert name == side_panel.get_application()
elif self.page.PAGE_MENU == MENU.WORKLOADS.text:
if side_panel.get_workload():
assert name == side_panel.get_workload()
assert side_panel.get_service()
assert side_panel.get_application()
elif self.page.PAGE_MENU == MENU.SERVICES.text:
assert not side_panel.get_workload()
if side_panel.get_service():
assert name == side_panel.get_service()
assert side_panel.get_application()
else:
assert False, "Graph Overview Page is not recognized"
assert side_panel.show_traffic()
assert side_panel.show_traces()
# assert this at the end of the test
_traces_tab = side_panel.go_to_traces()
assert _traces_tab
self.assert_traces_tab_content(_traces_tab)
self.browser.execute_script("history.back();")
def assert_istio_configs(self, object_ui, object_rest, object_oc, namespace):
assert len(object_rest.istio_configs) == len(object_ui.istio_configs), \
'UI configs should be equal to REST configs items'
assert len(object_rest.istio_configs) == len(object_oc.istio_configs), \
'REST configs should be equal to OC configs items'
for istio_config_ui in object_ui.istio_configs:
found = False
for istio_config_rest in object_rest.istio_configs:
if istio_config_ui.name == istio_config_rest.name and \
istio_config_ui.type == istio_config_rest.object_type and \
istio_config_ui.status == istio_config_rest.validation:
found = True
break
if not found:
assert found, 'Config {} not found in REST {}'.format(istio_config_ui,
istio_config_rest)
found = False
for istio_config_oc in object_oc.istio_configs:
if istio_config_ui.name == istio_config_oc.name and \
istio_config_ui.type == istio_config_oc.object_type and \
namespace == istio_config_oc.namespace:
found = True
break
if not found:
assert found, 'Config {} not found in OC {}'.format(istio_config_ui,
istio_config_oc)
config_overview_ui = self.page.content.card_view_istio_config.get_overview(
istio_config_ui.name,
istio_config_ui.type)
config_details_oc = self.openshift_client.istio_config_details(
namespace=namespace,
object_name=istio_config_ui.name,
object_type=istio_config_ui.type)
assert istio_config_ui.status == config_overview_ui.status, \
'UI Status {} not equal to Overview Status {}'.format(
istio_config_ui.status,
config_overview_ui.status)
assert istio_config_ui.status == istio_config_rest.validation, \
'UI Status {} not equal to REST Status {}'.format(
istio_config_ui.status,
istio_config_rest.validation)
if istio_config_ui.type == IstioConfigObjectType.PEER_AUTHENTICATION.text:
assert '\'app\': \'{}\''.format(re.sub(
APP_NAME_REGEX,
'',
object_ui.name)) in config_overview_ui.text
elif istio_config_ui.type == IstioConfigObjectType.VIRTUAL_SERVICE.text:
for _host in config_overview_ui.hosts:
assert '\'host\': \'{}\''.format(_host) in config_details_oc.text
for _gateway in config_overview_ui.gateways:
assert _gateway.text in config_details_oc.text
else:
assert '\'host\': \'{}\''.format(config_overview_ui.host) in config_details_oc.text
for _rest_dr in object_rest.destination_rules:
if _rest_dr.name == istio_config_ui.name:
for _ui_subset in config_overview_ui.subsets:
found = False
for _rest_subset in _rest_dr.subsets:
if _ui_subset.name == _rest_subset.name and \
dict_contains(_ui_subset.labels, _rest_subset.labels) and \
_ui_subset.traffic_policy == _rest_subset.traffic_policy:
found = True
assert found, 'Subset {} not found in REST {}'.format(
_ui_subset, _rest_subset)
class OverviewPageTest(AbstractListPageTest):
FILTER_ENUM = OverviewPageFilter
TYPE_ENUM = OverviewPageType
SORT_ENUM = OverviewPageSort
VIEW_ENUM = OverviewViewType
GRAPH_LINK_TYPES = {TYPE_ENUM.APPS: OverviewGraphTypeLink.APP,
TYPE_ENUM.SERVICES: OverviewGraphTypeLink.SERVICE,
TYPE_ENUM.WORKLOADS: OverviewGraphTypeLink.WORKLOAD}
def _namespaces_ui(self):
return self.page.filter.filter_options(filter_name=self.FILTER_ENUM.NAME.text)
def __init__(self, kiali_client, openshift_client, browser):
AbstractListPageTest.__init__(
self, kiali_client=kiali_client,
openshift_client=openshift_client, page=OverviewPage(browser))
self.browser = browser
def assert_type_options(self):
# test available type options
options_defined = [item.text for item in self.TYPE_ENUM]
options_listed = self.page.type.options
logger.debug('Options[defined:{}, listed:{}]'.format(options_defined, options_listed))
assert is_equal(options_defined, options_listed)
def assert_all_items(self, filters=[],
overview_type=TYPE_ENUM.APPS, force_clear_all=True,
list_type=VIEW_ENUM.COMPACT,
force_refresh=False):
# apply overview type
self.page.type.select(overview_type.text)
# apply filters
self.apply_filters(filters=filters, force_clear_all=force_clear_all)
if force_refresh:
self.page.page_refresh()
# get overviews from rest api
_ns = self.FILTER_ENUM.NAME.text
_namespaces = [_f['value'] for _f in filters if _f['name'] == _ns]
logger.debug('Namespaces:{}'.format(_namespaces))
overviews_rest = self._apply_overview_filters(self.kiali_client.overview_list(
namespaces=_namespaces,
overview_type=overview_type),
filters)
# get overviews from ui
if list_type == self.VIEW_ENUM.LIST:
overviews_ui = self.page.content.list_items
elif list_type == self.VIEW_ENUM.EXPAND:
overviews_ui = self.page.content.expand_items
else:
overviews_ui = self.page.content.compact_items
# compare all results
logger.debug('Namespaces:{}'.format(_namespaces))
logger.debug('Items count[UI:{}, REST:{}]'.format(
len(overviews_ui), len(overviews_rest)))
logger.debug('overviews UI:{}'.format(overviews_ui))
logger.debug('overviews REST:{}'.format(overviews_rest))
assert len(overviews_ui) == len(overviews_rest)
for overview_ui in overviews_ui:
found = False
for overview_rest in overviews_rest:
if overview_ui.is_equal(overview_rest, advanced_check=True):
found = True
assert (overview_ui.healthy +
overview_ui.unhealthy +
overview_ui.degraded +
overview_ui.na +
overview_ui.idle) == \
(overview_rest.healthy +
overview_rest.unhealthy +
overview_rest.degraded +
overview_rest.na +
overview_rest.idle)
break
if not found:
assert found, '{} not found in REST {}'.format(overview_ui, overviews_rest)
self._assert_overview_config_status(overview_ui.namespace, overview_ui.config_status)
assert self.kiali_client.namespace_labels(overview_ui.namespace) == \
self.openshift_client.namespace_labels(
overview_ui.namespace)
def _apply_overview_filters(self, overviews=[], filters=[],
skip_health=False,
skip_mtls=False):
_ol = self.FILTER_ENUM.LABEL.text
_labels = [_f['value'] for _f in filters if _f['name'] == _ol]
logger.debug('Namespace Labels:{}'.format(_labels))
_omtls = self.FILTER_ENUM.MTLS_STATUS.text
_mtls_filters = [_f['value'] for _f in filters if _f['name'] == _omtls]
logger.debug('mTls Status:{}'.format(_mtls_filters))
_oh = self.FILTER_ENUM.HEALTH.text
_healths = [_f['value'] for _f in filters if _f['name'] == _oh]
logger.debug('Health:{}'.format(_healths))
items = overviews
# filter by labels
if len(_labels) > 0:
filtered_list = []
filtered_list.extend(
[_i for _i in items if dict_contains(
_i.labels, _labels)])
items = set(filtered_list)
# filter by mtls
if len(_mtls_filters) > 0 and not skip_mtls:
filtered_list = []
for _mtls in _mtls_filters:
filtered_list.extend([_i for _i in items if
self._tls_equals(_mtls, _i.tls_type)])
items = set(filtered_list)
# filter by health
if len(_healths) > 0 and not skip_health:
filtered_list = []
for _health in _healths:
filtered_list.extend([_i for _i in items if self._health_equals(_health, _i)])
items = set(filtered_list)
return items
def _tls_equals(self, tls_filter, overview_tls):
if tls_filter == OverviewMTSLStatus.ENABLED.text:
return overview_tls == MeshWideTLSType.ENABLED
elif tls_filter == OverviewMTSLStatus.DISABLED.text:
return overview_tls == MeshWideTLSType.DISABLED
else:
return overview_tls == MeshWideTLSType.PARTLY_ENABLED
def _health_equals(self, health_filter, overview_item):
if health_filter == OverviewHealth.HEALTHY.text:
return overview_item.degraded == 0 and overview_item.unhealthy == 0 \
and overview_item.healthy > 0
elif health_filter == OverviewHealth.DEGRADED.text:
return overview_item.degraded > 0 and overview_item.unhealthy == 0
else:
return overview_item.degraded == 0 and overview_item.unhealthy > 0
def test_disable_enable_delete_auto_injection(self, namespace):
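# Depending on the current state, the namespace offers enable, disable, or
# remove auto-injection; trigger the first available action, then verify the
# 'istio-injection' label and which menu actions remain.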
# load the page first
self.page.load(force_load=True)
self.apply_filters(filters=[{"name": OverviewPageFilter.NAME.text, "value": namespace}],
force_clear_all=True)
self.page.page_refresh()
overviews_ui = self.page.content.list_items
assert len(overviews_ui) == 1
overview_ui = overviews_ui[0]
assert overview_ui.namespace == namespace
if self.page.content.overview_action_present(namespace,
OverviewInjectionLinks.
ENABLE_AUTO_INJECTION.text):
self.page.content.select_action(
namespace,
OverviewInjectionLinks.ENABLE_AUTO_INJECTION.text)
self.page.page_refresh()
overviews_ui = self.page.content.list_items
overview_ui = overviews_ui[0]
assert 'istio-injection' in overview_ui.labels and \
overview_ui.labels['istio-injection'] == 'enabled', \
'istio-injection should be enabled in {}'.format(overview_ui.labels)
assert not self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.ENABLE_AUTO_INJECTION.text)
assert self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.DISABLE_AUTO_INJECTION.text)
assert self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.REMOVE_AUTO_INJECTION.text)
elif self.page.content.overview_action_present(namespace,
OverviewInjectionLinks.
DISABLE_AUTO_INJECTION.text):
self.page.content.select_action(
namespace,
OverviewInjectionLinks.DISABLE_AUTO_INJECTION.text)
self.page.page_refresh()
overviews_ui = self.page.content.list_items
overview_ui = overviews_ui[0]
assert 'istio-injection' in overview_ui.labels and \
overview_ui.labels['istio-injection'] == 'disabled', \
'istio-injection should be disabled in {}'.format(overview_ui.labels)
assert self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.ENABLE_AUTO_INJECTION.text)
assert not self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.DISABLE_AUTO_INJECTION.text)
assert self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.REMOVE_AUTO_INJECTION.text)
self.page.page_refresh()
self.page.content.select_action(
namespace,
OverviewInjectionLinks.ENABLE_AUTO_INJECTION.text)
elif self.page.content.overview_action_present(namespace,
OverviewInjectionLinks.
REMOVE_AUTO_INJECTION.text):
self.page.content.select_action(
namespace,
OverviewInjectionLinks.REMOVE_AUTO_INJECTION.text)
self.page.page_refresh()
overviews_ui = self.page.content.list_items
overview_ui = overviews_ui[0]
assert 'istio-injection' not in overview_ui.labels, \
'istio-injection should not be in {}'.format(overview_ui.labels)
assert self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.ENABLE_AUTO_INJECTION.text)
assert not self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.DISABLE_AUTO_INJECTION.text)
assert not self.page.content.overview_action_present(
namespace,
OverviewInjectionLinks.REMOVE_AUTO_INJECTION.text)
self.page.page_refresh()
self.page.content.select_action(
namespace,
OverviewInjectionLinks.ENABLE_AUTO_INJECTION.text)
def test_create_update_delete_traffic_policies(self, namespace):
# load the page first
self.page.load(force_load=True)
self.apply_filters(filters=[{"name": OverviewPageFilter.NAME.text, "value": namespace}],
force_clear_all=True)
if self.page.content.overview_action_present(namespace,
OverviewTrafficLinks.
DELETE_TRAFFIC_POLICIES.text):
self.page.page_refresh()
wait_to_spinner_disappear(self.browser)
if self.page.content.select_action(
namespace, OverviewTrafficLinks.DELETE_TRAFFIC_POLICIES.text):
wait_to_spinner_disappear(self.browser)
self.browser.wait_for_element(
parent=ListViewAbstract.DIALOG_ROOT,
locator=('.//button[text()="Delete"]'))
self.browser.click(self.browser.element(
parent=ListViewAbstract.DIALOG_ROOT,
locator=('.//button[text()="Delete"]')))
wait_to_spinner_disappear(self.browser)
self.page.page_refresh()
wait_to_spinner_disappear(self.browser)
self.page.content.list_items
assert not self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.DELETE_TRAFFIC_POLICIES.text)
assert not self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.UPDATE_TRAFFIC_POLICIES.text)
assert self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.CREATE_TRAFFIC_POLICIES.text)
elif self.page.content.overview_action_present(namespace,
OverviewTrafficLinks.
CREATE_TRAFFIC_POLICIES.text):
assert self.page.content.select_action(
namespace, OverviewTrafficLinks.CREATE_TRAFFIC_POLICIES.text)
wait_to_spinner_disappear(self.browser)
self.page.page_refresh()
wait_to_spinner_disappear(self.browser)
self.page.content.list_items
assert self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.DELETE_TRAFFIC_POLICIES.text)
assert self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.UPDATE_TRAFFIC_POLICIES.text)
assert not self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.CREATE_TRAFFIC_POLICIES.text)
elif self.page.content.overview_action_present(namespace,
OverviewTrafficLinks.
UPDATE_TRAFFIC_POLICIES.text):
assert self.page.content.select_action(
namespace, OverviewTrafficLinks.UPDATE_TRAFFIC_POLICIES.text)
wait_to_spinner_disappear(self.browser)
self.browser.wait_for_element(
parent=ListViewAbstract.DIALOG_ROOT,
locator=('.//button[text()="Update"]'))
self.browser.click(self.browser.element(
parent=ListViewAbstract.DIALOG_ROOT,
locator=('.//button[text()="Update"]')))
wait_to_spinner_disappear(self.browser)
self.page.page_refresh()
wait_to_spinner_disappear(self.browser)
self.page.content.list_items
assert self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.DELETE_TRAFFIC_POLICIES.text)
assert self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.UPDATE_TRAFFIC_POLICIES.text)
assert not self.page.content.overview_action_present(
namespace,
OverviewTrafficLinks.CREATE_TRAFFIC_POLICIES.text)
def _assert_overview_config_status(self, namespace, config_status):
expected_status = IstioConfigValidation.NA
# get configs from rest api
config_list_rest = self.kiali_client.istio_config_list(
namespaces=[namespace])
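# Aggregate the per-config validations with precedence
# NOT_VALID > WARNING > VALID > NA and compare the result with the
# overview config status badge and its link.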
for config_rest in config_list_rest:
if hasattr(config_rest, 'validation'):
if config_rest.validation == IstioConfigValidation.NOT_VALID:
expected_status = IstioConfigValidation.NOT_VALID
elif config_rest.validation == IstioConfigValidation.WARNING:
if expected_status != IstioConfigValidation.NOT_VALID:
expected_status = IstioConfigValidation.WARNING
elif config_rest.validation == IstioConfigValidation.VALID:
if expected_status == IstioConfigValidation.NA:
expected_status = IstioConfigValidation.VALID
assert expected_status == config_status.validation, \
'Expected {} but got {} for {} as Config Status'.format(
expected_status, config_status.validation, namespace)
if config_status.validation != IstioConfigValidation.NA:
assert '/console/istio?namespaces={}'.format(
namespace) in \
config_status.link, 'Wrong config overview link {}'.format(
config_status.link)
class ApplicationsPageTest(AbstractListPageTest):
FILTER_ENUM = ApplicationsPageFilter
SORT_ENUM = ApplicationsPageSort
def __init__(self, kiali_client, openshift_client, browser):
AbstractListPageTest.__init__(
self, kiali_client=kiali_client,
openshift_client=openshift_client, page=ApplicationsPage(browser))
self.browser = browser
def _prepare_load_details_page(self, name, namespace):
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=[namespace])
# apply filters
self.apply_filters(filters=[
{'name': ApplicationsPageFilter.APP_NAME.text, 'value': name}])
def load_details_page(self, name, namespace, force_refresh=False, load_only=False):
logger.debug('Loading details page for application: {}'.format(name))
if not self.is_in_details_page(name, namespace):
self._prepare_load_details_page(name, namespace)
self.open(name, namespace, force_refresh)
self.browser.wait_for_element(locator='//*[contains(., "Application")]')
return self.page.content.get_details(load_only)
def assert_random_details(self, namespaces=[], filters=[], force_refresh=False):
# get applications from rest api
_sn = self.FILTER_ENUM.APP_NAME.text
_application_names = [_f['value'] for _f in filters if _f['name'] == _sn]
logger.debug('Namespaces:{}, Application names:{}'.format(namespaces, _application_names))
applications_rest = self._apply_app_filters(self.kiali_client.application_list(
namespaces=namespaces), filters=filters)
# random applications filters
assert len(applications_rest) > 0
if len(applications_rest) > 3:
_random_applications = random.sample(applications_rest, 3)
else:
_random_applications = applications_rest
# create filters
for _idx, _selected_application in enumerate(_random_applications):
self.assert_details(
_selected_application.name,
_selected_application.namespace,
check_metrics=True if _idx == 0 else False,
force_refresh=force_refresh)
def assert_details(self, name, namespace, check_metrics=False, force_refresh=False):
logger.debug('Asserting details for: {}, in namespace: {}'.format(name, namespace))
# load application details page
application_details_ui = self.load_details_page(name, namespace, force_refresh)
assert application_details_ui
assert name == application_details_ui.name
# get application details from REST
application_details_rest = self.kiali_client.application_details(
namespace=namespace,
application_name=name)
assert application_details_rest
assert name == application_details_rest.name
application_details_oc = self.openshift_client.application_details(
namespace=namespace,
application_name=name)
assert application_details_oc
assert application_details_ui.is_equal(application_details_rest,
advanced_check=True), \
'Application UI {} not equal to REST {}'\
.format(application_details_ui, application_details_rest)
'''TODO read health tooltip values
if application_details_ui.application_status:
assert application_details_ui.application_status.is_healthy() == \
application_details_ui.health, \
"Application Details Status {} is not equal to UI Health {} for {}"\
.format(
application_details_ui.application_status.is_healthy(),
application_details_ui.health,
application_details_ui.name)
if application_details_oc.application_status:
assert is_equal(application_details_ui.application_status.deployment_statuses,
application_details_oc.application_status.deployment_statuses), \
"Application REST Status {} is not equal to OC {} for {}"\
.format(
application_details_ui.application_status.deployment_statuses,
application_details_oc.application_status.deployment_statuses,
application_details_ui.name)'''
assert is_equal([_w.name for _w in application_details_ui.workloads],
[_w.name for _w in application_details_rest.workloads])
assert is_equal([_w.name for _w in application_details_oc.workloads],
[_w.name for _w in application_details_rest.workloads])
for workload_ui in application_details_ui.workloads:
found = False
for workload_rest in application_details_rest.workloads:
if workload_ui.is_equal(workload_rest,
advanced_check=True):
found = True
break
if not found:
assert found, 'Workload {} not found in REST {}'.format(workload_ui, workload_rest)
found = False
for workload_oc in application_details_oc.workloads:
if workload_ui.is_equal(workload_oc,
advanced_check=False):
found = True
break
if not found:
assert found, 'Workload {} not found in OC {}'.format(workload_ui, workload_oc)
assert application_details_ui.services == application_details_rest.services, \
'UI services {} not equal to REST {}'.format(
application_details_ui.services,
application_details_rest.services)
assert is_equal(application_details_ui.services, application_details_oc.services), \
'UI services {} not equal to OC {}'.format(
application_details_ui.services,
application_details_oc.services)
if check_metrics:
self.assert_metrics_options(application_details_ui.inbound_metrics)
self.assert_metrics_options(application_details_ui.outbound_metrics)
self.assert_traces_tab(application_details_ui.traces_tab)
self.assert_traffic(name, application_details_ui.traffic_tab,
self_object_type=TrafficType.APP, traffic_object_type=TrafficType.APP)
def assert_all_items(self, namespaces=[], filters=[], sort_options=[], force_clear_all=True,
label_operation=None):
# apply namespaces
self.apply_namespaces(namespaces, force_clear_all=force_clear_all)
# apply filters
self.apply_filters(filters=filters, force_clear_all=force_clear_all)
# apply sorting
self.sort(sort_options)
if label_operation:
self.apply_label_operation(label_operation)
logger.debug('Namespaces:{}'.format(namespaces))
# get applications from ui
applications_ui = self.page.content.all_items
# get from REST
applications_rest = self._apply_app_filters(self.kiali_client.application_list(
namespaces=namespaces),
filters,
label_operation)
# get from OC
applications_oc = self._apply_app_filters(self.openshift_client.application_list(
namespaces=namespaces),
filters,
label_operation,
True,
True)
# compare all results
logger.debug('Namespaces:{}'.format(namespaces))
logger.debug('Items count[UI:{}, REST:{}]'.format(
len(applications_ui), len(applications_rest)))
logger.debug('Applications UI:{}'.format(applications_ui))
logger.debug('Applications REST:{}'.format(applications_rest))
logger.debug('Applications OC:{}'.format(applications_oc))
assert len(applications_ui) == len(applications_rest), \
"UI {} and REST {} applications number not equal".format(applications_ui,
applications_rest)
assert len(applications_rest) <= len(applications_oc)
for application_ui in applications_ui:
found = False
for application_rest in applications_rest:
if application_ui.is_equal(application_rest, advanced_check=True):
found = True
if application_ui.application_status:
assert application_ui.application_status.is_healthy() == \
application_ui.health, \
"Application Tooltip Health {} is not equal to UI Health {} for {}"\
.format(
application_ui.application_status.is_healthy(),
application_ui.health,
application_ui.name)
break
if not found:
assert found, '{} not found in REST'.format(application_ui)
found = False
for application_oc in applications_oc:
logger.debug('{} {}'.format(application_oc.name, application_oc.namespace))
if application_ui.is_equal(application_oc, advanced_check=False):
# OC may contain extra labels; skip the label check for jaeger and grafana
if application_ui.name != 'jaeger' and application_ui.name != 'grafana':
assert application_ui.labels.items() == application_oc.labels.items(), \
'Expected {} but got {} labels for application {}'.format(
application_oc.labels,
application_ui.labels,
application_ui.name)
found = True
if application_oc.application_status and \
application_oc.application_status.deployment_statuses:
assert is_equal(application_rest.application_status.deployment_statuses,
application_oc.application_status.deployment_statuses), \
"Application REST Status {} is not equal to OC {} for {}"\
.format(
application_rest.application_status.deployment_statuses,
application_oc.application_status.deployment_statuses,
application_ui.name)
break
if not found:
assert found, '{} not found in OC'.format(application_ui)
def _apply_app_filters(self, applications=[], filters=[], label_operation=None,
skip_health=False, skip_sidecar=False):
_an = self.FILTER_ENUM.APP_NAME.text
_application_names = [_f['value'] for _f in filters if _f['name'] == _an]
logger.debug('Application names:{}'.format(_application_names))
_al = self.FILTER_ENUM.LABEL.text
_labels = [_f['value'] for _f in filters if _f['name'] == _al]
logger.debug('Application Labels:{}'.format(_labels))
_ais = self.FILTER_ENUM.ISTIO_SIDECAR.text
_sidecars = [_f['value'] for _f in filters if _f['name'] == _ais]
logger.debug('Istio Sidecars:{}'.format(_sidecars))
_ah = self.FILTER_ENUM.HEALTH.text
_health = [_f['value'] for _f in filters if _f['name'] == _ah]
logger.debug('Health:{}'.format(_health))
# filter by application name
items = applications
if len(_application_names) > 0:
filtered_list = []
for _name in _application_names:
filtered_list.extend([_i for _i in items if _name in _i.name])
items = set(filtered_list)
# filter by labels
if len(_labels) > 0:
filtered_list = []
filtered_list.extend(
[_i for _i in items if dict_contains(
_i.labels, _labels,
(True if label_operation == LabelOperation.AND.text else False))])
items = set(filtered_list)
# filter by sidecars
if len(_sidecars) > 0 and not skip_sidecar:
filtered_list = []
for _sidecar in _sidecars:
filtered_list.extend([_i for _i in items if
self.sidecar_presents(_sidecar, _i.istio_sidecar)])
items = set(filtered_list)
# filter by health
if len(_health) > 0 and not skip_health:
filtered_list = []
filtered_list.extend([_i for _i in items if self.health_equals(_health[0], _i.health)])
items = set(filtered_list)
return items
class WorkloadsPageTest(AbstractListPageTest):
FILTER_ENUM = WorkloadsPageFilter
SORT_ENUM = WorkloadsPageSort
def __init__(self, kiali_client, openshift_client, browser):
AbstractListPageTest.__init__(
self, kiali_client=kiali_client,
openshift_client=openshift_client, page=WorkloadsPage(browser))
self.browser = browser
def _prepare_load_details_page(self, name, namespace):
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=[namespace])
# apply filters
self.apply_filters(filters=[
{'name': WorkloadsPageFilter.WORKLOAD_NAME.text, 'value': name}])
def load_details_page(self, name, namespace, force_refresh=False, load_only=False):
logger.debug('Loading details page for workload: {}'.format(name))
if not self.is_in_details_page(name, namespace):
self._prepare_load_details_page(name, namespace)
self.open(name, namespace, force_refresh)
self.browser.wait_for_element(locator='//*[contains(., "Workload")]')
return self.page.content.get_details(load_only)
def assert_random_details(self, namespaces=[], filters=[],
force_clear_all=True, force_refresh=False):
# get workloads from rest api
logger.debug('Namespaces:{}'.format(namespaces))
workloads_rest = self._apply_workload_filters(self.kiali_client.workload_list(
namespaces=namespaces), filters)
# random workloads filters
assert len(workloads_rest) > 0
if len(workloads_rest) > 3:
_random_workloads = random.sample(workloads_rest, 3)
else:
_random_workloads = workloads_rest
# create filters
for _idx, _selected_workload in enumerate(_random_workloads):
self.assert_details(_selected_workload.name,
_selected_workload.namespace,
_selected_workload.workload_type,
check_metrics=True if _idx == 0 else False,
force_refresh=force_refresh)
def assert_details(self, name, namespace, workload_type, check_metrics=False,
force_refresh=False):
logger.debug('Asserting details for: {}, in namespace: {}'.format(name, namespace))
# load workload details page
workload_details_ui = self.load_details_page(name, namespace, force_refresh)
assert workload_details_ui
assert name == workload_details_ui.name
# get workload details from REST
workload_details_rest = self.kiali_client.workload_details(
namespace=namespace,
workload_name=name,
workload_type=workload_type)
assert workload_details_rest
assert name == workload_details_rest.name
# get workload details from OC
workload_details_oc = self.openshift_client.workload_details(
namespace=namespace,
workload_name=name,
workload_type=workload_type)
assert workload_details_oc
assert name == workload_details_oc.name
assert workload_details_ui.is_equal(workload_details_rest,
advanced_check=True), \
'Workload UI {} not equal to REST {}'\
.format(workload_details_ui, workload_details_rest)
assert workload_details_ui.is_equal(workload_details_oc,
advanced_check=False), \
'Workload UI {} not equal to OC {}'\
.format(workload_details_ui, workload_details_oc)
if workload_details_ui.workload_status:
assert workload_details_ui.workload_status.is_healthy() == \
workload_details_ui.health, \
"Workload Details Status {} is not equal to UI Health {} for {}"\
.format(
workload_details_ui.workload_status.is_healthy(),
workload_details_ui.health,
workload_details_ui.name)
assert is_equal(workload_details_ui.applications,
workload_details_rest.applications)
assert is_equal(workload_details_ui.services,
workload_details_rest.services)
all_pods = []
for pod_ui in workload_details_ui.pods:
all_pods.append(pod_ui.name)
found = False
for pod_rest in workload_details_rest.pods:
if pod_ui.is_equal(pod_rest,
advanced_check=True):
found = True
break
if not found:
assert found, 'Pod {} not found in REST {}'.format(pod_ui, pod_rest)
for pod_oc in workload_details_oc.pods:
found = False
for pod_rest in workload_details_rest.pods:
if pod_oc.is_equal(pod_rest,
advanced_check=False):
found = True
break
if not found:
assert found, 'OC Pod {} not found in REST {}'.format(pod_oc, pod_rest)
for service_ui in workload_details_ui.services:
found = False
for service_rest in workload_details_rest.services:
if service_ui == service_rest:
found = True
break
if not found:
assert found, 'Service {} not found in REST {}'.format(service_ui, service_rest)
self.assert_istio_configs(workload_details_ui,
workload_details_rest,
workload_details_oc,
namespace)
self.assert_logs_tab(workload_details_ui.logs_tab, all_pods)
if check_metrics:
self.assert_metrics_options(workload_details_ui.inbound_metrics, check_grafana=True)
self.assert_metrics_options(workload_details_ui.outbound_metrics, check_grafana=True)
self.assert_traces_tab(workload_details_ui.traces_tab)
self.assert_traffic(name, workload_details_ui.traffic_tab,
self_object_type=TrafficType.WORKLOAD,
traffic_object_type=TrafficType.WORKLOAD)
def assert_all_items(self, namespaces=[], filters=[], sort_options=[], force_clear_all=True,
label_operation=None):
# apply namespaces
self.apply_namespaces(namespaces, force_clear_all=force_clear_all)
# apply filters
self.apply_filters(filters=filters, force_clear_all=force_clear_all)
# apply sorting
self.sort(sort_options)
if label_operation:
self.apply_label_operation(label_operation)
# get workloads from rest api
workloads_rest = self._apply_workload_filters(self.kiali_client.workload_list(
namespaces=namespaces), filters, label_operation)
# get workloads from OC client
workloads_oc = self._apply_workload_filters(self.openshift_client.workload_list(
namespaces=(namespaces if namespaces else self.kiali_client.namespace_list())),
filters, label_operation,
skip_sidecar=True,
skip_health=True)
# get workloads from ui
workloads_ui = self.page.content.all_items
# compare all results
logger.debug('Namespaces:{}'.format(namespaces))
logger.debug('Items count[UI:{}, REST:{}, OC:{}]'.format(
len(workloads_ui), len(workloads_rest), len(workloads_oc)))
logger.debug('Workloads UI:{}'.format(workloads_ui))
logger.debug('Workloads REST:{}'.format(workloads_rest))
logger.debug('Workloads OC:{}'.format(workloads_oc))
assert len(workloads_ui) == len(workloads_rest), \
"UI {} and REST {} workloads number not equal".format(workloads_ui, workloads_rest)
assert len(workloads_rest) <= len(workloads_oc), \
"REST {} should be less or equal OC {}".format(workloads_rest, workloads_oc)
for workload_ui in workloads_ui:
found = False
for workload_rest in workloads_rest:
if workload_ui.is_equal(workload_rest, advanced_check=True):
found = True
if workload_ui.workload_status:
assert workload_ui.workload_status.is_healthy() == workload_ui.health, \
"Workload Tooltip Health {} is not equal to UI Health {} for {}"\
.format(
workload_ui.workload_status.is_healthy(),
workload_ui.health,
workload_ui.name)
break
if not found:
assert found, '{} not found in REST'.format(workload_ui)
found = False
for workload_oc in workloads_oc:
if workload_ui.is_equal(workload_oc, advanced_check=False) and \
workload_ui.labels.items() == workload_oc.labels.items():
found = True
if workload_oc.workload_status:
assert workload_rest.workload_status.workload_status.is_equal(
workload_oc.workload_status.workload_status), \
"Workload REST Status {} is not equal to OC {} for {}"\
.format(
workload_rest.workload_status.workload_status,
workload_oc.workload_status.workload_status,
workload_ui.name)
break
if not found:
assert found, '{} not found in OC'.format(workload_ui)
def _apply_workload_filters(self, workloads=[], filters=[], label_operation=None,
skip_sidecar=False, skip_health=False):
_sn = self.FILTER_ENUM.WORKLOAD_NAME.text
_names = [_f['value'] for _f in filters if _f['name'] == _sn]
logger.debug('Workload names:{}'.format(_names))
_wl = self.FILTER_ENUM.LABEL.text
_labels = [_f['value'] for _f in filters if _f['name'] == _wl]
logger.debug('Workload Labels:{}'.format(_labels))
_wt = self.FILTER_ENUM.WORKLOAD_TYPE.text
_types = [_f['value'] for _f in filters if _f['name'] == _wt]
logger.debug('Workload Types:{}'.format(_types))
_wis = self.FILTER_ENUM.ISTIO_SIDECAR.text
_sidecars = [_f['value'] for _f in filters if _f['name'] == _wis]
logger.debug('Istio Sidecars:{}'.format(_sidecars))
_wh = self.FILTER_ENUM.HEALTH.text
_health = [_f['value'] for _f in filters if _f['name'] == _wh]
logger.debug('Health:{}'.format(_health))
_version_label = None
for _f in filters:
if _f['name'] == self.FILTER_ENUM.VERSION_LABEL.text:
_version_label = _f['value']
break
logger.debug('Version Label:{}'.format(_version_label))
_app_label = None
for _f in filters:
if _f['name'] == self.FILTER_ENUM.APP_LABEL.text:
_app_label = _f['value']
break
logger.debug('App Label:{}'.format(_app_label))
items = workloads
# filter by name
if len(_names) > 0:
filtered_list = []
for _name in _names:
filtered_list.extend([_i for _i in items if _name in _i.name])
items = set(filtered_list)
# filter by labels
if len(_labels) > 0:
filtered_list = []
filtered_list.extend(
[_i for _i in workloads if dict_contains(
_i.labels, _labels,
(True if label_operation == LabelOperation.AND.text else False))])
items = set(filtered_list)
# filter by types
if len(_types) > 0:
filtered_list = []
for _type in _types:
filtered_list.extend([_i for _i in items if _type == _i.workload_type])
items = set(filtered_list)
# filter by sidecars
if len(_sidecars) > 0 and not skip_sidecar:
filtered_list = []
for _sidecar in _sidecars:
filtered_list.extend([_i for _i in items if
self.sidecar_presents(_sidecar, _i.istio_sidecar)])
items = set(filtered_list)
# filter by version label present
if _version_label:
filtered_list = []
filtered_list.extend([_i for _i in items if
(_version_label == VersionLabel.NOT_PRESENT.text)
^ dict_contains(
given_list=['version'], original_dict=_i.labels)])
items = set(filtered_list)
# filter by app label present
if _app_label:
filtered_list = []
filtered_list.extend([_i for _i in items if
(_app_label == AppLabel.NOT_PRESENT.text)
^ dict_contains(
given_list=['app'], original_dict=_i.labels)])
items = set(filtered_list)
# filter by health
if len(_health) > 0 and not skip_health:
filtered_list = []
filtered_list.extend([_i for _i in items if self.health_equals(_health[0], _i.health)])
items = set(filtered_list)
return items
def test_disable_enable_delete_auto_injection(self, name, namespace):
logger.debug('Auto Injection test for Workload: {}, {}'.format(name, namespace))
# load workload details page
self._prepare_load_details_page(name, namespace)
self.open(name, namespace)
if self.page.actions.is_disable_auto_injection_visible():
self.page.actions.select(OverviewInjectionLinks.DISABLE_AUTO_INJECTION.text)
self.page.page_refresh()
assert self.page.content._details_missing_sidecar()
assert self.page.actions.is_enable_auto_injection_visible()
assert self.page.actions.is_remove_auto_injection_visible()
assert not self.page.actions.is_disable_auto_injection_visible()
elif self.page.actions.is_remove_auto_injection_visible():
self.page.actions.select(OverviewInjectionLinks.REMOVE_AUTO_INJECTION.text)
self.page.page_refresh()
assert self.page.content._details_missing_sidecar()
assert self.page.actions.is_enable_auto_injection_visible()
assert not self.page.actions.is_remove_auto_injection_visible()
assert not self.page.actions.is_disable_auto_injection_visible()
elif self.page.actions.is_enable_auto_injection_visible():
self.page.actions.select(OverviewInjectionLinks.ENABLE_AUTO_INJECTION.text)
self.page.page_refresh()
assert self.page.content._details_missing_sidecar()
assert not self.page.actions.is_enable_auto_injection_visible()
assert self.page.actions.is_remove_auto_injection_visible()
assert self.page.actions.is_disable_auto_injection_visible()
class ServicesPageTest(AbstractListPageTest):
FILTER_ENUM = ServicesPageFilter
SORT_ENUM = ServicesPageSort
def __init__(self, kiali_client, openshift_client, browser):
AbstractListPageTest.__init__(
self, kiali_client=kiali_client,
openshift_client=openshift_client, page=ServicesPage(browser))
self.browser = browser
def _prepare_load_details_page(self, name, namespace):
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=[namespace])
# apply filters
self.apply_filters(filters=[
{'name': ServicesPageFilter.SERVICE_NAME.text, 'value': name}])
def load_details_page(self, name, namespace, force_refresh=False, load_only=False):
logger.debug('Loading details page for service: {}'.format(name))
if not self.is_in_details_page(name, namespace):
self._prepare_load_details_page(name, namespace)
self.open(name, namespace, force_refresh)
self.browser.wait_for_element(locator='//*[contains(., "Service")]')
return self.page.content.get_details(load_only)
def assert_random_details(self, namespaces=[], filters=[], force_refresh=False):
# get services from rest api
services_rest = self._apply_service_filters(self.kiali_client.service_list(
namespaces=namespaces), filters=filters)
# random services filters
assert len(services_rest) > 0
if len(services_rest) > 2:
_random_services = random.sample(services_rest, 2)
else:
_random_services = services_rest
# create filters
for _idx, _selected_service in enumerate(_random_services):
self.assert_details(_selected_service.name, _selected_service.namespace,
check_metrics=True if _idx == 0 else False,
force_refresh=force_refresh)
def assert_details(self, name, namespace, check_metrics=False,
force_refresh=False):
logger.debug('Asserting details for: {}, in namespace: {}'.format(name, namespace))
# load service details page
service_details_ui = self.load_details_page(name, namespace, force_refresh)
assert service_details_ui
assert name == service_details_ui.name
# get service details from rest
service_details_rest = self.kiali_client.service_details(
namespace=namespace,
service_name=name)
assert service_details_rest
assert name == service_details_rest.name
service_details_oc = self.openshift_client.service_details(namespace=namespace,
service_name=name,
skip_workloads=False)
assert service_details_oc
assert name == service_details_oc.name
if namespace != 'istio-system':
assert service_details_rest.istio_sidecar\
== service_details_ui.istio_sidecar
assert service_details_ui.is_equal(service_details_rest,
advanced_check=True), \
'Service UI {} not equal to REST {}'\
.format(service_details_ui, service_details_rest)
assert service_details_ui.is_equal(service_details_oc,
advanced_check=False), \
'Service UI {} not equal to OC {}'\
.format(service_details_ui, service_details_oc)
assert is_equal(service_details_ui.applications,
service_details_rest.applications)
assert len(service_details_ui.workloads)\
== len(service_details_rest.workloads)
assert len(service_details_ui.istio_configs)\
== len(service_details_rest.istio_configs)
assert len(service_details_ui.workloads)\
== len(service_details_oc.workloads)
assert len(service_details_ui.istio_configs)\
== len(service_details_oc.istio_configs)
if service_details_ui.service_status:
assert service_details_ui.service_status.is_healthy() == \
service_details_ui.health, \
"Service Details Status {} is not equal to UI Health {} for {}"\
.format(
service_details_ui.service_status.is_healthy(),
service_details_ui.health,
service_details_ui.name)
for workload_ui in service_details_ui.workloads:
found = False
for workload_rest in service_details_rest.workloads:
if workload_ui == workload_rest.name:
found = True
break
if not found:
assert found, 'Workload {} not found in REST {}'.format(workload_ui,
workload_rest)
found = False
for workload_oc in service_details_oc.workloads:
if workload_ui == workload_oc.name:
found = True
break
if not found:
assert found, 'Workload {} not found in OC {}'.format(workload_ui,
workload_oc)
self.assert_istio_configs(service_details_ui,
service_details_rest,
service_details_oc,
namespace)
if check_metrics:
self.assert_metrics_options(service_details_ui.inbound_metrics, check_grafana=False)
self.assert_traces_tab(service_details_ui.traces_tab)
# service traffic is linked to workloads
self.assert_traffic(name, service_details_ui.traffic_tab,
self_object_type=TrafficType.SERVICE,
traffic_object_type=TrafficType.SERVICE)
def get_workload_names_set(self, source_workloads):
workload_names = []
for source_workload in source_workloads:
for workload in source_workload.workloads:
workload_names.append(workload)
return set(workload_names)
def assert_all_items(self, namespaces=[], filters=[], sort_options=[], force_clear_all=True,
label_operation=None):
# apply namespaces
self.apply_namespaces(namespaces, force_clear_all=force_clear_all)
# apply filters
self.apply_filters(filters=filters, force_clear_all=force_clear_all)
# apply sorting
self.sort(sort_options)
if label_operation:
self.apply_label_operation(label_operation)
# get services from ui
services_ui = self.page.content.all_items
# get services from rest api
services_rest = self._apply_service_filters(self.kiali_client.service_list(
namespaces=namespaces), filters=filters)
# get services from OC client
services_oc = self._apply_service_filters(self.openshift_client.service_list(
namespaces=namespaces), filters=filters)
# compare all results
logger.debug('Namespaces:{}'.format(namespaces))
logger.debug('Items count[UI:{}, REST:{}, OC:{}]'.format(
len(services_ui), len(services_rest), len(services_oc)))
logger.debug('Services UI:{}'.format(services_ui))
logger.debug('Services REST:{}'.format(services_rest))
logger.debug('Services OC:{}'.format(services_oc))
assert len(services_ui) == len(services_rest), \
"UI {} and REST {} services number not equal".format(services_ui, services_rest)
assert len(services_rest) <= len(services_oc)
for service_ui in services_ui:
found = False
for service_rest in services_rest:
if service_ui.is_equal(service_rest, advanced_check=True):
found = True
if service_ui.service_status:
assert service_ui.service_status.is_healthy() == service_ui.health, \
"Service Tooltip Health {} is not equal to UI Health {}".format(
service_ui.service_status.is_healthy(),
service_ui.health)
break
if not found:
assert found, '{} not found in REST'.format(service_ui)
found = False
for service_oc in services_oc:
if service_ui.is_equal(service_oc, advanced_check=False):
assert service_ui.labels.items() == service_oc.labels.items()
found = True
break
if not found:
assert found, '{} not found in OC'.format(service_ui)
if service_ui.config_status.validation != IstioConfigValidation.NA:
assert '/console/namespaces/{}/services/{}'.format(
service_ui.namespace,
service_ui.name) in \
service_ui.config_status.link, 'Wrong service link {}'.format(
service_ui.config_status.link)
def _apply_service_filters(self, services=[], filters=[], label_operation=None):
_sn = self.FILTER_ENUM.SERVICE_NAME.text
_service_names = [_f['value'] for _f in filters if _f['name'] == _sn]
logger.debug('Service names:{}'.format(_service_names))
_sis = self.FILTER_ENUM.ISTIO_SIDECAR.text
_sidecars = [_f['value'] for _f in filters if _f['name'] == _sis]
logger.debug('Istio Sidecars:{}'.format(_sidecars))
_sl = self.FILTER_ENUM.LABEL.text
_labels = [_f['value'] for _f in filters if _f['name'] == _sl]
items = services
# filter by service name
if len(_service_names) > 0:
filtered_list = []
for _name in _service_names:
filtered_list.extend([_i for _i in items if _name in _i.name])
items = set(filtered_list)
# filter by sidecars
if len(_sidecars) > 0:
filtered_list = []
for _sidecar in _sidecars:
filtered_list.extend([_i for _i in items if
self.sidecar_presents(_sidecar, _i.istio_sidecar)])
items = set(filtered_list)
# filter by labels
if len(_labels) > 0:
filtered_list = []
filtered_list.extend(
[_i for _i in items if dict_contains(
_i.labels, _labels,
(True if label_operation == LabelOperation.AND.text else False))])
items = set(filtered_list)
return items
def get_additional_filters(self, namespaces, current_filters):
logger.debug('Current filters:{}'.format(current_filters))
# get services of a namespace
_namespace = namespaces[0]
logger.debug('Running Services REST query for namespace:{}'.format(_namespace))
_services = self.kiali_client.service_list(namespaces=[_namespace])
logger.debug('Query response, Namespace:{}, Services:{}'.format(_namespace, _services))
# if we have a service, select a service randomly and return it
if len(_services) > 0:
_random_service = random.choice(_services)
return [
{
'name': self.FILTER_ENUM.SERVICE_NAME.text,
'value': _random_service.name
}
]
return []
def test_routing_create(self, name, namespace, routing_type,
peer_auth_mode=None,
tls=RoutingWizardTLS.ISTIO_MUTUAL, load_balancer=True,
load_balancer_type=RoutingWizardLoadBalancer.ROUND_ROBIN,
gateway=True, include_mesh_gateway=True,
circuit_braker=False,
skip_advanced=False):
logger.debug('Routing Wizard {} for Service: {}, {}'.format(routing_type, name, namespace))
# load service details page
self._prepare_load_details_page(name, namespace)
self.open(name, namespace)
self.page.actions.delete_all_routing()
if routing_type == RoutingWizardType.TRAFFIC_SHIFTING:
assert self.page.actions.create_weighted_routing(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
circuit_braker=circuit_braker,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_update_weighted_enabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_suspend_disabled()
elif routing_type == RoutingWizardType.TCP_TRAFFIC_SHIFTING:
assert self.page.actions.create_tcp_traffic_shifting(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_tcp_shifting_enabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_suspend_disabled()
elif routing_type == RoutingWizardType.REQUEST_ROUTING:
assert self.page.actions.create_matching_routing(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
circuit_braker=circuit_braker,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_update_matching_enabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_suspend_disabled()
elif routing_type == RoutingWizardType.FAULT_INJECTION:
assert self.page.actions.suspend_traffic(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
circuit_braker=circuit_braker,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_update_suspended_enabled()
elif routing_type == RoutingWizardType.REQUEST_TIMEOUTS:
assert self.page.actions.request_timeouts(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
circuit_braker=circuit_braker,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_suspend_disabled()
assert self.page.actions.is_timeouts_enabled()
# get service details from rest
service_details_rest = self.kiali_client.service_details(
namespace=namespace,
service_name=name)
assert len(service_details_rest.virtual_services) == 1, 'Service should have 1 VS'
assert len(service_details_rest.destination_rules) == 1, 'Service should have 1 DR'
assert service_details_rest.virtual_services[0].name == name
assert service_details_rest.destination_rules[0].name == name
if load_balancer_type:
assert word_in_text(load_balancer_type.text.lower(),
service_details_rest.destination_rules[0].traffic_policy,
load_balancer)
if tls:
assert word_in_text(tls.text.lower(),
service_details_rest.destination_rules[0].traffic_policy,
tls)
if tls == RoutingWizardTLS.MUTUAL:
assert word_in_text('{} {}'.format(TLSMutualValues.CLIENT_CERT.key.lower(),
TLSMutualValues.CLIENT_CERT.text),
service_details_rest.destination_rules[0].traffic_policy)
assert word_in_text('{} {}'.format(TLSMutualValues.PRIVATE_KEY.key.lower(),
TLSMutualValues.PRIVATE_KEY.text),
service_details_rest.destination_rules[0].traffic_policy)
assert word_in_text('{} {}'.format(TLSMutualValues.CA_CERT.key.lower(),
TLSMutualValues.CA_CERT.text),
service_details_rest.destination_rules[0].traffic_policy)
# get virtual service details from rest
istio_config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=OBJECT_TYPE.VIRTUAL_SERVICE.text,
object_name=service_details_rest.virtual_services[0].name)
assert word_in_text('\"mesh\"',
istio_config_details_rest.text,
gateway and include_mesh_gateway)
# get destination rule details from rest
istio_config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=OBJECT_TYPE.DESTINATION_RULE.text,
object_name=service_details_rest.destination_rules[0].name)
assert word_in_text('\"http1MaxPendingRequests\"',
istio_config_details_rest.text,
circuit_braker)
def test_routing_update(self, name, namespace, routing_type,
peer_auth_mode=None,
tls=RoutingWizardTLS.ISTIO_MUTUAL, load_balancer=True,
load_balancer_type=RoutingWizardLoadBalancer.ROUND_ROBIN,
gateway=True, include_mesh_gateway=True,
circuit_braker=False,
skip_advanced=False):
logger.debug('Routing Update Wizard {} for Service: {}, {}'.format(routing_type,
name,
namespace))
# load service details page
self._prepare_load_details_page(name, namespace)
self.open(name, namespace)
if routing_type == RoutingWizardType.TRAFFIC_SHIFTING:
assert self.page.actions.update_weighted_routing(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
circuit_braker=circuit_braker,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_update_weighted_enabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_suspend_disabled()
elif routing_type == RoutingWizardType.TCP_TRAFFIC_SHIFTING:
assert self.page.actions.update_tcp_traffic_shifting(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_tcp_shifting_enabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_suspend_disabled()
elif routing_type == RoutingWizardType.REQUEST_ROUTING:
assert self.page.actions.update_matching_routing(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_update_matching_enabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_suspend_disabled()
elif routing_type == RoutingWizardType.FAULT_INJECTION:
assert self.page.actions.update_suspended_traffic(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
circuit_braker=circuit_braker,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_timeouts_disabled()
assert self.page.actions.is_update_suspended_enabled()
elif routing_type == RoutingWizardType.REQUEST_TIMEOUTS:
assert self.page.actions.update_request_timeouts(
tls=tls,
peer_auth_mode=peer_auth_mode,
load_balancer=load_balancer,
load_balancer_type=load_balancer_type, gateway=gateway,
include_mesh_gateway=include_mesh_gateway,
circuit_braker=circuit_braker,
skip_advanced=skip_advanced)
assert not self.page.actions.is_delete_disabled()
assert self.page.actions.is_create_matching_disabled()
assert self.page.actions.is_create_weighted_disabled()
assert self.page.actions.is_tcp_shifting_disabled()
assert self.page.actions.is_suspend_disabled()
assert self.page.actions.is_timeouts_enabled()
# get service details from rest
service_details_rest = self.kiali_client.service_details(
namespace=namespace,
service_name=name)
assert len(service_details_rest.virtual_services) == 1, 'Service should have 1 VS'
assert len(service_details_rest.destination_rules) == 1, 'Service should have 1 DR'
assert service_details_rest.virtual_services[0].name == name
assert service_details_rest.destination_rules[0].name == name
if load_balancer_type:
assert word_in_text(load_balancer_type.text.lower(),
service_details_rest.destination_rules[0].traffic_policy,
load_balancer)
if tls and tls.text != RoutingWizardTLS.UNSET.text:
assert word_in_text(tls.text.lower(),
service_details_rest.destination_rules[0].traffic_policy,
tls)
# get virtual service details from rest
istio_config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=OBJECT_TYPE.VIRTUAL_SERVICE.text,
object_name=service_details_rest.virtual_services[0].name)
assert word_in_text('\"mesh\"',
istio_config_details_rest.text,
gateway and include_mesh_gateway)
# get destination rule details from rest
istio_config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=OBJECT_TYPE.DESTINATION_RULE.text,
object_name=service_details_rest.destination_rules[0].name)
assert word_in_text('\"http1MaxPendingRequests\"',
istio_config_details_rest.text,
circuit_braker)
def test_routing_delete(self, name, namespace):
logger.debug('Routing Delete for Service: {}, {}'.format(name, namespace))
# load service details page
self._prepare_load_details_page(name, namespace)
self.open(name, namespace)
assert self.page.actions.delete_all_routing()
assert self.page.actions.is_delete_disabled()
assert self.page.actions.is_create_weighted_enabled()
assert self.page.actions.is_create_matching_enabled()
assert self.page.actions.is_tcp_shifting_enabled()
assert self.page.actions.is_suspend_enabled()
assert self.page.actions.is_timeouts_enabled()
# get service details from rest
service_details_rest = self.kiali_client.service_details(
namespace=namespace,
service_name=name)
assert len(service_details_rest.virtual_services) == 0, 'Service should have no VS'
assert len(service_details_rest.destination_rules) == 0, 'Service should have no DR'
class IstioConfigPageTest(AbstractListPageTest):
FILTER_ENUM = IstioConfigPageFilter
SORT_ENUM = IstioConfigPageSort
def __init__(self, kiali_client, openshift_client, browser):
AbstractListPageTest.__init__(
self, kiali_client=kiali_client,
openshift_client=openshift_client, page=IstioConfigPage(browser))
self.browser = browser
def _prepare_load_details_page(self, name, namespace, object_type=None):
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=[namespace])
# apply filters
_filters = [{'name': IstioConfigPageFilter.ISTIO_NAME.text, 'value': name}]
if object_type:
_filters.append({'name': IstioConfigPageFilter.ISTIO_TYPE.text, 'value': object_type})
self.apply_filters(filters=_filters)
def load_details_page(self, name, namespace, object_type=None,
force_refresh=False, load_only=False):
logger.debug('Loading details page for istio config: {}'.format(name))
if not self.is_in_details_page(name, namespace):
self._prepare_load_details_page(name, namespace, object_type)
wait_to_spinner_disappear(self.browser)
self.open(name, namespace, force_refresh)
wait_to_spinner_disappear(self.browser)
self.browser.wait_for_element(locator='//button[contains(., "YAML")]',
parent='//*[contains(@class, "pf-c-page__main-section")]')
wait_to_spinner_disappear(self.browser)
return self.page.content.get_details(name, load_only)
def assert_all_items(self, namespaces=[], filters=[], sort_options=[], force_clear_all=True):
logger.debug('Asserting all istio config items')
logger.debug('Filters:{}'.format(filters))
# apply namespaces
self.apply_namespaces(namespaces, force_clear_all=force_clear_all)
# apply filters
self.apply_filters(filters=filters, force_clear_all=force_clear_all)
# apply sorting
self.sort(sort_options)
_sn = self.FILTER_ENUM.ISTIO_NAME.text
_istio_names = [_f['value'] for _f in filters if _f['name'] == _sn]
# get rules from rest api
config_list_rest = self.kiali_client.istio_config_list(
namespaces=namespaces, config_names=_istio_names)
logger.debug('Istio config list REST:{}]'.format(config_list_rest))
# get rules from ui
config_list_ui = self.page.content.all_items
logger.debug('Istio config list UI:{}]'.format(config_list_ui))
# get configs from OC api
config_list_oc = self.openshift_client.istio_config_list(
namespaces=namespaces, config_names=_istio_names)
logger.debug('Istio config list OC API:{}]'.format(config_list_oc))
# compare 3 way results
assert len(config_list_ui) == len(config_list_rest), \
"UI {} and REST {} config number not equal".format(config_list_ui, config_list_rest)
assert len(config_list_ui) == len(config_list_oc)
for config_ui in config_list_ui:
found = False
for config_rest in config_list_rest:
if config_ui.is_equal(config_rest, advanced_check=True):
found = True
break
if not found:
assert found, '{} not found in REST'.format(config_ui)
found = False
for config_oc in config_list_oc:
if config_ui.is_equal(config_oc, advanced_check=False):
found = True
break
if not found:
assert found, '{} not found in OC'.format(config_ui)
if config_ui.validation != IstioConfigValidation.NA:
assert '/console/namespaces/{}/istio/{}/{}?list=yaml'.format(
config_ui.namespace,
ISTIO_CONFIG_TYPES[config_ui.object_type],
config_ui.name) in \
config_ui.config_link, 'Wrong config link {}'.format(
config_ui.config_link)
logger.debug('Done asserting all istio config items')
def assert_random_details(self, namespaces=[], filters=[]):
# get istio config from rest api
configs_rest = self.kiali_client.istio_config_list(namespaces, filters)
# random configs filters
assert len(configs_rest) > 0
if len(configs_rest) > 3:
_random_configs = random.sample(configs_rest, 3)
else:
_random_configs = configs_rest
# create filters
for _selected_config in _random_configs:
self.assert_details(_selected_config.name,
_selected_config.object_type,
_selected_config.namespace)
def assert_details(self, name, object_type,
namespace=None, error_messages=[], apply_filters=True):
logger.debug('Asserting details for: {}, in namespace: {}'.format(name, namespace))
# load config details page
config_details_ui = self.load_details_page(name, namespace, object_type,
force_refresh=False)
assert config_details_ui
assert name == config_details_ui.name
assert config_details_ui.text
# get config details from rest
config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=object_type,
object_name=name)
assert config_details_rest
assert name == config_details_rest.name
assert config_details_rest.text
# get config details from OC
config_details_oc = self.openshift_client.istio_config_details(
namespace=namespace,
object_name=name,
object_type=object_type)
assert config_details_oc
assert name == config_details_oc.name
for error_message in error_messages:
assert error_message in config_details_rest.error_messages, \
'Expected Error messages:{} is not in REST:{}'.format(
error_message,
config_details_rest.error_messages)
for error_message in config_details_ui.error_messages:
assert error_message in config_details_rest.error_messages, \
'UI Error messages:{} is not in REST:{}'.format(
error_message,
config_details_rest.error_messages)
# TODO for Gateways there is no way to check in UI if it is valid or N/A
assert config_details_ui.is_equal(
config_details_rest,
advanced_check=True if
config_details_rest.validation != IstioConfigValidation.NA
else False)
# find key: value pairs from UI in a REST
for config_ui in re.split(' ',
str(config_details_ui.text).
replace('\'', '').
replace('~', 'null').
replace('selfLink: >- ', 'selfLink: ').
replace(': > ', ': ').
replace('{', '').
replace('}', '').
replace(':" /', ':"/').
replace('"', '').
replace(' ,', ',').
replace(',', '').
replace('[', '').
replace(']', '').
replace('\\', '').
replace(' :', ':').
replace(' .', '.').
replace('...', '').
replace(' \/', '\/')):
if config_ui.endswith(':'):
ui_key = config_ui
elif config_ui.strip() != '-': # skip this line, it was for formatting
# the previous one was the key of this value
found = False
# make the REST result into the same format as shown in UI
# to compare only the values
for config_rest in str(config_details_rest.text).\
replace('\\n', '').\
replace('\\', '').\
replace('{', '').\
replace('}', '').\
replace('"', '').\
replace(',', '').\
replace('[', '').\
replace(']', '').\
split(' '):
if config_rest.endswith(':'):
rest_key = config_rest
else:
# the previous one was the key of this value
if ui_key == rest_key and config_ui == config_rest:
found = True
break
if not found and not self._is_skip_key(ui_key):
assert found, '{} {} not found in REST'.format(ui_key, config_ui)
found = False
# make the OC result into the same format as shown in UI
# to compare only the values
config_oc_list = str(config_details_oc.text).\
replace('\n', '').\
replace('\'', '').\
replace("\\n", '').\
replace(' - ', '').\
replace('{', '').\
replace('}', '').\
replace('"', '').\
replace(',', '').\
replace('[', '').\
replace(']', '').\
split(' ')
config_oc_list.append('kind:')
config_oc_list.append(config_details_oc._type)
if ui_key == 'apiVersion:' or ui_key == 'selfLink:':
continue
for config_oc in config_oc_list:
if config_oc.endswith(':'):
oc_key = re.sub('^f:', '', config_oc)
else:
# the previous one was the key of this value
if (ui_key == oc_key and config_ui == config_oc) or config_ui == 'null':
found = True
break
if not found and not self._is_skip_key(ui_key):
assert found, '{} {} not found in OC'.format(ui_key, config_ui)
logger.debug('Done asserting details for: {}, in namespace: {}'.format(name, namespace))
def _is_skip_key(self, key):
return 'last-applied-configuration' in key \
or key.startswith('f:') \
or 'managedFields' in key \
or 'creationTimestamp' in key \
or 'selfLink' in key
def test_gateway_create(self, name, hosts, port_name, port_number, namespaces):
logger.debug('Creating Gateway: {}, from namespaces: {}'.format(name, namespaces))
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=namespaces)
wait_to_spinner_disappear(self.browser)
self.page.actions.create_istio_config_gateway(name, hosts, port_name, port_number)
for namespace in namespaces:
self.assert_details(name, IstioConfigObjectType.GATEWAY.text, namespace)
def test_sidecar_create(self, name, egress_host, labels, namespaces):
logger.debug('Creating Sidecar: {}, from namespaces: {}'.format(name, namespaces))
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=namespaces)
wait_to_spinner_disappear(self.browser)
self.page.actions.create_istio_config_sidecar(name, egress_host, labels)
for namespace in namespaces:
self.assert_details(name, IstioConfigObjectType.SIDECAR.text, namespace)
def test_authpolicy_create(self, name, policy, namespaces, labels=None, policy_action=None):
logger.debug('Creating AuthorizationPolicy: {}, from namespaces: {}'.format(name,
namespaces))
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=namespaces)
wait_to_spinner_disappear(self.browser)
is_created = self.page.actions.create_istio_config_authpolicy(name=name,
policy=policy,
labels=labels,
policy_action=policy_action)
if policy_action == AuthPolicyActionType.DENY.text:
            # in the case of a DENY action, the Create button is disabled
assert not is_created, "Should not create but in fact created AuthPolicy"
else:
assert is_created
if is_created:
for namespace in namespaces:
self.assert_details(name, IstioConfigObjectType.AUTHORIZATION_POLICY.text,
namespace)
config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=IstioConfigObjectType.AUTHORIZATION_POLICY.text,
object_name=name)
if policy == AuthPolicyType.ALLOW_ALL.text or \
policy_action == AuthPolicyActionType.ALLOW.text:
assert '\"action\": \"ALLOW\"' in config_details_rest.text
def test_peerauth_create(self, name, namespaces, expected_created=True,
labels=None, mtls_mode=None, mtls_ports={}):
logger.debug('Creating PeerAuthentication: {}, from namespaces: {}'.format(name,
namespaces))
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=namespaces)
wait_to_spinner_disappear(self.browser)
is_created = self.page.actions.create_istio_config_peerauth(
name, labels, mtls_mode, mtls_ports)
assert not expected_created ^ is_created, \
"Created expected {} but should be {}".format(expected_created,
is_created)
if is_created:
for namespace in namespaces:
self.assert_details(name, IstioConfigObjectType.PEER_AUTHENTICATION.text,
namespace)
config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=IstioConfigObjectType.PEER_AUTHENTICATION.text,
object_name=name)
if mtls_mode:
assert '\"mode\": \"{}\"'.format(mtls_mode) in config_details_rest.text
if labels:
assert labels.replace('=', '\": \"') in config_details_rest.text
if mtls_ports:
for _key, _value in mtls_ports.items():
assert '\"portLevelMtls\": \"{}\": \"mode\": \"{}\"'.format(_key, _value) \
in config_details_rest.text.replace('{', '').replace('}', '')
def test_requestauth_create(self, name, namespaces, expected_created=True,
labels=None, jwt_rules={}):
logger.debug('Creating RequestAuthentication: {}, from namespaces: {}'.format(
name,
namespaces))
# load the page first
self.page.load(force_load=True)
# apply namespace
self.apply_namespaces(namespaces=namespaces)
wait_to_spinner_disappear(self.browser)
is_created = self.page.actions.create_istio_config_requestauth(name, labels, jwt_rules)
assert not expected_created ^ is_created, \
"Created expected {} but should be {}".format(expected_created,
is_created)
if is_created:
for namespace in namespaces:
self.assert_details(name, IstioConfigObjectType.REQUEST_AUTHENTICATION.text,
namespace)
config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=IstioConfigObjectType.REQUEST_AUTHENTICATION.text,
object_name=name)
if labels:
assert labels.replace('=', '\": \"') in config_details_rest.text
if jwt_rules:
for _key, _value in jwt_rules.items():
assert '\"{}\": \"{}\"'.format(_key, _value) in config_details_rest.text
def delete_istio_config(self, name, object_type, namespace=None):
logger.debug('Deleting istio config: {}, from namespace: {}'.format(name, namespace))
self.load_details_page(name, namespace, object_type, force_refresh=False, load_only=True)
# TODO: wait for all notification boxes to disappear, those are blocking the button
time.sleep(10)
self.page.actions.select('Delete')
self.browser.click(self.browser.element(
parent=ListViewAbstract.DIALOG_ROOT,
locator=('.//button[text()="Delete"]')))
def assert_host_link(self, config_name, namespace, host_name, is_link_expected=True):
logger.debug('Asserting host link for: {}, in namespace: {}'.format(config_name, namespace))
# load config details page
self.load_details_page(config_name, namespace, force_refresh=False, load_only=True)
assert not is_link_expected ^ self.is_host_link(host_name)
def click_on_gateway(self, name, namespace):
self.browser.click(self.browser.element(locator=self.page.content.CONFIG_TAB_OVERVIEW,
parent=self.page.content.CONFIG_TABS_PARENT))
self.browser.click(
'.//a[contains(@href, "/namespaces/{}/istio/gateways/{}")]'.format(namespace, name),
parent=self.page.content.locator)
def get_additional_filters(self, namespaces, current_filters):
logger.debug('Current filters:{}'.format(current_filters))
# get rules of a namespace
_namespace = namespaces[0]
logger.debug('Running Rules REST query for namespace:{}'.format(_namespace))
_istio_config_list = self.kiali_client.istio_config_list(
namespaces=[_namespace])
logger.debug('Query response, Namespace:{}, Istio config list:{}'.format(
_namespace, _istio_config_list))
# if we have a config, select a config randomly and return it
if len(_istio_config_list) > 0:
_random_config = random.choice(_istio_config_list)
return [
{
'name': self.FILTER_ENUM.ISTIO_NAME.text,
'value': _random_config.name
}
]
return []
class DistributedTracingPageTest(AbstractListPageTest):
def load_page(self, namespaces, force_clear_all):
self.page.load(force_load=True)
# apply namespaces
self.apply_namespaces(namespaces, force_clear_all=force_clear_all)
def __init__(self, kiali_client, openshift_client, browser):
AbstractListPageTest.__init__(
self, kiali_client=kiali_client,
openshift_client=openshift_client, page=DistributedTracingPage(browser))
self.browser = browser
def assert_search_traces(self, service_name, namespaces=[], force_clear_all=True):
# test Search Traces for provided Namespace and Service
self.load_page(namespaces, force_clear_all)
self.page.traces.search_traces(service_name)
assert not self.page.traces.is_oc_login_displayed, "OC Login should not be displayed"
if not self.page.traces.has_no_results:
assert self.page.traces.has_results
class ValidationsTest(object):
def __init__(self, kiali_client, objects_path, openshift_client, browser=None):
self.kiali_client = kiali_client
self.openshift_client = openshift_client
self.browser = browser
self.objects_path = objects_path
def _istio_config_create(self, yaml_file, namespace):
self._istio_config_delete(yaml_file, namespace=namespace)
oc_apply(yaml_file=yaml_file,
namespace=namespace)
def _istio_config_delete(self, yaml_file, namespace):
oc_delete(yaml_file=yaml_file,
namespace=namespace)
def test_istio_objects(self, scenario, namespace=None,
config_validation_objects=[],
tls_type=None,
namespace_tls_objects=[],
ignore_common_errors=True):
"""
All the testing logic goes here.
        It creates the provided scenario yaml in the provided namespace,
        and then validates that the given Istio objects contain the expected error_messages.
"""
yaml_file = get_yaml_path(self.objects_path, scenario)
try:
self._istio_config_create(yaml_file, namespace=namespace)
for _object in config_validation_objects:
self._test_validation_errors(object_type=_object.object_type,
object_name=_object.object_name,
namespace=_object.namespace,
error_messages=_object.error_messages,
ignore_common_errors=ignore_common_errors)
if tls_type:
self._test_mtls_settings(tls_type,
namespace_tls_objects)
finally:
self._istio_config_delete(yaml_file, namespace=namespace)
def test_service_validation(self, scenario, service_name, namespace,
service_validation_objects=[]):
"""
All the testing logic goes here.
        It creates the provided service scenario yaml in the provided namespace,
        and then validates that the given Service objects contain the expected error_messages.
"""
yaml_file = get_yaml_path(self.objects_path, scenario)
try:
self._istio_config_create(yaml_file, namespace=namespace)
for _object in service_validation_objects:
service_details_rest = self.kiali_client.service_details(
namespace=namespace,
service_name=service_name)
found = False
for error_message in service_details_rest.validations:
if error_message == _object.error_message:
found = True
assert found, 'Error messages:{} is not in List:{}'.\
format(_object.error_message,
service_details_rest.validations)
finally:
self._istio_config_delete(yaml_file, namespace=namespace)
def _test_validation_errors(self, object_type, object_name, namespace,
error_messages=[], ignore_common_errors=True):
        # get config details from rest
config_details_rest = self.kiali_client.istio_config_details(
namespace=namespace,
object_type=object_type,
object_name=object_name)
rest_error_messages = config_details_rest.error_messages
if ignore_common_errors:
remove_from_list(rest_error_messages, KIA0201)
remove_from_list(rest_error_messages, KIA0301)
if self.openshift_client.is_auto_mtls():
# remove errors which are ignored during auto mtls
remove_from_list(error_messages, KIA0501)
remove_from_list(error_messages, KIA0204)
remove_from_list(error_messages, KIA0205)
remove_from_list(error_messages, KIA0401)
remove_from_list(error_messages, KIA0206)
assert len(error_messages) == len(rest_error_messages), \
'Error messages are different Expected:{}, Got:{}'.\
format(error_messages,
rest_error_messages)
for error_message in error_messages:
assert error_message in rest_error_messages, \
'Error messages:{} is not in List:{}'.\
format(error_message,
rest_error_messages)
def _test_mtls_settings(self, tls_type, namespace_tls_objects):
"""
        Validates both the mesh-wide mTLS settings in the toolbar
        and the namespace-wide TLS settings per namespace on the Overview page.
"""
_tests = OverviewPageTest(
kiali_client=self.kiali_client, openshift_client=self.openshift_client,
browser=self.browser)
actual_mtls_type = _tests.get_mesh_wide_tls()
assert actual_mtls_type == tls_type, \
'Mesh-wide TLS type expected: {} got: {}'.format(tls_type, actual_mtls_type)
if namespace_tls_objects:
overview_items = _tests.page.content.all_items
for tls_object in namespace_tls_objects:
for overview_item in overview_items:
if overview_item.namespace == tls_object.namespace:
assert tls_object.tls_type == overview_item.tls_type, \
'Namespace TLS type expected: {} got: {} for {}'.format(
tls_object.tls_type,
overview_item.tls_type,
overview_item.namespace)
class ConfigValidationObject(object):
def __init__(self, object_type, object_name, namespace=None, error_messages=[]):
self.namespace = namespace
self.object_type = object_type
self.object_name = object_name
self.error_messages = error_messages
class ServiceValidationObject(object):
def __init__(self, error_message, severity=None):
self.error_message = error_message
self.severity = severity
class NamespaceTLSObject(object):
def __init__(self, namespace, tls_type):
self.namespace = namespace
self.tls_type = tls_type
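# A hedged sketch of how ValidationsTest and the helper objects defined above are
# typically wired together in a test. The scenario file, namespace and expected
# message below are placeholders, not values taken from the real test suite.
def _validations_usage_sketch(kiali_client, openshift_client, objects_path, browser=None):
    tests = ValidationsTest(kiali_client=kiali_client,
                            objects_path=objects_path,
                            openshift_client=openshift_client,
                            browser=browser)
    tests.test_istio_objects(
        scenario='scenario-with-broken-config.yaml',   # placeholder scenario yaml
        namespace='bookinfo',                          # placeholder namespace
        config_validation_objects=[
            ConfigValidationObject(
                object_type='virtualservices',         # placeholder object type
                object_name='example-vs',              # placeholder object name
                namespace='bookinfo',
                error_messages=['<expected validation message>'])
        ])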
| 48.370986
| 100
| 0.605571
|
6422bf4e55dad0fdee06ac0a5e229a777d58065b
| 1,047
|
py
|
Python
|
FsStationB/BCKG/REST/tools/PyClient/generator.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 6
|
2021-09-29T15:46:55.000Z
|
2021-12-14T18:39:51.000Z
|
FsStationB/BCKG/REST/tools/PyClient/generator.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | null | null | null |
FsStationB/BCKG/REST/tools/PyClient/generator.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 3
|
2021-09-27T10:35:20.000Z
|
2021-10-02T17:53:07.000Z
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import click
from openapi_parser.exporter import PackageWriter
from openapi_parser.parser.loader import OpenApiParser
@click.command()
@click.option(
"--source",
help="REST API .yml definition file",
default='../../../bckg_api.yml',
required=False,
)
@click.option(
"--pyclient",
help="Target directory for Python client library",
default='../../src/PyClient',
required=False,
)
def main(source, pyclient):
parser = OpenApiParser.open(source)
parser.load_all()
package_writer = PackageWriter(parser, destination_dir=pyclient)
package_writer.write_package(clean=True)
return 0
if (__name__ == '__main__'):
exit_code = main()
exit(exit_code)
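# Example invocation as a script (the paths are just the defaults declared in the
# click options above; adjust them to your checkout layout):
#   python generator.py --source ../../../bckg_api.yml --pyclient ../../src/PyClient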
| 29.914286
| 93
| 0.583572
|
7b908ad447d57da704bafc463482392d5b91679e
| 526
|
py
|
Python
|
osrsguru/osrsgame/migrations/0002_auto_20210322_1518.py
|
OSRSGuru/OSRS.guru-BackEnd
|
472096804bdbe4cf5d2abe45ab69e326043aa2be
|
[
"BSD-2-Clause"
] | null | null | null |
osrsguru/osrsgame/migrations/0002_auto_20210322_1518.py
|
OSRSGuru/OSRS.guru-BackEnd
|
472096804bdbe4cf5d2abe45ab69e326043aa2be
|
[
"BSD-2-Clause"
] | null | null | null |
osrsguru/osrsgame/migrations/0002_auto_20210322_1518.py
|
OSRSGuru/OSRS.guru-BackEnd
|
472096804bdbe4cf5d2abe45ab69e326043aa2be
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-22 15:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osrsgame', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='server',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='servers', to='osrsgame.geographicallocation'),
),
]
| 26.3
| 165
| 0.655894
|
846ede6d543cb6759b38dece0f16c593fa59f278
| 601
|
py
|
Python
|
python3/koans/about_triangle_project2.py
|
zhangyanan0525/python_koans
|
ee6500feef9848976378ff6744623ca5cbaf02d1
|
[
"MIT"
] | null | null | null |
python3/koans/about_triangle_project2.py
|
zhangyanan0525/python_koans
|
ee6500feef9848976378ff6744623ca5cbaf02d1
|
[
"MIT"
] | null | null | null |
python3/koans/about_triangle_project2.py
|
zhangyanan0525/python_koans
|
ee6500feef9848976378ff6744623ca5cbaf02d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# You need to finish implementing triangle() in the file 'triangle.py'
from .triangle import *
class AboutTriangleProject2(Koan):
# The first assignment did not talk about how to handle errors.
# Let's handle that part now.
def test_illegal_triangles_throw_exceptions(self):
# All sides should be greater than 0
triangle(4, 4, 4)
triangle(3, 4, 4)
# The sum of any two sides should be greater than the third one
        triangle(4, 4, 3)
triangle(4, 5, 4)
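# A hedged sketch of the validation triangle() is expected to gain for this koan.
# It is illustrative only: the koan's triangle.py defines its own error type and
# return values, so the names below (ValueError, the result strings) are assumptions.
def _reference_triangle(a, b, c):
    if min(a, b, c) <= 0:
        raise ValueError('All sides should be greater than 0')
    if a + b <= c or a + c <= b or b + c <= a:
        raise ValueError('The sum of any two sides should be greater than the third one')
    if a == b == c:
        return 'equilateral'
    if a == b or b == c or a == c:
        return 'isosceles'
    return 'scalene'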
| 27.318182
| 71
| 0.640599
|
59c241a87af6e0aa176230c60d7bc76ff9f916c6
| 743
|
py
|
Python
|
bot/models/Results/ResultsLoader.py
|
estebanthi/BinanceTradingBotV4
|
93ccac86db9bc8612248bf5cd1bc9e359749e383
|
[
"MIT"
] | 4
|
2021-11-22T13:59:11.000Z
|
2022-02-10T08:27:25.000Z
|
bot/models/Results/ResultsLoader.py
|
estebanthi/BinanceTradingBotV4
|
93ccac86db9bc8612248bf5cd1bc9e359749e383
|
[
"MIT"
] | null | null | null |
bot/models/Results/ResultsLoader.py
|
estebanthi/BinanceTradingBotV4
|
93ccac86db9bc8612248bf5cd1bc9e359749e383
|
[
"MIT"
] | 3
|
2021-11-15T18:49:20.000Z
|
2022-02-06T19:46:29.000Z
|
import pickle
import yaml
from models.MongoDriver import MongoDriver
class ResultsLoader:
"""
Useful for loading results
If it generates errors, add strategies in imports
"""
def __init__(self):
self.default_path = f"data/backtesting_results/"
def load(self, filename="results.dat", use_mongo=True):
with open("config.yml", "r") as file:
data = yaml.safe_load(file)
if data["mongo_url"] and use_mongo: # If MongoDB is used
mongo_driver = MongoDriver()
mongo_driver.connect()
return pickle.loads(mongo_driver.get_result(filename)['object'])
with open(self.default_path + filename, "rb") as file:
return pickle.load(file)
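if __name__ == "__main__":
    # Illustrative usage only: "results.dat" is the default filename, and whether
    # MongoDB is consulted depends on mongo_url being set in config.yml.
    loader = ResultsLoader()
    results = loader.load("results.dat", use_mongo=False)  # read the local pickle directly
    print(type(results))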
| 28.576923
| 76
| 0.647376
|
5f86d4e41d794d29f5bb6046f69e15bb224b7a42
| 4,696
|
py
|
Python
|
core/domain/visualization_registry_test.py
|
yash10019coder/oppia
|
8c349c61ac723a2fd507046b20957934cba70e3a
|
[
"Apache-2.0"
] | 5
|
2022-01-22T17:22:23.000Z
|
2022-02-04T09:21:24.000Z
|
core/domain/visualization_registry_test.py
|
yash10019coder/oppia
|
8c349c61ac723a2fd507046b20957934cba70e3a
|
[
"Apache-2.0"
] | null | null | null |
core/domain/visualization_registry_test.py
|
yash10019coder/oppia
|
8c349c61ac723a2fd507046b20957934cba70e3a
|
[
"Apache-2.0"
] | 2
|
2022-03-22T16:57:32.000Z
|
2022-03-24T17:38:42.000Z
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods in the visualization registry."""
from __future__ import annotations
import importlib
import inspect
import re
from core.domain import visualization_registry
from core.tests import test_utils
class VisualizationRegistryUnitTests(test_utils.GenericTestBase):
"""Test for the visualization registry."""
def test_visualization_registry(self):
"""Sanity checks on the visualization registry."""
self.assertGreater(
len(visualization_registry.Registry.get_all_visualization_ids()),
0)
def test_get_visualization_class_with_invalid_id_raises_error(self):
with self.assertRaisesRegex(
TypeError, 'is not a valid visualization id.'):
visualization_registry.Registry.get_visualization_class(
'invalid_visualization_id')
def test_visualization_class_with_invalid_option_names(self):
sorted_tiles = visualization_registry.Registry.get_visualization_class(
'SortedTiles')
sorted_tiles_instance = sorted_tiles('AnswerFrequencies', {}, True)
with self.assertRaisesRegex(
Exception,
re.escape(
'For visualization SortedTiles, expected option names '
'[\'header\', \'use_percentages\']; received names []')):
sorted_tiles_instance.validate()
def test_visualization_class_with_invalid_option_value(self):
sorted_tiles = visualization_registry.Registry.get_visualization_class(
'SortedTiles')
option_names = {
'header': 'Pretty Tiles!',
'use_percentages': 'invalid_value'
}
sorted_tiles_instance = sorted_tiles(
'AnswerFrequencies', option_names, True)
with self.assertRaisesRegex(
Exception, 'Expected bool, received invalid_value'):
sorted_tiles_instance.validate()
def test_visualization_class_with_invalid_addressed_info_is_supported_value(
self):
sorted_tiles = visualization_registry.Registry.get_visualization_class(
'SortedTiles')
option_names = {
'header': 'Pretty Tiles!',
'use_percentages': True
}
sorted_tiles_instance = sorted_tiles(
'AnswerFrequencies', option_names, 'invalid_value')
with self.assertRaisesRegex(
Exception,
'For visualization SortedTiles, expected a bool value for '
'addressed_info_is_supported; received invalid_value'):
sorted_tiles_instance.validate()
def test_get_all_visualization_ids(self):
visualization_ids = (
visualization_registry.Registry.get_all_visualization_ids())
expected_visualizations = ['FrequencyTable', 'ClickHexbins',
'EnumeratedFrequencyTable', 'SortedTiles']
self.assertEqual(
sorted(visualization_ids), sorted(expected_visualizations))
class VisualizationsNameTests(test_utils.GenericTestBase):
def test_visualization_names(self):
"""This function checks for duplicate visualizations."""
all_python_files = self.get_all_python_files()
all_visualizations = []
for file_name in all_python_files:
python_module = importlib.import_module(file_name)
for name, clazz in inspect.getmembers(
python_module, predicate=inspect.isclass):
all_base_classes = [base_class.__name__ for base_class in
(inspect.getmro(clazz))]
# Check that it is a subclass of 'BaseVisualization'.
if 'BaseVisualization' in all_base_classes:
all_visualizations.append(name)
expected_visualizations = ['BaseVisualization', 'FrequencyTable',
'EnumeratedFrequencyTable', 'ClickHexbins',
'SortedTiles']
self.assertEqual(
sorted(all_visualizations), sorted(expected_visualizations))
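# A standalone sketch of the subclass-detection idiom used in the test above:
# inspect.getmro(cls) returns cls followed by all of its base classes, so checking
# the resulting class names finds every class derived from a given base.
# (Illustrative helper only; it is not used by the registry tests.)
def _find_subclasses_sketch(module, base_class_name):
    matches = []
    for name, clazz in inspect.getmembers(module, predicate=inspect.isclass):
        ancestor_names = [base.__name__ for base in inspect.getmro(clazz)]
        if base_class_name in ancestor_names:
            matches.append(name)
    return matches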
| 38.809917
| 80
| 0.669719
|
4644807a5ab01b0f61a49b057563ff68290e00dd
| 10,773
|
py
|
Python
|
fileson_backup.py
|
flyworker/fileson
|
80d9f0b9c060dff3aca68e4cac4df3daa4d454d5
|
[
"MIT"
] | null | null | null |
fileson_backup.py
|
flyworker/fileson
|
80d9f0b9c060dff3aca68e4cac4df3daa4d454d5
|
[
"MIT"
] | null | null | null |
fileson_backup.py
|
flyworker/fileson
|
80d9f0b9c060dff3aca68e4cac4df3daa4d454d5
|
[
"MIT"
] | 1
|
2021-04-29T01:28:23.000Z
|
2021-04-29T01:28:23.000Z
|
#!/usr/bin/env python3
from collections import defaultdict, namedtuple
from fileson import Fileson, gmt_str, gmt_epoch
from logdict import LogDict
from crypt import keygen as kg, AESFile, sha1, calc_etag
import argparse, os, sys, json, signal, time, hashlib, inspect, shutil, re
import boto3, threading
class BotoProgress(object):
def __init__(self, ptype):
self._seen = 0
self._last = 0
self._type = ptype
self._lock = threading.Lock()
def __call__(self, bytes_amount):
with self._lock:
self._seen += bytes_amount
if self._last + 2**20 > self._seen: return # every 1 MB
sys.stdout.write("\r%.2f MB %sed" % (self._seen / 2**20, self._type))
sys.stdout.flush()
self._last = self._seen
class S3Action(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
        m = re.match(r's3://(\w+)/(.+)', values)
if not m: raise ValueError('S3 address in format s3://bucket/objpath')
setattr(namespace, self.dest, (m.group(1), m.group(2)))
# Return key or if a filename, its contents
def key_or_file(key):
if isinstance(key, bytes): return key # passthrough
if os.path.exists(key):
with open(key, 'r') as f: key = ''.join(f.read().split())
return bytes.fromhex(key)
# These are the different argument types that can be added to a command
arg_adders = {
'password': lambda p: p.add_argument('password', type=str, nargs='?', help='Password', default=None),
'salt': lambda p: p.add_argument('salt', type=str, nargs='?', help='Salt', default=None),
'input': lambda p: p.add_argument('input', type=str, help='Input file'),
'output': lambda p: p.add_argument('output', type=str, help='Output file'),
's3path': lambda p: p.add_argument('s3path', type=str, action=S3Action,
help='S3 path in form s3://bucket/objpath'),
'deep_archive': lambda p: p.add_argument('-d', '--deep-archive', action='store_true',
help='Upload to S3 DEEP_ARCHIVE storage class'),
'in_obj': lambda p: p.add_argument('in_obj', type=str, help='Input file or S3 object name'),
'out_obj': lambda p: p.add_argument('out_obj', type=str, help='Output file or S3 object name'),
'key': lambda p: p.add_argument('key', type=str,
help='Key in hex format or filename of the keyfile'),
'keyfile': lambda p: p.add_argument('-k', '--keyfile', type=str,
help='Key in hex format or filename of the keyfile'),
'partsize': lambda p: p.add_argument('-p', '--partsize', type=int,
default=8, help='Multipart upload partsize (default 8 matching boto3)'),
'iterations': lambda p: p.add_argument('-i', '--iterations', type=str,
default='1M', help='PBKDF2 iterations (default 1M)'),
'dbfile': lambda p: p.add_argument('dbfile', type=str,
help='Database file (JSON format)'),
'logfile': lambda p: p.add_argument('logfile', type=str,
help='Logfile to append all operations to'),
'source': lambda p: p.add_argument('source', type=str,
help='Source directory'),
'destination': lambda p: p.add_argument('destination', type=str,
help='Destination directory'),
'dir': lambda p: p.add_argument('dir', nargs='?', type=str, default=None,
help='Directory to scan'),
'verbose': lambda p: p.add_argument('-v', '--verbose', action='count',
default=0, help='Print verbose status. Repeat for even more.'),
'force': lambda p: p.add_argument('-f', '--force', action='store_true',
help='Force action without additional prompts'),
}
logfiles = []
def close_logs(*_args):
    # registered as a SIGINT handler below, so accept (signum, frame) and ignore them
    while logfiles: logfiles.pop().close()
# Function per command
def keygen(args):
"""Create a 32 byte key for AES256 encryption with a password and salt."""
if not args.password:
if args.verbose: print('No password specified, generating random key')
print(os.urandom(32).hex())
return
if not args.salt:
print('Specify password AND salt or neither!')
return
iterations = int(args.iterations.replace('M', '000k').replace('k', '000'))
start = time.time()
keyhex = kg(args.password, args.salt, iterations).hex()
print(keyhex)
if args.verbose: print('Generating that took %.3f seconds' % (time.time()-start))
keygen.args = 'password salt iterations verbose'.split()
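# A minimal sketch of what a PBKDF2-based derivation like kg() above typically does,
# using only the standard library. This is an illustration of the idea, not the
# actual implementation in crypt.keygen; the hash and output length are assumptions
# consistent with "a 32 byte key for AES256".
def _pbkdf2_sketch(password: str, salt: str, iterations: int = 1_000_000) -> bytes:
    import hashlib
    return hashlib.pbkdf2_hmac('sha256', password.encode(), salt.encode(),
                               iterations, dklen=32)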
def cryptfile(infile, outfile, verbose=False):
startTime, bs = time.time(), 0
while True:
data = infile.read(65536)
if not data: break
outfile.write(data)
bs += len(data)
secs = time.time() - startTime
if verbose: print('%d b in %.1f s, %.2f GiB/s' % (bs, secs, bs/2**30/secs))
def encrypt(args):
if not args.force and os.path.exists(args.output) and not 'y' in \
input('Output exists! Do you wish to overwrite? [y/n] '): return
with AESFile(args.input, 'rb', key_or_file(args.key)) as fin:
with open(args.output, 'wb') as fout:
cryptfile(fin, fout, verbose=args.verbose)
encrypt.args = 'input output key verbose force'.split()
def decrypt(args):
if not args.force and os.path.exists(args.output) and not 'y' in \
input('Output exists! Do you wish to overwrite? [y/n] '): return
with open(args.input, 'rb') as fin:
with AESFile(args.output, 'wb', key_or_file(args.key)) as fout:
cryptfile(fin, fout, verbose=args.verbose)
decrypt.args = 'input output key verbose force'.split()
def etag(args):
with open(args.input, 'rb') as f: print(calc_etag(f, args.partsize))
etag.args = 'input partsize'.split()
def upload(args):
bucket, objpath = args.s3path
s3 = boto3.client('s3')
if args.keyfile: fp = AESFile(args.input, 'rb', key_or_file(args.keyfile))
else: fp = open(args.input, 'rb')
if args.verbose: print('Upload', args.input, 'to', bucket, objpath)
extra = {'Callback': BotoProgress('upload')}
if args.deep_archive: extra['ExtraArgs'] = {'StorageClass': 'DEEP_ARCHIVE'}
s3.upload_fileobj(fp, bucket, objpath, **extra)
fp.close()
upload.args = 'input s3path keyfile deep_archive verbose'.split()
def download(args):
bucket, objpath = args.s3path
s3 = boto3.client('s3')
if args.keyfile: fp = AESFile(args.output, 'wb', key_or_file(args.keyfile))
else: fp = open(args.output, 'wb')
if args.verbose: print('Download', bucket, objpath, 'to', args.output)
s3.download_fileobj(bucket, objpath, fp, Callback=BotoProgress('download'))
fp.close()
download.args = 's3path output keyfile verbose'.split()
def backup(args):
"""Perform backup based on latest Fileson DB state."""
fs = Fileson.load_or_scan(args.dbfile, checksum='sha1')
if fs.get(':checksum:', None) != 'sha1':
print('Backup only works with full SHA1 hash. Safety first.')
return
log = Fileson.load(args.logfile)
log.startLogging(args.logfile)
log[':backup:'] = log.get(':backup:', 0) + 1
log[':dbfile:'] = args.dbfile
log[':date_gmt:'] = gmt_str()
log[':destination:'] = args.destination
if args.keyfile:
key = key_or_file(args.keyfile)
log[':keyhash:'] = sha1(key).hex()
    m = re.match(r's3://(\w+)/(.+)', args.destination)
if m:
bucket, folder = m.group(1), m.group(2)
myargs = namedtuple('myargs', 'input s3path keyfile deep_archive verbose')
make_backup = lambda a,b: upload(myargs(a, (bucket, folder+'/'+b),
key if args.keyfile else None, args.deep_archive, True))
else:
if args.keyfile:
myargs = namedtuple('myargs', 'input output key verbose force')
make_backup = lambda a,b: encrypt(myargs(a,
os.path.join(args.destination, b), key, False, True))
else: make_backup = lambda a,b: shutil.copyfile(a,
os.path.join(args.destination, b))
uploaded = { log[p]['sha1']: p for p in log.files() }
seed = log[':date_gmt:'] # for backup filename generation
for p in fs.files():
o = fs[p]
if o['sha1'] in uploaded:
if args.verbose: print('Already uploaded', p)
continue
name = sha1(seed+o['sha1']).hex() # deterministic random name
print('Backup', p.split(os.sep)[-1], o['sha1'], 'to', name)
make_backup(os.path.join(fs[':directory:'], p), name)
log[name] = { 'sha1': o['sha1'], 'size': o['size'] }
log.endLogging()
backup.args = 'dbfile logfile destination keyfile deep_archive verbose'.split() # args to add
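# Sketch of the deterministic naming scheme used in backup() above: the backup
# object name is the SHA1 of the run's date seed concatenated with the file's own
# SHA1, so identical content within one run always maps to the same opaque name.
# Uses hashlib directly; the crypt.sha1 helper is assumed to behave equivalently.
def _backup_name_sketch(seed: str, file_sha1: str) -> str:
    import hashlib
    return hashlib.sha1((seed + file_sha1).encode()).hexdigest()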
def restore(args):
"""Restore backup based on Fileson DB and backup log."""
fs = Fileson.load(args.dbfile)
if fs.get(':checksum:', None) != 'sha1':
print('Cannot restore without SHA1 hash.')
return
log = Fileson.load(args.logfile)
if args.keyfile:
key = key_or_file(args.keyfile)
myargs = namedtuple('myargs', 'input output key verbose force')
make_restore = lambda a,b: decrypt(myargs(a, b, key, False, True))
keyhash = sha1(key).hex()
if keyhash != log[':keyhash:']:
print(f'Provided key hash {keyhash} does not match backup file!')
return
else: make_restore = lambda a,b: shutil.copyfile(a, b)
uploaded = { log[p]['sha1']: p for p in log.files() }
for p in sorted(fs.dirs()):
fp = args.destination
if p != '.': fp = os.path.join(fp, p)
print('mkdir', fp)
os.makedirs(fp, exist_ok=True)
mtime = gmt_epoch(fs[p]['modified_gmt'])
os.utime(fp, (mtime, mtime))
for p in sorted(fs.files()):
b = uploaded.get(fs[p]['sha1'], None)
if not b:
print('Missing', p, fs[p])
continue
fp = os.path.join(args.destination, p)
bp = os.path.join(args.source, b)
print('get', fp, 'from', bp)
make_restore(bp, fp)
mtime = gmt_epoch(fs[p]['modified_gmt'])
os.utime(fp, (mtime, mtime))
restore.args = 'dbfile logfile source destination keyfile verbose'.split() # args to add
if __name__ == "__main__":
# register signal handler to close any open log files
signal.signal(signal.SIGINT, close_logs)
# create the top-level parser
parser = argparse.ArgumentParser(description='Fileson backup utilities')
subparsers = parser.add_subparsers(help='sub-command help')
# add commands using function metadata and properties
for name,cmd in inspect.getmembers(sys.modules[__name__]):
if inspect.isfunction(cmd) and hasattr(cmd, 'args'):
cmd.parser = subparsers.add_parser(cmd.__name__, description=cmd.__doc__)
for argname in cmd.args: arg_adders[argname](cmd.parser)
cmd.parser.set_defaults(func=cmd)
# parse the args and call whatever function was selected
args = parser.parse_args()
if len(sys.argv)==1: parser.print_help(sys.stderr)
else: args.func(args)
| 42.581028
| 101
| 0.640954
|
41114e650e03fde2ce23ce7e45dd55848131257a
| 199
|
py
|
Python
|
services/payment-service/payments/handlers.py
|
ptanlam/eShop
|
28391c32e0d6a888bd34840b9223fc8f82fb2495
|
[
"MIT"
] | 1
|
2022-01-11T05:49:01.000Z
|
2022-01-11T05:49:01.000Z
|
services/payment-service/payments/handlers.py
|
NguyenVinhPhuoc/eshop
|
da9ec6f8bea4b1bce66dd09ce0134272203c3e0f
|
[
"MIT"
] | 1
|
2022-01-02T08:18:47.000Z
|
2022-01-02T08:18:47.000Z
|
services/payment-service/payments/handlers.py
|
NguyenVinhPhuoc/eshop
|
da9ec6f8bea4b1bce66dd09ce0134272203c3e0f
|
[
"MIT"
] | 2
|
2022-01-11T05:48:57.000Z
|
2022-01-20T02:46:36.000Z
|
from .services import PaymentsService
import payments_pb2_grpc
def grpc_handler(server):
payments_pb2_grpc.add_PaymentsServiceServicer_to_server(
PaymentsService.as_servicer(), server)
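# A hedged sketch of how grpc_handler() is typically wired into a server. The
# port, worker count and entry point are assumptions, not this service's config.
def _serve_sketch():
    from concurrent import futures
    import grpc

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    grpc_handler(server)                    # register PaymentsService on the server
    server.add_insecure_port('[::]:50051')  # assumed port
    server.start()
    server.wait_for_termination()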
| 24.875
| 60
| 0.819095
|
dd8f3987fa32f9d9734c87e5a60de60e84f93569
| 67
|
py
|
Python
|
run.py
|
andrefaranha/flask-blog
|
a491386bddc5b6d3cd4706347b497945d711a3ea
|
[
"MIT"
] | null | null | null |
run.py
|
andrefaranha/flask-blog
|
a491386bddc5b6d3cd4706347b497945d711a3ea
|
[
"MIT"
] | 2
|
2021-04-06T18:09:10.000Z
|
2021-06-02T02:41:15.000Z
|
wsgi.py
|
sh4rpy/wllpprs
|
678a004bcdfdc6c512f1fc9894172a49befb7e53
|
[
"BSD-3-Clause"
] | null | null | null |
from app.app import app
if __name__ == '__main__':
app.run()
| 11.166667
| 26
| 0.641791
|
ee2f8066cf7a484895d4e875195dbf03c0fb3c5c
| 1,152
|
py
|
Python
|
app/models/forms.py
|
rodrigolins92/Flask
|
253f2f3162184c2a3421903e87a71f3997e5e3dd
|
[
"Apache-2.0"
] | null | null | null |
app/models/forms.py
|
rodrigolins92/Flask
|
253f2f3162184c2a3421903e87a71f3997e5e3dd
|
[
"Apache-2.0"
] | 1
|
2019-09-29T16:09:39.000Z
|
2019-09-29T16:09:39.000Z
|
app/models/forms.py
|
rodrigolins92/Flask
|
253f2f3162184c2a3421903e87a71f3997e5e3dd
|
[
"Apache-2.0"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import *
from wtforms.validators import DataRequired
from datetime import date, datetime
class LoginForm(FlaskForm):
username = StringField("username", validators=[DataRequired()])
password = PasswordField("password", validators=[DataRequired()])
remember_me = BooleanField("remember_me")
class PedidoForm(FlaskForm):
servico = SelectField("servico", choices=[('-- Selecione --', '-- Selecione --'),
('Cópia preto e branco', 'Cópia preto e branco'),
('Cópia colorida', 'Cópia colorida'),
('Encadernação', 'Encadernação'),
('Plastificação', 'Plastificação'),
('Plotagem', 'Plotagem'),
('Outros', 'Outros')])
observacao = TextAreaField("observacao")
    data_pedido = DateField(default=date.today)  # pass the callable so the default is evaluated per form, not at import time
quantidade = IntegerField()
preco = DecimalField(places=2)
status_conclusao = BooleanField()
class EstoqueForm(FlaskForm):
nome_item = StringField("nome_item", validators=[DataRequired()])
quantidade_estoque = IntegerField()
quantidade_minimo = IntegerField()
    data_atualizacao = DateTimeField(default=datetime.now)  # callable: evaluated per form, not at import time
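if __name__ == '__main__':
    # Illustrative only: instantiating a FlaskForm outside a real app requires an
    # app/request context and a secret key. The app and key below are throwaway
    # assumptions, not project configuration.
    from flask import Flask
    _app = Flask(__name__)
    _app.config['SECRET_KEY'] = 'dev'
    with _app.test_request_context():
        form = PedidoForm()
        # evaluated now because the defaults above are callables
        print(form.data_pedido.data)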
| 34.909091
| 82
| 0.701389
|
a46cbdce0065757d81e94584eef6531401ece5d5
| 1,467
|
py
|
Python
|
From Another World/ship.py
|
Grantlee11/From_Another_World_Pygame
|
1aa98162a458a1a4aacfbc9170eaa233db055e9e
|
[
"CC-BY-3.0"
] | null | null | null |
From Another World/ship.py
|
Grantlee11/From_Another_World_Pygame
|
1aa98162a458a1a4aacfbc9170eaa233db055e9e
|
[
"CC-BY-3.0"
] | null | null | null |
From Another World/ship.py
|
Grantlee11/From_Another_World_Pygame
|
1aa98162a458a1a4aacfbc9170eaa233db055e9e
|
[
"CC-BY-3.0"
] | null | null | null |
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, ai_settings, screen):
"""INITIALIZE THE SHIP AND SET ITS STARTING POSITION"""
super(Ship, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
# LOAD THE SHIP IMAGE AND GET ITS RECT
self.image = pygame.image.load('images/ship.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# START EACH NEW SHIP AT THE BOTTOM CENTER OF THE SCREEN
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
self.center = float(self.rect.centerx)
# MOVEMENT FLAGS
self.moving_right = False
self.moving_left = False
def update(self):
"""UPDATE THE SHIP'S POSITION BASED ON THE MOVEMENT FLAGS"""
# UPDATE THE SHIP'S CENTER VALUE, NOT THE RECT
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
self.rect.centerx = self.center
def blitme(self):
"""DRAW THE SHIP AT ITS CURRENT LOCATION"""
self.screen.blit(self.image, self.rect)
def center_ship(self):
"""CENTER THE SHIP ON THE SCREEN"""
self.center = self.screen_rect.centerx
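As a quick sanity check of the class above, a minimal, hypothetical game loop might look like the sketch below. It reuses the pygame import and Ship class from the module above; the _Settings stand-in and the window size are assumptions, and an images/ship.bmp asset must exist for pygame.image.load to succeed.

# Hypothetical usage sketch for Ship (not part of the original file).
class _Settings(object):
    """Stand-in for ai_settings; only the attribute Ship actually reads is provided."""
    ship_speed_factor = 1.5

def _demo():
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    ship = Ship(_Settings(), screen)
    ship.moving_right = True  # drift right to exercise update()
    clock = pygame.time.Clock()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        ship.update()
        screen.fill((0, 0, 0))
        ship.blitme()
        pygame.display.flip()
        clock.tick(60)
    pygame.quit()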
avg_line_length: 34.116279 | max_line_length: 74 | alphanum_fraction: 0.64758

hexsha: 326ee8361f04ac1c435305aa39fbcd72a5c336b9 | size: 2,215 | ext: py | lang: Python
max_stars:  script_example.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | ["MIT"] | count: null | null .. null
max_issues: script_example.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | ["MIT"] | count: 59 | 2021-03-26T19:22:59.000Z .. 2021-12-04T00:42:12.000Z
max_forks:  script_example.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | ["MIT"] | count: null | null .. null
'''
To run, you will need to install the following packages:
numpy
pyserial
pypylon
    Pillow (provides PIL.Image; the time module is part of the standard library and needs no install)
You will also need to install the camera driver from Basler
'''
# Additional LumaViewPro files
from trinamic import *
from ledboard import *
from pyloncamera import *
import time
from PIL import Image
led = LEDBoard()
xyz = TrinamicBoard()
cam = PylonCamera()
'''
# ----------------------------------------------------
# Controlling an LED
# ----------------------------------------------------
led.led_on(0, 50) # turn on LED at channel 0 at 50mA
time.sleep(1) # wait one second
led.led_off() # turn off all LEDs
# ----------------------------------------------------
# Controlling focus and XY stage
# ----------------------------------------------------
xyz.zhome() # home position (retracted) objective
xyz.xyhome() # home position of xy stage
xyz.move_abs_pos('X', 5800) # move to absolute position in um
xyz.move_abs_pos('Y', 3500) # move to absolute position in um
xyz.move_abs_pos('Z', 3270) # move to absolute position in um
# ----------------------------------------------------
# Controlling the Camera
# ----------------------------------------------------
if cam.active:
cam.frame_size(1900,1900)
cam.grab()
img = Image.fromarray(cam.array)
img.show()
'''
# ----------------------------------------------------
# Example
# ----------------------------------------------------
xyz.move_abs_pos('Z', 3270) # move to absolute position in um
time.sleep(2) # wait 2 sec
if cam.active:
cam.frame_size(1900,1900)
led.led_on(0, 50) # turn on LED at channel 0 at 50mA
time.sleep(1) # wait 1 sec
cam.grab()
img = Image.fromarray(cam.array)
img.show()
led.led_on(1, 100) # turn on LED at channel 1 at 100mA
time.sleep(1) # wait 1 sec
cam.grab()
img = Image.fromarray(cam.array)
img.show()
led.led_on(2, 150) # turn on LED at channel 2 at 150mA
time.sleep(1) # wait 1 sec
cam.grab()
img = Image.fromarray(cam.array)
img.show()
led.led_off() # turn off all LEDs
avg_line_length: 28.037975 | max_line_length: 65 | alphanum_fraction: 0.49842

hexsha: c38d1a46ba8a61295db467cd650167044d1e3b23 | size: 27,706 | ext: py | lang: Python
max_stars:  pyop4/opbase/opConfigParser.py | cindy0123/duty-util01 | c582a7c5dfc6585e421b1b72b2876741b0839c95 | ["MIT"] | count: null | null .. null
max_issues: pyop4/opbase/opConfigParser.py | cindy0123/duty-util01 | c582a7c5dfc6585e421b1b72b2876741b0839c95 | ["MIT"] | count: null | null .. null
max_forks:  pyop4/opbase/opConfigParser.py | cindy0123/duty-util01 | c582a7c5dfc6585e421b1b72b2876741b0839c95 | ["MIT"] | count: null | null .. null
"""Configuration file parser.
A setup file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
           "InterpolationError", "InterpolationMissingOptionError",
           "InterpolationDepthError",
           "InterpolationSyntaxError", "ParsingError",
           "MissingSectionHeaderError",
           "ConfigParser", "SafeConfigParser", "RawConfigParser",
           "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
self.args = (section, )
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
self.args = (filename, )
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
class RawConfigParser:
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
if allow_no_value:
self._optcre = self.OPTCRE_NV
else:
self._optcre = self.OPTCRE
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
        already exists. Raise ValueError if name is DEFAULT or any of its
case-insensitive variants.
"""
        if section.lower() == "default":
raise ValueError, 'Invalid section name: %s' % section
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = self._dict()
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v]
def optionxform(self, optionstr):
return optionstr
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
OPTCRE_NV = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?:' # any number of space/tab,
r'(?P<vi>[:=])\s*' # optionally followed by
# separator (either : or
# =), followed by any #
# space/tab
r'(?P<value>.*))?$' # everything up to eol
)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0] == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
import UserDict as _UserDict
class _Chainmap(_UserDict.DictMixin):
"""Combine multiple mappings for successive lookups.
For example, to emulate Python's normal lookup sequence:
import __builtin__
pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
"""
def __init__(self, *maps):
self._maps = maps
def __getitem__(self, key):
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def keys(self):
result = []
seen = set()
for mapping in self._maps:
for key in mapping:
if key not in seen:
result.append(key)
seen.add(key)
return result
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.
All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.
The section DEFAULT is special.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw or value is None:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_re.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value=None):
"""Set an option. Extend ConfigParser.set: check for string values."""
# The only legal non-string value if we allow valueless
# options is None, so we need to check if the value is a
# string if:
# - we do not allow valueless options, or
# - we allow valueless options but the value is not None
if self._optcre is self.OPTCRE or value:
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
if value is not None:
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = value.replace('%%', '')
tmp_value = self._interpvar_re.sub('', tmp_value)
# then, check if there's a lone percent sign left
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
ConfigParser.set(self, section, option, value)
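To make the API documented at the top of this module concrete, a short, hypothetical usage sketch follows; the file name, section, and option names are invented, and the print statement follows the Python 2 conventions this module targets.

# Hypothetical usage of the parser above (file/section/option names are illustrative).
cp = SafeConfigParser({'dir': '/tmp'})    # intrinsic defaults used for %(dir)s interpolation
read_ok = cp.read(['example.ini'])        # missing files are silently ignored
if cp.has_section('paths'):
    workdir = cp.get('paths', 'workdir')  # a value like '%(dir)s/work' expands to '/tmp/work'
    retries = cp.getint('paths', 'retries')
    for name, value in cp.items('paths'):
        print '%s = %s' % (name, value)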
avg_line_length: 36.745358 | max_line_length: 80 | alphanum_fraction: 0.556053

hexsha: 812cfbf8cb08759f0cad16b08a973d2f0a749115 | size: 1,230 | ext: py | lang: Python
max_stars:  linear_model_src/train.py | nzpznk/futures_master | c09d27032a24bd7fa145b4dde8b3119ab2e8a4bc | ["MIT"] | count: null | null .. null
max_issues: linear_model_src/train.py | nzpznk/futures_master | c09d27032a24bd7fa145b4dde8b3119ab2e8a4bc | ["MIT"] | count: null | null .. null
max_forks:  linear_model_src/train.py | nzpznk/futures_master | c09d27032a24bd7fa145b4dde8b3119ab2e8a4bc | ["MIT"] | count: null | null .. null
from sklearn import linear_model
from label_data import get_labeled_data
from config import __models_dir__
from config import contract_list
from config import train_sample
from numpy import random
import pickle
def train(class_name, sample, label):
model_path = __models_dir__ + class_name
classifier = linear_model.SGDClassifier(max_iter=10000,tol=0.01,warm_start='true', verbose=3)
classifier.fit(sample, label)
with open(model_path, 'wb') as mdf:
pickle.dump(classifier, mdf)
labeled_sample = get_labeled_data(train_sample)
train_sample = {}
train_label = {}
print('loaded')
for i in contract_list:
    train_sample[i] = []
    train_label[i] = []
    c1 = labeled_sample[1][i].count(1)   # number of samples labeled 1 for this contract
    total = len(labeled_sample[1][i])    # total number of labeled samples for this contract
    for j in range(len(labeled_sample[0][i])):
        if labeled_sample[1][i][j] == 1:
            # Randomly subsample the label-1 class to rebalance the training set.
            if random.random() < 1.5 * (total - c1) / (2 * total):
                train_sample[i].append(labeled_sample[0][i][j])
                train_label[i].append(labeled_sample[1][i][j])
        else:
            train_sample[i].append(labeled_sample[0][i][j])
            train_label[i].append(labeled_sample[1][i][j])
    print('training : ', i)
    train(i, train_sample[i], train_label[i])
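A natural companion to the training loop above is loading a pickled classifier back for inference. The sketch below is hypothetical (the new_samples argument is illustrative), but the model path and pickle usage mirror the train function above, and it reuses the imports already present in this file.

# Hypothetical inference helper matching the training code above.
def load_and_predict(class_name, new_samples):
    model_path = __models_dir__ + class_name
    with open(model_path, 'rb') as mdf:
        classifier = pickle.load(mdf)
    return classifier.predict(new_samples)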
avg_line_length: 36.176471 | max_line_length: 97 | alphanum_fraction: 0.673171

hexsha: 90949d2c0a1002786edb2d7afe22e69208732fc3 | size: 2,943 | ext: py | lang: Python
max_stars:  setup.py | eldorplus/importlib | 48047b7de74c0e75fecbc0b846864e523e57ecc6 | ["PSF-2.0", "BSD-2-Clause"] | count: null | null .. null
max_issues: setup.py | eldorplus/importlib | 48047b7de74c0e75fecbc0b846864e523e57ecc6 | ["PSF-2.0", "BSD-2-Clause"] | count: null | null .. null
max_forks:  setup.py | eldorplus/importlib | 48047b7de74c0e75fecbc0b846864e523e57ecc6 | ["PSF-2.0", "BSD-2-Clause"] | count: null | null .. null
from distutils.core import setup
import os.path
import _util
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
RELEASING = os.path.exists(os.path.join(PROJECT_ROOT, "MANIFEST.in"))
if RELEASING:
_util.verify_release_branch()
vers = _util.load_version()
#################################################
# Define the package metadata.
NAME = 'importlib2' # both for the package and the distribution
VERSION = vers.VERSION # No need to use importlib2.__version__.
AUTHOR = 'Eric Snow'
EMAIL = 'ericsnowcurrently@gmail.com'
URL = 'https://bitbucket.org/ericsnowcurrently/importlib2/'
LICENSE = 'New BSD License'
SUMMARY = 'A backport of the Python 3 importlib package.'
# DESCRIPTION is dynamically built below.
KEYWORDS = ''
PLATFORMS = []
CLASSIFIERS = [
#'Development Status :: 1 - Planning',
#'Development Status :: 2 - Pre-Alpha',
#'Development Status :: 3 - Alpha',
'Development Status :: 4 - Beta',
#'Development Status :: 5 - Production/Stable',
#'Development Status :: 6 - Mature',
#'Development Status :: 7 - Inactive',
'Intended Audience :: Developers',
#'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
#'Programming Language :: Python :: 3.5',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
]
with open(os.path.join(PROJECT_ROOT, 'README.rst')) as readme_file:
DESCRIPTION = readme_file.read()
#################################################
# Set up packages.
PACKAGES = ['importlib2', 'importlib2._fixers', 'importlib2._version']
_verfiles = [os.path.basename(filename)
for filename in (vers.PY_REVISION_FILE,
vers.PY_VERSION_FILE,
vers.RELEASE_FILE)]
PACKAGE_DATA = {'importlib2._version': _verfiles}
#################################################
# Pull it all together.
kwargs = {'name': NAME,
'version': VERSION,
'author': AUTHOR,
'author_email': EMAIL,
#'maintainer': MAINTAINER,
#'maintainer_email': MAINTAINER_EMAIL,
'url': URL,
#'download_url': DOWNLOAD,
'license': LICENSE,
'description': SUMMARY,
'long_description': DESCRIPTION,
'keywords': KEYWORDS,
'platforms': PLATFORMS,
'classifiers': CLASSIFIERS,
'packages': PACKAGES,
'package_data': PACKAGE_DATA,
}
for key in list(kwargs):
if not kwargs[key]:
del kwargs[key]
if __name__ == '__main__':
setup(**kwargs)
avg_line_length: 30.340206 | max_line_length: 70 | alphanum_fraction: 0.583418

hexsha: d0e8797401ab6475c35ffa6834e0876d04330068 | size: 33,582 | ext: py | lang: Python
max_stars:  superset/security/manager.py | tiagosousac/incubator-superset | 93e75584e68fcaf158471115a1741c6451c3f962 | ["Apache-2.0"] | count: 1 | 2021-08-18T19:26:04.000Z .. 2021-08-18T19:26:04.000Z
max_issues: superset/security/manager.py | tiagosousac/incubator-superset | 93e75584e68fcaf158471115a1741c6451c3f962 | ["Apache-2.0"] | count: null | null .. null
max_forks:  superset/security/manager.py | tiagosousac/incubator-superset | 93e75584e68fcaf158471115a1741c6451c3f962 | ["Apache-2.0"] | count: null | null .. null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""A set of constants and methods to manage permissions and security"""
import logging
from typing import Any, Callable, List, Optional, Set, Tuple, TYPE_CHECKING, Union
from flask import current_app, g
from flask_appbuilder import Model
from flask_appbuilder.security.sqla import models as ab_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import (
assoc_permissionview_role,
assoc_user_role,
)
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
RoleModelView,
UserModelView,
ViewMenuModelView,
)
from flask_appbuilder.widgets import ListWidget
from sqlalchemy import or_
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm.mapper import Mapper
from superset import sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
from superset.constants import RouteMethod
from superset.exceptions import SupersetSecurityException
from superset.utils.core import DatasourceName
if TYPE_CHECKING:
from superset.common.query_context import QueryContext
from superset.connectors.base.models import BaseDatasource
from superset.models.core import Database
from superset.viz import BaseViz
logger = logging.getLogger(__name__)
class SupersetSecurityListWidget(ListWidget):
"""
Redeclaring to avoid circular imports
"""
template = "superset/fab_overrides/list.html"
class SupersetRoleListWidget(ListWidget):
"""
Role model view from FAB already uses a custom list widget override
So we override the override
"""
template = "superset/fab_overrides/list_role.html"
def __init__(self, **kwargs):
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
UserModelView.list_widget = SupersetSecurityListWidget
RoleModelView.list_widget = SupersetRoleListWidget
PermissionViewModelView.list_widget = SupersetSecurityListWidget
PermissionModelView.list_widget = SupersetSecurityListWidget
# Limiting routes on FAB model views
UserModelView.include_route_methods = RouteMethod.CRUD_SET | {
RouteMethod.ACTION,
RouteMethod.API_READ,
RouteMethod.ACTION_POST,
"userinfo",
}
RoleModelView.include_route_methods = RouteMethod.CRUD_SET
PermissionViewModelView.include_route_methods = {RouteMethod.LIST}
PermissionModelView.include_route_methods = {RouteMethod.LIST}
ViewMenuModelView.include_route_methods = {RouteMethod.LIST}
class SupersetSecurityManager(SecurityManager):
userstatschartview = None
READ_ONLY_MODEL_VIEWS = {"DatabaseAsync", "DatabaseView", "DruidClusterModelView"}
USER_MODEL_VIEWS = {
"UserDBModelView",
"UserLDAPModelView",
"UserOAuthModelView",
"UserOIDModelView",
"UserRemoteUserModelView",
}
GAMMA_READ_ONLY_MODEL_VIEWS = {
"SqlMetricInlineView",
"TableColumnInlineView",
"TableModelView",
"DruidColumnInlineView",
"DruidDatasourceModelView",
"DruidMetricInlineView",
"Datasource",
} | READ_ONLY_MODEL_VIEWS
ADMIN_ONLY_VIEW_MENUS = {
"AccessRequestsModelView",
"Manage",
"SQL Lab",
"Queries",
"Refresh Druid Metadata",
"ResetPasswordView",
"RoleModelView",
"LogModelView",
"Security",
"RowLevelSecurityFiltersModelView",
} | USER_MODEL_VIEWS
ALPHA_ONLY_VIEW_MENUS = {"Upload a CSV"}
ADMIN_ONLY_PERMISSIONS = {
"can_sql_json", # TODO: move can_sql_json to sql_lab role
"can_override_role_permissions",
"can_sync_druid_source",
"can_override_role_permissions",
"can_approve",
"can_update_role",
"all_query_access",
}
READ_ONLY_PERMISSION = {"can_show", "can_list", "can_get", "can_external_metadata"}
ALPHA_ONLY_PERMISSIONS = {
"muldelete",
"all_database_access",
"all_datasource_access",
}
OBJECT_SPEC_PERMISSIONS = {
"database_access",
"schema_access",
"datasource_access",
"metric_access",
}
ACCESSIBLE_PERMS = {"can_userinfo"}
def get_schema_perm(
self, database: Union["Database", str], schema: Optional[str] = None
) -> Optional[str]:
"""
Return the database specific schema permission.
:param database: The Superset database or database name
:param schema: The Superset schema name
:return: The database specific schema permission
"""
if schema:
return f"[{database}].[{schema}]"
return None
def unpack_schema_perm(self, schema_permission: str) -> Tuple[str, str]:
# [database_name].[schema_name]
schema_name = schema_permission.split(".")[1][1:-1]
database_name = schema_permission.split(".")[0][1:-1]
return database_name, schema_name
def can_access(self, permission_name: str, view_name: str) -> bool:
"""
Return True if the user can access the FAB permission/view, False
otherwise.
Note this method adds protection from has_access failing from missing
permission/view entries.
:param permission_name: The FAB permission name
:param view_name: The FAB view-menu name
        :returns: Whether the user can access the FAB permission/view
"""
user = g.user
if user.is_anonymous:
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
def is_anonymous(self):
return g.user.is_anonymous
def can_access_all_queries(self) -> bool:
"""
Return True if the user can access all queries, False otherwise.
:returns: Whether the user can access all queries
"""
return self.can_access("all_query_access", "all_query_access")
def all_datasource_access(self) -> bool:
"""
Return True if the user can access all Superset datasources, False otherwise.
:returns: Whether the user can access all Superset datasources
"""
return self.can_access("all_datasource_access", "all_datasource_access")
def all_database_access(self) -> bool:
"""
Return True if the user can access all Superset databases, False otherwise.
:returns: Whether the user can access all Superset databases
"""
return self.can_access("all_database_access", "all_database_access")
def database_access(self, database: "Database") -> bool:
"""
Return True if the user can access the Superset database, False otherwise.
:param database: The Superset database
:returns: Whether the user can access the Superset database
"""
return (
self.all_datasource_access()
or self.all_database_access()
or self.can_access("database_access", database.perm)
)
def schema_access(self, datasource: "BaseDatasource") -> bool:
"""
Return True if the user can access the schema associated with the Superset
datasource, False otherwise.
Note for Druid datasources the database and schema are akin to the Druid cluster
and datasource name prefix, i.e., [schema.]datasource, respectively.
:param datasource: The Superset datasource
:returns: Whether the user can access the datasource's schema
"""
return (
self.all_datasource_access()
or self.database_access(datasource.database)
or self.can_access("schema_access", datasource.schema_perm)
)
def datasource_access(self, datasource: "BaseDatasource") -> bool:
"""
Return True if the user can access the Superset datasource, False otherwise.
:param datasource: The Superset datasource
        :returns: Whether the user can access the Superset datasource
"""
return self.schema_access(datasource) or self.can_access(
"datasource_access", datasource.perm
)
def get_datasource_access_error_msg(self, datasource: "BaseDatasource") -> str:
"""
Return the error message for the denied Superset datasource.
:param datasource: The denied Superset datasource
:returns: The error message
"""
return f"""This endpoint requires the datasource {datasource.name}, database or
`all_datasource_access` permission"""
def get_datasource_access_link(self, datasource: "BaseDatasource") -> Optional[str]:
"""
Return the link for the denied Superset datasource.
:param datasource: The denied Superset datasource
:returns: The access URL
"""
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def get_table_access_error_msg(self, tables: List[str]) -> str:
"""
Return the error message for the denied SQL tables.
Note the table names conform to the [[cluster.]schema.]table construct.
:param tables: The list of denied SQL table names
:returns: The error message
"""
quoted_tables = [f"`{t}`" for t in tables]
return f"""You need access to the following tables: {", ".join(quoted_tables)},
`all_database_access` or `all_datasource_access` permission"""
def get_table_access_link(self, tables: List[str]) -> Optional[str]:
"""
Return the access link for the denied SQL tables.
Note the table names conform to the [[cluster.]schema.]table construct.
:param tables: The list of denied SQL table names
:returns: The access URL
"""
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def can_access_datasource(
self, database: "Database", table_name: str, schema: Optional[str] = None
) -> bool:
return self._datasource_access_by_name(database, table_name, schema=schema)
def _datasource_access_by_name(
self, database: "Database", table_name: str, schema: Optional[str] = None
) -> bool:
"""
Return True if the user can access the SQL table, False otherwise.
:param database: The SQL database
:param table_name: The SQL table name
:param schema: The Superset schema
        :returns: Whether the user can access the SQL table
"""
from superset import db
if self.database_access(database) or self.all_datasource_access():
return True
schema_perm = self.get_schema_perm(database, schema)
if schema_perm and self.can_access("schema_access", schema_perm):
return True
datasources = ConnectorRegistry.query_datasources_by_name(
db.session, database, table_name, schema=schema
)
for datasource in datasources:
if self.can_access("datasource_access", datasource.perm):
return True
return False
def _get_schema_and_table(
self, table_in_query: str, schema: str
) -> Tuple[str, str]:
"""
Return the SQL schema/table tuple associated with the table extracted from the
SQL query.
Note the table name conforms to the [[cluster.]schema.]table construct.
:param table_in_query: The SQL table name
:param schema: The fallback SQL schema if not present in the table name
:returns: The SQL schema/table tuple
"""
table_name_pieces = table_in_query.split(".")
if len(table_name_pieces) == 3:
return tuple(table_name_pieces[1:]) # type: ignore
elif len(table_name_pieces) == 2:
return tuple(table_name_pieces) # type: ignore
return (schema, table_name_pieces[0])
def _datasource_access_by_fullname(
self, database: "Database", table_in_query: str, schema: str
) -> bool:
"""
Return True if the user can access the table extracted from the SQL query, False
otherwise.
Note the table name conforms to the [[cluster.]schema.]table construct.
:param database: The Superset database
:param table_in_query: The SQL table name
:param schema: The fallback SQL schema, i.e., if not present in the table name
:returns: Whether the user can access the SQL table
"""
table_schema, table_name = self._get_schema_and_table(table_in_query, schema)
return self._datasource_access_by_name(
database, table_name, schema=table_schema
)
def rejected_tables(self, sql: str, database: "Database", schema: str) -> List[str]:
"""
Return the list of rejected SQL table names.
Note the rejected table names conform to the [[cluster.]schema.]table construct.
:param sql: The SQL statement
:param database: The SQL database
:param schema: The SQL database schema
:returns: The rejected table names
"""
superset_query = sql_parse.ParsedQuery(sql)
return [
t
for t in superset_query.tables
if not self._datasource_access_by_fullname(database, t, schema)
]
def get_public_role(self) -> Optional[Any]: # Optional[self.role_model]
from superset import conf
if not conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
return None
from superset import db
return db.session.query(self.role_model).filter_by(name="Public").first()
def user_view_menu_names(self, permission_name: str) -> Set[str]:
from superset import db
base_query = (
db.session.query(self.viewmenu_model.name)
.join(self.permissionview_model)
.join(self.permission_model)
.join(assoc_permissionview_role)
.join(self.role_model)
)
if not g.user.is_anonymous:
# filter by user id
view_menu_names = (
base_query.join(assoc_user_role)
.join(self.user_model)
.filter(self.user_model.id == g.user.id)
.filter(self.permission_model.name == permission_name)
).all()
return set([s.name for s in view_menu_names])
# Properly treat anonymous user
public_role = self.get_public_role()
if public_role:
# filter by public role
view_menu_names = (
base_query.filter(self.role_model.id == public_role.id).filter(
self.permission_model.name == permission_name
)
).all()
return set([s.name for s in view_menu_names])
return set()
def schemas_accessible_by_user(
self, database: "Database", schemas: List[str], hierarchical: bool = True
) -> List[str]:
"""
Return the sorted list of SQL schemas accessible by the user.
:param database: The SQL database
:param schemas: The list of eligible SQL schemas
:param hierarchical: Whether to check using the hierarchical permission logic
:returns: The list of accessible SQL schemas
"""
from superset import db
from superset.connectors.sqla.models import SqlaTable
if hierarchical and (
self.database_access(database) or self.all_datasource_access()
):
return schemas
# schema_access
accessible_schemas = {
self.unpack_schema_perm(s)[1]
for s in self.user_view_menu_names("schema_access")
if s.startswith(f"[{database}].")
}
# datasource_access
perms = self.user_view_menu_names("datasource_access")
if perms:
tables = (
db.session.query(SqlaTable.schema)
.filter(SqlaTable.database_id == database.id)
.filter(SqlaTable.schema.isnot(None))
.filter(SqlaTable.schema != "")
.filter(or_(SqlaTable.perm.in_(perms)))
.distinct()
)
accessible_schemas.update([t.schema for t in tables])
return [s for s in schemas if s in accessible_schemas]
def get_datasources_accessible_by_user(
self,
database: "Database",
datasource_names: List[DatasourceName],
schema: Optional[str] = None,
) -> List[DatasourceName]:
"""
Return the list of SQL tables accessible by the user.
:param database: The SQL database
:param datasource_names: The list of eligible SQL tables w/ schema
:param schema: The fallback SQL schema if not present in the table name
:returns: The list of accessible SQL tables w/ schema
"""
from superset import db
if self.database_access(database) or self.all_datasource_access():
return datasource_names
if schema:
schema_perm = self.get_schema_perm(database, schema)
if schema_perm and self.can_access("schema_access", schema_perm):
return datasource_names
user_perms = self.user_view_menu_names("datasource_access")
schema_perms = self.user_view_menu_names("schema_access")
user_datasources = ConnectorRegistry.query_datasources_by_permissions(
db.session, database, user_perms, schema_perms
)
if schema:
names = {d.table_name for d in user_datasources if d.schema == schema}
return [d for d in datasource_names if d in names]
else:
full_names = {d.full_name for d in user_datasources}
return [d for d in datasource_names if f"[{database}].[{d}]" in full_names]
def merge_perm(self, permission_name: str, view_menu_name: str) -> None:
"""
Add the FAB permission/view-menu.
:param permission_name: The FAB permission name
        :param view_menu_name: The FAB view-menu name
:see: SecurityManager.add_permission_view_menu
"""
logger.warning(
"This method 'merge_perm' is deprecated use add_permission_view_menu"
)
self.add_permission_view_menu(permission_name, view_menu_name)
def _is_user_defined_permission(self, perm: Model) -> bool:
"""
Return True if the FAB permission is user defined, False otherwise.
:param perm: The FAB permission
:returns: Whether the FAB permission is user defined
"""
return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS
def create_custom_permissions(self) -> None:
"""
Create custom FAB permissions.
"""
self.add_permission_view_menu("all_datasource_access", "all_datasource_access")
self.add_permission_view_menu("all_database_access", "all_database_access")
self.add_permission_view_menu("all_query_access", "all_query_access")
def create_missing_perms(self) -> None:
"""
Creates missing FAB permissions for datasources, schemas and metrics.
"""
from superset import db
from superset.connectors.base.models import BaseMetric
from superset.models import core as models
logger.info("Fetching a set of all perms to lookup which ones are missing")
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
def merge_pv(view_menu, perm):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self.add_permission_view_menu(view_menu, perm)
logger.info("Creating missing datasource permissions.")
datasources = ConnectorRegistry.get_all_datasources(db.session)
for datasource in datasources:
merge_pv("datasource_access", datasource.get_perm())
merge_pv("schema_access", datasource.get_schema_perm())
logger.info("Creating missing database permissions.")
databases = db.session.query(models.Database).all()
for database in databases:
merge_pv("database_access", database.perm)
logger.info("Creating missing metrics permissions")
metrics: List[BaseMetric] = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(db.session.query(datasource_class.metric_class).all())
def clean_perms(self) -> None:
"""
Clean up the FAB faulty permissions.
"""
logger.info("Cleaning faulty perms")
sesh = self.get_session
pvms = sesh.query(ab_models.PermissionView).filter(
or_(
ab_models.PermissionView.permission == None,
ab_models.PermissionView.view_menu == None,
)
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logger.info("Deleted {} faulty permissions".format(deleted_count))
def sync_role_definitions(self) -> None:
"""
Initialize the Superset application with security roles and such.
"""
from superset import conf
logger.info("Syncing role definition")
self.create_custom_permissions()
# Creating default roles
self.set_role("Admin", self._is_admin_pvm)
self.set_role("Alpha", self._is_alpha_pvm)
self.set_role("Gamma", self._is_gamma_pvm)
self.set_role("granter", self._is_granter_pvm)
self.set_role("sql_lab", self._is_sql_lab_pvm)
if conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
self.set_role("Public", self._is_gamma_pvm)
self.create_missing_perms()
# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
def set_role(self, role_name: str, pvm_check: Callable) -> None:
"""
Set the FAB permission/views for the role.
:param role_name: The FAB role name
:param pvm_check: The FAB permission/view check
"""
logger.info("Syncing {} perms".format(role_name))
sesh = self.get_session
pvms = sesh.query(ab_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.add_role(role_name)
role_pvms = [p for p in pvms if pvm_check(p)]
role.permissions = role_pvms
sesh.merge(role)
sesh.commit()
def _is_admin_only(self, pvm: Model) -> bool:
"""
Return True if the FAB permission/view is accessible to only Admin users,
False otherwise.
Note readonly operations on read only model views are allowed only for admins.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to only Admin users
"""
if (
pvm.view_menu.name in self.READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ADMIN_ONLY_VIEW_MENUS
or pvm.permission.name in self.ADMIN_ONLY_PERMISSIONS
)
def _is_alpha_only(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is accessible to only Alpha users,
False otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to only Alpha users
"""
if (
pvm.view_menu.name in self.GAMMA_READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ALPHA_ONLY_VIEW_MENUS
or pvm.permission.name in self.ALPHA_ONLY_PERMISSIONS
)
def _is_accessible_to_all(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is accessible to all, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to all users
"""
return pvm.permission.name in self.ACCESSIBLE_PERMS
def _is_admin_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Admin user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Admin related
"""
return not self._is_user_defined_permission(pvm)
def _is_alpha_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Alpha user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Alpha related
"""
return not (
self._is_user_defined_permission(pvm) or self._is_admin_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_gamma_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Gamma user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Gamma related
"""
return not (
self._is_user_defined_permission(pvm)
or self._is_admin_only(pvm)
or self._is_alpha_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_sql_lab_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is SQL Lab related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is SQL Lab related
"""
return (
pvm.view_menu.name
in {"SQL Lab", "SQL Editor", "Query Search", "Saved Queries"}
or pvm.permission.name
in {
"can_sql_json",
"can_csv",
"can_search_queries",
"can_sqllab_viz",
"can_sqllab_table_viz",
"can_sqllab",
}
or (
pvm.view_menu.name in self.USER_MODEL_VIEWS
and pvm.permission.name == "can_list"
)
)
def _is_granter_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the user can grant the FAB permission/view, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the user can grant the FAB permission/view
"""
return pvm.permission.name in {"can_override_role_permissions", "can_approve"}
def set_perm(
self, mapper: Mapper, connection: Connection, target: "BaseDatasource"
) -> None:
"""
Set the datasource permissions.
        :param mapper: The table mapper
:param connection: The DB-API connection
:param target: The mapped instance being persisted
"""
link_table = target.__table__ # pylint: disable=no-member
if target.perm != target.get_perm():
connection.execute(
link_table.update()
.where(link_table.c.id == target.id)
.values(perm=target.get_perm())
)
if (
hasattr(target, "schema_perm")
and target.schema_perm != target.get_schema_perm()
):
connection.execute(
link_table.update()
.where(link_table.c.id == target.id)
.values(schema_perm=target.get_schema_perm())
)
pvm_names = []
if target.__tablename__ in {"dbs", "clusters"}:
pvm_names.append(("database_access", target.get_perm()))
else:
pvm_names.append(("datasource_access", target.get_perm()))
if target.schema:
pvm_names.append(("schema_access", target.get_schema_perm()))
# TODO(bogdan): modify slice permissions as well.
for permission_name, view_menu_name in pvm_names:
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if not permission:
permission_table = (
self.permission_model.__table__ # pylint: disable=no-member
)
connection.execute(
permission_table.insert().values(name=permission_name)
)
permission = self.find_permission(permission_name)
if not view_menu:
view_menu_table = (
self.viewmenu_model.__table__ # pylint: disable=no-member
)
connection.execute(view_menu_table.insert().values(name=view_menu_name))
view_menu = self.find_view_menu(view_menu_name)
if permission and view_menu:
pv = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
if not pv and permission and view_menu:
permission_view_table = (
self.permissionview_model.__table__ # pylint: disable=no-member
)
connection.execute(
permission_view_table.insert().values(
permission_id=permission.id, view_menu_id=view_menu.id
)
)
def assert_datasource_permission(self, datasource: "BaseDatasource") -> None:
"""
        Assert that the user has permission to access the Superset datasource.
:param datasource: The Superset datasource
:raises SupersetSecurityException: If the user does not have permission
"""
if not self.datasource_access(datasource):
raise SupersetSecurityException(
self.get_datasource_access_error_msg(datasource),
self.get_datasource_access_link(datasource),
)
def assert_query_context_permission(self, query_context: "QueryContext") -> None:
"""
        Assert that the user has permission to access the query context.
:param query_context: The query context
:raises SupersetSecurityException: If the user does not have permission
"""
self.assert_datasource_permission(query_context.datasource)
def assert_viz_permission(self, viz: "BaseViz") -> None:
"""
        Assert that the user has permission to access the visualization.
:param viz: The visualization
:raises SupersetSecurityException: If the user does not have permission
"""
self.assert_datasource_permission(viz.datasource)
def get_rls_filters(self, table: "BaseDatasource"):
"""
Retrieves the appropriate row level security filters for the current user and the passed table.
:param table: The table to check against
:returns: A list of filters.
"""
if hasattr(g, "user") and hasattr(g.user, "id"):
from superset import db
from superset.connectors.sqla.models import (
RLSFilterRoles,
RowLevelSecurityFilter,
)
user_roles = (
db.session.query(assoc_user_role.c.role_id)
.filter(assoc_user_role.c.user_id == g.user.id)
.subquery()
)
filter_roles = (
db.session.query(RLSFilterRoles.c.rls_filter_id)
.filter(RLSFilterRoles.c.role_id.in_(user_roles))
.subquery()
)
query = (
db.session.query(
RowLevelSecurityFilter.id, RowLevelSecurityFilter.clause
)
.filter(RowLevelSecurityFilter.table_id == table.id)
.filter(RowLevelSecurityFilter.id.in_(filter_roles))
)
return query.all()
return []
def get_rls_ids(self, table: "BaseDatasource") -> List[int]:
"""
Retrieves the appropriate row level security filters IDs for the current user and the passed table.
:param table: The table to check against
:returns: A list of IDs.
"""
ids = [f.id for f in self.get_rls_filters(table)]
ids.sort() # Combinations rather than permutations
return ids
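Deployments usually extend this class rather than modifying it in place. A minimal, hypothetical superset_config.py sketch is shown below; the CUSTOM_SECURITY_MANAGER hook follows Superset's usual configuration convention, but verify it against the Superset version in use.

# Hypothetical superset_config.py snippet (illustrative subclass, not part of this file).
from superset.security.manager import SupersetSecurityManager

class CustomSecurityManager(SupersetSecurityManager):
    def database_access(self, database):
        # Delegate to the stock check; a real deployment would add custom logic here.
        return super().database_access(database)

CUSTOM_SECURITY_MANAGER = CustomSecurityManager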
avg_line_length: 35.386723 | max_line_length: 107 | alphanum_fraction: 0.636799

hexsha: 74971917bbd68cf6b1cb0ed4fc3fcab8c65acc85 | size: 5,160 | ext: py | lang: Python
max_stars:  gym_electric_motor/envs/gym_pmsm/perm_mag_syn_motor_env.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | ["MIT"] | count: 1 | 2021-03-29T07:47:32.000Z .. 2021-03-29T07:47:32.000Z
max_issues: gym_electric_motor/envs/gym_pmsm/perm_mag_syn_motor_env.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | ["MIT"] | count: null | null .. null
max_forks:  gym_electric_motor/envs/gym_pmsm/perm_mag_syn_motor_env.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | ["MIT"] | count: null | null .. null
from ...core import ElectricMotorEnvironment
from ...physical_systems.physical_systems import SynchronousMotorSystem
from ...reference_generators import WienerProcessReferenceGenerator
from ...reward_functions import WeightedSumOfErrors
from ...constraints import SquaredConstraint
class PermanentMagnetSynchronousMotorEnvironment(ElectricMotorEnvironment):
def __init__(self, motor='PMSM', reward_function=None, reference_generator=None, constraints=None, **kwargs):
"""
Args:
motor(ElectricMotor): Electric Motor used in the PhysicalSystem
reward_function(RewardFunction): Reward Function for the environment
reference_generator(ReferenceGenerator): Reference Generator for the environment
            kwargs(dict): Further kwargs to pass to the superclass and the submodules
"""
physical_system = SynchronousMotorSystem(motor=motor, **kwargs)
reference_generator = reference_generator or WienerProcessReferenceGenerator(**kwargs)
reward_function = reward_function or WeightedSumOfErrors(**kwargs)
constraints_ = constraints if constraints is not None \
else ('i_a', 'i_b', 'i_c', SquaredConstraint(('i_sd', 'i_sq')))
super().__init__(
physical_system, reference_generator=reference_generator, reward_function=reward_function,
constraints=constraints_, **kwargs
)
class DiscPermanentMagnetSynchronousMotorEnvironment(PermanentMagnetSynchronousMotorEnvironment):
"""
Description:
Environment to simulate a discretely controlled Permanent Magnet Synchronous Motor (PMSM).
Key:
`PMSMDisc-v1`
Default Modules:
Physical System:
SCMLSystem/DcMotorSystem with:
| IdealVoltageSupply
| DiscB6BridgeConverter
| PermanentMagnetSynchronousMotor
| PolynomialStaticLoad
| GaussianWhiteNoiseGenerator
| EulerSolver
| tau=1e-5
Reference Generator:
WienerProcessReferenceGenerator
            Reference Quantity: 'omega'
Reward Function:
WeightedSumOfErrors(reward_weights= {'omega': 1 })
Visualization:
ElectricMotorVisualization (Dummy for no Visualization)
State Variables:
``['omega' , 'torque', 'i_sa', 'i_sb', 'i_sc', 'i_sd', 'i_sq',``
``'u_sa', 'u_sb', 'u_sc', 'u_sd', 'u_sq','epsilon', 'u_sup']``
Observation Space:
Type: Tuple(State_Space, Reference_Space)
State Space:
Box(low=[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0], high=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Reference Space:
Box(low=[-1], high=[1])
Action Space:
Type: Discrete(8)
Starting State:
Zeros on all state variables.
Episode Termination:
            Termination if current limits are violated. A terminal reward of -10 is applied.
(Have a look at the reward functions.)
u_sup and u_nominal must be the same
"""
def __init__(self, tau=1e-5, converter='Disc-B6C', **kwargs):
super().__init__(tau=tau, converter=converter, **kwargs)
class ContPermanentMagnetSynchronousMotorEnvironment(PermanentMagnetSynchronousMotorEnvironment):
"""
Description:
Environment to simulate a continuously controlled Permanent Magnet Synchronous Motor (PMSM).
Key:
`PMSMCont-v1`
Default Modules:
Physical System:
SCMLSystem/DcMotorSystem with:
| IdealVoltageSupply
| ContinuousB6BridgeConverter
| PermanentMagnetSynchronousMotor
| PolynomialStaticLoad
| GaussianWhiteNoiseGenerator
| EulerSolver
| tau=1e-4
Reference Generator:
WienerProcessReferenceGenerator
            Reference Quantity: 'omega'
Reward Function:
WeightedSumOfErrors(reward_weights= {'omega': 1 })
Visualization:
ElectricMotorVisualization (Dummy for no Visualization)
State Variables:
``['omega' , 'torque', 'i_sa', 'i_sb', 'i_sc', 'i_sd',``
``'i_sq', 'u_sa', 'u_sb', 'u_sc', 'u_sd', 'u_sq','epsilon', 'u_sup']``
Observation Space:
Type: Tuple(State_Space, Reference_Space)
State Space:
Box(low=[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0], high=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
Reference Space:
Box(low=[-1], high=[1])
Action Space:
Type: Box(low=[-1, -1, -1], high=[1, 1, 1])
Starting State:
Zeros on all state variables.
Episode Termination:
            Termination if current limits are violated. A terminal reward of -10 is applied.
(Have a look at the reward functions.)
u_sup and u_nominal must be the same
"""
def __init__(self, tau=1e-4, converter='Cont-B6C', **kwargs):
super().__init__(tau=tau, converter=converter, **kwargs)
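# Illustrative usage sketch (not part of the original module), assuming the
# standard gym-style interaction loop implied by the docstrings above. The
# exact reset()/step() return shapes depend on the installed
# gym-electric-motor version, so treat this as a template rather than a
# definitive example.
if __name__ == "__main__":
    env = ContPermanentMagnetSynchronousMotorEnvironment()
    observation = env.reset()
    for _ in range(10):
        action = env.action_space.sample()  # Box(low=[-1, -1, -1], high=[1, 1, 1])
        observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()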
| 35.342466
| 114
| 0.61686
|
0e78220d477c40c05d4f78fa3eb8f934e30051d5
| 4,591
|
py
|
Python
|
stripe/api_resources/__init__.py
|
nomadmtb/stripe-python
|
bc872674ba5c8ffd7d830f83ccb8e2adab53fd86
|
[
"MIT"
] | null | null | null |
stripe/api_resources/__init__.py
|
nomadmtb/stripe-python
|
bc872674ba5c8ffd7d830f83ccb8e2adab53fd86
|
[
"MIT"
] | null | null | null |
stripe/api_resources/__init__.py
|
nomadmtb/stripe-python
|
bc872674ba5c8ffd7d830f83ccb8e2adab53fd86
|
[
"MIT"
] | null | null | null |
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
# flake8: noqa
from stripe.api_resources.error_object import ErrorObject, OAuthErrorObject
from stripe.api_resources.list_object import ListObject
from stripe.api_resources import billing_portal
from stripe.api_resources import checkout
from stripe.api_resources import identity
from stripe.api_resources import issuing
from stripe.api_resources import radar
from stripe.api_resources import reporting
from stripe.api_resources import sigma
from stripe.api_resources import terminal
from stripe.api_resources import test_helpers
from stripe.api_resources.account import Account
from stripe.api_resources.account_link import AccountLink
from stripe.api_resources.alipay_account import AlipayAccount
from stripe.api_resources.apple_pay_domain import ApplePayDomain
from stripe.api_resources.application_fee import ApplicationFee
from stripe.api_resources.application_fee_refund import ApplicationFeeRefund
from stripe.api_resources.balance import Balance
from stripe.api_resources.balance_transaction import BalanceTransaction
from stripe.api_resources.bank_account import BankAccount
from stripe.api_resources.bitcoin_receiver import BitcoinReceiver
from stripe.api_resources.bitcoin_transaction import BitcoinTransaction
from stripe.api_resources.capability import Capability
from stripe.api_resources.card import Card
from stripe.api_resources.charge import Charge
from stripe.api_resources.country_spec import CountrySpec
from stripe.api_resources.coupon import Coupon
from stripe.api_resources.credit_note import CreditNote
from stripe.api_resources.credit_note_line_item import CreditNoteLineItem
from stripe.api_resources.customer import Customer
from stripe.api_resources.customer_balance_transaction import (
CustomerBalanceTransaction,
)
from stripe.api_resources.dispute import Dispute
from stripe.api_resources.ephemeral_key import EphemeralKey
from stripe.api_resources.event import Event
from stripe.api_resources.exchange_rate import ExchangeRate
from stripe.api_resources.file import File
from stripe.api_resources.file import FileUpload
from stripe.api_resources.file_link import FileLink
from stripe.api_resources.invoice import Invoice
from stripe.api_resources.invoice_item import InvoiceItem
from stripe.api_resources.invoice_line_item import InvoiceLineItem
from stripe.api_resources.issuer_fraud_record import IssuerFraudRecord
from stripe.api_resources.line_item import LineItem
from stripe.api_resources.login_link import LoginLink
from stripe.api_resources.mandate import Mandate
from stripe.api_resources.order import Order
from stripe.api_resources.order_return import OrderReturn
from stripe.api_resources.payment_intent import PaymentIntent
from stripe.api_resources.payment_link import PaymentLink
from stripe.api_resources.payment_method import PaymentMethod
from stripe.api_resources.payout import Payout
from stripe.api_resources.person import Person
from stripe.api_resources.plan import Plan
from stripe.api_resources.price import Price
from stripe.api_resources.product import Product
from stripe.api_resources.promotion_code import PromotionCode
from stripe.api_resources.quote import Quote
from stripe.api_resources.recipient import Recipient
from stripe.api_resources.recipient_transfer import RecipientTransfer
from stripe.api_resources.refund import Refund
from stripe.api_resources.reversal import Reversal
from stripe.api_resources.review import Review
from stripe.api_resources.setup_attempt import SetupAttempt
from stripe.api_resources.setup_intent import SetupIntent
from stripe.api_resources.shipping_rate import ShippingRate
from stripe.api_resources.sku import SKU
from stripe.api_resources.source import Source
from stripe.api_resources.source_transaction import SourceTransaction
from stripe.api_resources.subscription import Subscription
from stripe.api_resources.subscription_item import SubscriptionItem
from stripe.api_resources.subscription_schedule import SubscriptionSchedule
from stripe.api_resources.tax_code import TaxCode
from stripe.api_resources.tax_id import TaxId
from stripe.api_resources.tax_rate import TaxRate
from stripe.api_resources.three_d_secure import ThreeDSecure
from stripe.api_resources.token import Token
from stripe.api_resources.topup import Topup
from stripe.api_resources.transfer import Transfer
from stripe.api_resources.usage_record import UsageRecord
from stripe.api_resources.usage_record_summary import UsageRecordSummary
from stripe.api_resources.webhook_endpoint import WebhookEndpoint
| 50.450549
| 76
| 0.88826
|
6983152e248f6e373b0154333d75ea7f30841b3c
| 14,808
|
py
|
Python
|
superset/charts/data/api.py
|
m-ajay/superset
|
2cd80543581155225f2b538ad8cd5ebc7de5a9ff
|
[
"Apache-2.0"
] | 18,621
|
2017-06-19T09:57:44.000Z
|
2021-01-05T06:28:21.000Z
|
superset/charts/data/api.py
|
m-ajay/superset
|
2cd80543581155225f2b538ad8cd5ebc7de5a9ff
|
[
"Apache-2.0"
] | 9,043
|
2017-07-05T16:10:48.000Z
|
2021-01-05T17:58:01.000Z
|
superset/charts/data/api.py
|
vecatom/incubator-superset
|
0a21057bf837acf876ca25394a974bb345a646ca
|
[
"Apache-2.0"
] | 5,527
|
2017-07-06T01:39:43.000Z
|
2021-01-05T06:01:11.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import logging
from typing import Any, Dict, Optional, TYPE_CHECKING
import simplejson
from flask import g, make_response, request
from flask_appbuilder.api import expose, protect
from flask_babel import gettext as _
from marshmallow import ValidationError
from superset import is_feature_enabled, security_manager
from superset.charts.api import ChartRestApi
from superset.charts.commands.exceptions import (
ChartDataCacheLoadError,
ChartDataQueryFailedError,
)
from superset.charts.data.commands.create_async_job_command import (
CreateAsyncChartDataJobCommand,
)
from superset.charts.data.commands.get_data_command import ChartDataCommand
from superset.charts.data.query_context_cache_loader import QueryContextCacheLoader
from superset.charts.post_processing import apply_post_process
from superset.charts.schemas import ChartDataQueryContextSchema
from superset.common.chart_data import ChartDataResultFormat, ChartDataResultType
from superset.connectors.base.models import BaseDatasource
from superset.exceptions import QueryObjectValidationError
from superset.extensions import event_logger
from superset.utils.async_query_manager import AsyncQueryTokenException
from superset.utils.core import json_int_dttm_ser
from superset.views.base import CsvResponse, generate_download_headers
from superset.views.base_api import statsd_metrics
if TYPE_CHECKING:
from flask import Response
from superset.common.query_context import QueryContext
logger = logging.getLogger(__name__)
class ChartDataRestApi(ChartRestApi):
include_route_methods = {"get_data", "data", "data_from_cache"}
@expose("/<int:pk>/data/", methods=["GET"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.data",
log_to_statsd=False,
)
def get_data(self, pk: int) -> Response:
"""
Takes a chart ID and uses the query context stored when the chart was saved
to return payload data response.
---
get:
description: >-
Takes a chart ID and uses the query context stored when the chart was saved
to return payload data response.
parameters:
- in: path
schema:
type: integer
name: pk
description: The chart ID
- in: query
name: format
description: The format in which the data should be returned
schema:
type: string
- in: query
name: type
description: The type in which the data should be returned
schema:
type: string
responses:
200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/ChartDataResponseSchema"
202:
description: Async job details
content:
application/json:
schema:
$ref: "#/components/schemas/ChartDataAsyncResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
500:
$ref: '#/components/responses/500'
"""
chart = self.datamodel.get(pk, self._base_filters)
if not chart:
return self.response_404()
try:
json_body = json.loads(chart.query_context)
except (TypeError, json.decoder.JSONDecodeError):
json_body = None
if json_body is None:
return self.response_400(
message=_(
"Chart has no query context saved. Please save the chart again."
)
)
# override saved query context
json_body["result_format"] = request.args.get(
"format", ChartDataResultFormat.JSON
)
json_body["result_type"] = request.args.get("type", ChartDataResultType.FULL)
try:
query_context = self._create_query_context_from_form(json_body)
command = ChartDataCommand(query_context)
command.validate()
except QueryObjectValidationError as error:
return self.response_400(message=error.message)
except ValidationError as error:
return self.response_400(
message=_(
"Request is incorrect: %(error)s", error=error.normalized_messages()
)
)
# TODO: support CSV, SQL query and other non-JSON types
if (
is_feature_enabled("GLOBAL_ASYNC_QUERIES")
and query_context.result_format == ChartDataResultFormat.JSON
and query_context.result_type == ChartDataResultType.FULL
):
return self._run_async(json_body, command)
try:
form_data = json.loads(chart.params)
except (TypeError, json.decoder.JSONDecodeError):
form_data = {}
return self._get_data_response(
command=command, form_data=form_data, datasource=query_context.datasource
)
@expose("/data", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.data",
log_to_statsd=False,
)
def data(self) -> Response:
"""
Takes a query context constructed in the client and returns payload
data response for the given query.
---
post:
description: >-
Takes a query context constructed in the client and returns payload data
response for the given query.
requestBody:
description: >-
A query context consists of a datasource from which to fetch data
and one or many query objects.
required: true
content:
application/json:
schema:
$ref: "#/components/schemas/ChartDataQueryContextSchema"
responses:
200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/ChartDataResponseSchema"
202:
description: Async job details
content:
application/json:
schema:
$ref: "#/components/schemas/ChartDataAsyncResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
500:
$ref: '#/components/responses/500'
"""
json_body = None
if request.is_json:
json_body = request.json
elif request.form.get("form_data"):
# CSV export submits regular form data
try:
json_body = json.loads(request.form["form_data"])
except (TypeError, json.JSONDecodeError):
pass
if json_body is None:
return self.response_400(message=_("Request is not JSON"))
try:
query_context = self._create_query_context_from_form(json_body)
command = ChartDataCommand(query_context)
command.validate()
except QueryObjectValidationError as error:
return self.response_400(message=error.message)
except ValidationError as error:
return self.response_400(
message=_(
"Request is incorrect: %(error)s", error=error.normalized_messages()
)
)
# TODO: support CSV, SQL query and other non-JSON types
if (
is_feature_enabled("GLOBAL_ASYNC_QUERIES")
and query_context.result_format == ChartDataResultFormat.JSON
and query_context.result_type == ChartDataResultType.FULL
):
return self._run_async(json_body, command)
form_data = json_body.get("form_data")
return self._get_data_response(command, form_data=form_data)
@expose("/data/<cache_key>", methods=["GET"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".data_from_cache",
log_to_statsd=False,
)
def data_from_cache(self, cache_key: str) -> Response:
"""
Takes a query context cache key and returns payload
data response for the given query.
---
get:
description: >-
Takes a query context cache key and returns payload data
response for the given query.
parameters:
- in: path
schema:
type: string
name: cache_key
responses:
200:
description: Query result
content:
application/json:
schema:
$ref: "#/components/schemas/ChartDataResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
cached_data = self._load_query_context_form_from_cache(cache_key)
query_context = self._create_query_context_from_form(cached_data)
command = ChartDataCommand(query_context)
command.validate()
except ChartDataCacheLoadError:
return self.response_404()
except ValidationError as error:
return self.response_400(
message=_("Request is incorrect: %(error)s", error=error.messages)
)
return self._get_data_response(command, True)
def _run_async(
self, form_data: Dict[str, Any], command: ChartDataCommand
) -> Response:
"""
Execute command as an async query.
"""
# First, look for the chart query results in the cache.
try:
result = command.run(force_cached=True)
except ChartDataCacheLoadError:
result = None # type: ignore
already_cached_result = result is not None
# If the chart query has already been cached, return it immediately.
if already_cached_result:
return self._send_chart_response(result)
# Otherwise, kick off a background job to run the chart query.
# Clients will either poll or be notified of query completion,
# at which point they will call the /data/<cache_key> endpoint
# to retrieve the results.
async_command = CreateAsyncChartDataJobCommand()
try:
async_command.validate(request)
except AsyncQueryTokenException:
return self.response_401()
result = async_command.run(form_data, g.user.get_id())
return self.response(202, **result)
def _send_chart_response(
self,
result: Dict[Any, Any],
form_data: Optional[Dict[str, Any]] = None,
datasource: Optional[BaseDatasource] = None,
) -> Response:
result_type = result["query_context"].result_type
result_format = result["query_context"].result_format
# Post-process the data so it matches the data presented in the chart.
# This is needed for sending reports based on text charts that do the
# post-processing of data, eg, the pivot table.
if result_type == ChartDataResultType.POST_PROCESSED:
result = apply_post_process(result, form_data, datasource)
if result_format == ChartDataResultFormat.CSV:
# Verify user has permission to export CSV file
if not security_manager.can_access("can_csv", "Superset"):
return self.response_403()
# return the first result
data = result["queries"][0]["data"]
return CsvResponse(data, headers=generate_download_headers("csv"))
if result_format == ChartDataResultFormat.JSON:
response_data = simplejson.dumps(
{"result": result["queries"]},
default=json_int_dttm_ser,
ignore_nan=True,
)
resp = make_response(response_data, 200)
resp.headers["Content-Type"] = "application/json; charset=utf-8"
return resp
return self.response_400(message=f"Unsupported result_format: {result_format}")
def _get_data_response(
self,
command: ChartDataCommand,
force_cached: bool = False,
form_data: Optional[Dict[str, Any]] = None,
datasource: Optional[BaseDatasource] = None,
) -> Response:
try:
result = command.run(force_cached=force_cached)
except ChartDataCacheLoadError as exc:
return self.response_422(message=exc.message)
except ChartDataQueryFailedError as exc:
return self.response_400(message=exc.message)
return self._send_chart_response(result, form_data, datasource)
# pylint: disable=invalid-name, no-self-use
def _load_query_context_form_from_cache(self, cache_key: str) -> Dict[str, Any]:
return QueryContextCacheLoader.load(cache_key)
# pylint: disable=no-self-use
def _create_query_context_from_form(
self, form_data: Dict[str, Any]
) -> QueryContext:
try:
return ChartDataQueryContextSchema().load(form_data)
except KeyError as ex:
raise ValidationError("Request is incorrect") from ex
except ValidationError as error:
raise error
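# Illustrative client-side sketch (not part of the original module): calling
# the POST /api/v1/chart/data endpoint handled by ChartDataRestApi.data()
# above with a minimal query context. The base URL, access token, and
# datasource/metric values are hypothetical placeholders.
if __name__ == "__main__":
    import requests

    query_context = {
        "datasource": {"id": 1, "type": "table"},  # hypothetical datasource
        "queries": [{"metrics": ["count"], "row_limit": 10}],
        "result_format": "json",
        "result_type": "full",
    }
    resp = requests.post(
        "http://localhost:8088/api/v1/chart/data",  # hypothetical host
        json=query_context,
        headers={"Authorization": "Bearer <access-token>"},  # placeholder token
    )
    print(resp.status_code)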
| 37.112782
| 88
| 0.621353
|
b59e7571f85c6ffc83068ab75d1f61019c13e8e6
| 1,992
|
py
|
Python
|
playground/detection/coco/poto.res50.fpn.coco.800size.3x_ms.3dmf_wo_gn.aux/config.py
|
attilab97/DeFCN
|
811335665f62624d63fa28c6f13edd53ce5cc395
|
[
"Apache-2.0"
] | null | null | null |
playground/detection/coco/poto.res50.fpn.coco.800size.3x_ms.3dmf_wo_gn.aux/config.py
|
attilab97/DeFCN
|
811335665f62624d63fa28c6f13edd53ce5cc395
|
[
"Apache-2.0"
] | null | null | null |
playground/detection/coco/poto.res50.fpn.coco.800size.3x_ms.3dmf_wo_gn.aux/config.py
|
attilab97/DeFCN
|
811335665f62624d63fa28c6f13edd53ce5cc395
|
[
"Apache-2.0"
] | null | null | null |
import os.path as osp
from cvpods.configs.fcos_config import FCOSConfig
_config_dict = dict(
MODEL=dict(
WEIGHTS="detectron2://ImageNetPretrained/MSRA/R-50.pkl",
RESNETS=dict(DEPTH=50),
SHIFT_GENERATOR=dict(
NUM_SHIFTS=1,
OFFSET=0.5,
),
FCOS=dict(
NORM_REG_TARGETS=True,
NMS_THRESH_TEST=1.0, # disable NMS when NMS threshold is 1.0
BBOX_REG_WEIGHTS=(1.0, 1.0, 1.0, 1.0),
FOCAL_LOSS_GAMMA=2.0,
FOCAL_LOSS_ALPHA=0.25,
IOU_LOSS_TYPE="giou",
REG_WEIGHT=2.0,
),
POTO=dict(
ALPHA=0.8,
CENTER_SAMPLING_RADIUS=1.5,
AUX_TOPK=9,
FILTER_KERNEL_SIZE=3,
FILTER_TAU=2,
),
),
DATASETS=dict(
TRAIN=("coco_2017_train",),
TEST=("coco_2017_val",),
),
SOLVER=dict(
CHECKPOINT_PERIOD=5000,
LR_SCHEDULER=dict(
MAX_ITER=200000,
STEPS=(150000, 200000),
),
OPTIMIZER=dict(
BASE_LR=0.01,
),
IMS_PER_BATCH=512,
),
INPUT=dict(
AUG=dict(
TRAIN_PIPELINES=[
("ResizeShortestEdge",
dict(short_edge_length=(640, 672, 704, 736, 768, 800), max_size=1333, sample_style="choice")),
("RandomFlip", dict()),
],
TEST_PIPELINES=[
("ResizeShortestEdge",
dict(short_edge_length=800, max_size=1333, sample_style="choice")),
],
)
),
TEST=dict(
EVAL_PEROID=5000,
),
OUTPUT_DIR=osp.join(
'/content/drive/MyDrive/Master/Disertatie',
osp.split(osp.realpath(__file__))[0].split("playground/")[-1]),
)
class CustomFCOSConfig(FCOSConfig):
def __init__(self):
super(CustomFCOSConfig, self).__init__()
self._register_configuration(_config_dict)
config = CustomFCOSConfig()
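# Illustrative sketch (not part of the original config): after
# _register_configuration, the values in _config_dict are expected to be
# reachable as nested attributes on `config`, following cvpods'
# detectron2-style config layout. Exact attribute paths depend on the
# installed cvpods version.
if __name__ == "__main__":
    print(config.MODEL.FCOS.IOU_LOSS_TYPE)  # "giou"
    print(config.SOLVER.OPTIMIZER.BASE_LR)  # 0.01
    print(config.OUTPUT_DIR)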
| 26.918919
| 111
| 0.538655
|
6b21c5f820cc8806caceae8a4e532b902b0d0013
| 2,676
|
py
|
Python
|
old/modules/libraries/telepot/routing.py
|
deklungel/iRulez
|
eca073d0af55f8bf7006bf37c2cd69116e926211
|
[
"MIT"
] | 1
|
2018-03-21T15:08:22.000Z
|
2018-03-21T15:08:22.000Z
|
old/modules/libraries/telepot/routing.py
|
deklungel/iRulez
|
eca073d0af55f8bf7006bf37c2cd69116e926211
|
[
"MIT"
] | 1
|
2017-08-25T06:12:08.000Z
|
2017-08-25T06:13:58.000Z
|
old/modules/libraries/telepot/routing.py
|
deklungel/iRulez
|
eca073d0af55f8bf7006bf37c2cd69116e926211
|
[
"MIT"
] | 1
|
2020-07-23T11:57:06.000Z
|
2020-07-23T11:57:06.000Z
|
import re
from . import glance, _isstring, all_content_types
def by_content_type():
def f(msg):
content_type = glance(msg, flavor='chat')[0]
return content_type, (msg[content_type],)
return f
def by_command(extractor, prefix=('/',), separator=' ', pass_args=False):
if not isinstance(prefix, (tuple, list)):
prefix = (prefix,)
def f(msg):
text = extractor(msg)
for px in prefix:
if text.startswith(px):
chunks = text[len(px):].split(separator)
return chunks[0], (chunks[1:],) if pass_args else ()
return (None,), # to distinguish with `None`
return f
def by_chat_command(prefix=('/',), separator=' ', pass_args=False):
return by_command(lambda msg: msg['text'], prefix, separator, pass_args)
def by_text():
return lambda msg: msg['text']
def by_data():
return lambda msg: msg['data']
def by_regex(extractor, regex, key=1):
if _isstring(regex):
regex = re.compile(regex)
def f(msg):
text = extractor(msg)
match = regex.search(text)
if match:
index = key if isinstance(key, tuple) else (key,)
return match.group(*index), (match,)
else:
return (None,), # to distinguish with `None`
return f
def process_key(processor, fn):
def f(*aa, **kw):
k = fn(*aa, **kw)
if isinstance(k, (tuple, list)):
return (processor(k[0]),) + tuple(k[1:])
else:
return processor(k)
return f
def lower_key(fn):
def lower(key):
try:
return key.lower()
except AttributeError:
return key
return process_key(lower, fn)
def upper_key(fn):
def upper(key):
try:
return key.upper()
except AttributeError:
return key
return process_key(upper, fn)
def make_routing_table(obj, keys, prefix='on_'):
def maptuple(k):
if isinstance(k, tuple):
if len(k) == 2:
return k
elif len(k) == 1:
return k[0], lambda *aa, **kw: getattr(obj, prefix+k[0])(*aa, **kw)
else:
raise ValueError()
else:
return k, lambda *aa, **kw: getattr(obj, prefix+k)(*aa, **kw)
# Use `lambda` to delay evaluation of `getattr`.
# I don't want to require definition of all methods.
    # Let users define only the ones they need.
return dict([maptuple(k) for k in keys])
def make_content_type_routing_table(obj, prefix='on_'):
return make_routing_table(obj, all_content_types, prefix)
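# Illustrative usage sketch (not part of the original module): the key
# functions above are meant to be passed to telepot's Router, which picks a
# handler based on the returned key. A minimal demonstration of
# by_chat_command() on a fake message dict (this module is normally imported
# as part of the telepot package, not run directly, because of its relative
# imports):
if __name__ == "__main__":
    key_fn = by_chat_command(pass_args=True)
    fake_msg = {'text': '/start hello world'}
    key, extra = key_fn(fake_msg)
    print(key)    # 'start'
    print(extra)  # (['hello', 'world'],)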
| 29.733333
| 83
| 0.56278
|
9d070439684b906fd9cd3078ff6a78292e606dbe
| 4,047
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
Seiya7/django-api-recipe-app
|
bac4e4d5808990ddd7aca0b399bcebc52a12625d
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Seiya7/django-api-recipe-app
|
bac4e4d5808990ddd7aca0b399bcebc52a12625d
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Seiya7/django-api-recipe-app
|
bac4e4d5808990ddd7aca0b399bcebc52a12625d
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model # noqa
from django.urls import reverse # noqa
from django.test import TestCase # noqa
from rest_framework import status # noqa
from rest_framework.test import APIClient # noqa
from core.models import Tag, Recipe # noqa
from recipe.serializers import TagSerializer # noqa
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
            'test@email.com',
'password'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for authenticated user"""
user2 = get_user_model().objects.create_user(
'other@email.com',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {'name': 'Test Tag'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user, name=payload['name']
).exists()
self.assertTrue(exists)
def test_tag_invalid_name(self):
"""test creating new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name="Vegan")
tag2 = Tag.objects.create(user=self.user, name="Vegetarian")
recipe = Recipe.objects.create(
user=self.user,
title="Tikka Maala",
time_minutes=17,
price=8.00
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
tag = Tag.objects.create(user=self.user, name="Vegan")
Tag.objects.create(user=self.user, name="Vegetarian")
recipe1 = Recipe.objects.create(
user=self.user,
title="Chicken wings",
time_minutes=15,
price=8.00
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
user=self.user,
title="Pancakes",
time_minutes=6,
price=3.00
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
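# Illustrative note (not part of the original file): these API tests are
# typically run with Django's test runner from the project root, e.g.
#
#     python manage.py test recipe.tests.test_tags_api
#
# The dotted path above assumes the app/recipe layout implied by the imports
# (a `recipe` app with a `tests` package alongside the `core` app).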
| 31.866142
| 71
| 0.629849
|
531f14688dd1d406dd28f661a889a5b037720001
| 1,154
|
py
|
Python
|
problems/CR/auto/problem82_CR.py
|
sunandita/ICAPS_Summer_School_RAE_2020
|
a496b62185bcfdd2c76eb7986ae99cfa85708d28
|
[
"BSD-3-Clause"
] | 5
|
2020-10-15T14:40:03.000Z
|
2021-08-20T17:45:41.000Z
|
problems/CR/auto/problem82_CR.py
|
sunandita/ICAPS_Summer_School_RAE_2020
|
a496b62185bcfdd2c76eb7986ae99cfa85708d28
|
[
"BSD-3-Clause"
] | null | null | null |
problems/CR/auto/problem82_CR.py
|
sunandita/ICAPS_Summer_School_RAE_2020
|
a496b62185bcfdd2c76eb7986ae99cfa85708d28
|
[
"BSD-3-Clause"
] | 2
|
2020-10-15T07:06:14.000Z
|
2020-10-15T17:33:01.000Z
|
__author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
DURATION.TIME = {
'put': 2,
'take': 2,
'perceive': 3,
'charge': 5,
'move': 10,
'moveToEmergency': 5,
'moveCharger': 15,
'addressEmergency': 10,
'wait': 5,
}
DURATION.COUNTER = {
'put': 2,
'take': 2,
'perceive': 3,
'charge': 5,
'move': 10,
'moveToEmergency': 5,
'moveCharger': 15,
'addressEmergency': 10,
'wait': 5,
}
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
rv.EDGES = {1: [7], 2: [8], 3: [8], 4: [8, 9], 5: [7], 6: [7], 7: [1, 5, 6, 8], 8: [2, 3, 4, 7], 9: [4, 10], 10: [9]}
rv.OBJECTS=['o1']
rv.ROBOTS=['r1','r2']
def ResetState():
state.loc = {'r1': 2, 'r2': 1}
state.charge = {'r1': 3, 'r2': 3}
state.load = {'r1': NIL, 'r2': NIL}
state.pos = {'c1': 'r2', 'o1': 2}
state.containers = { 1:[],2:['o1'],3:[],4:[],5:[],6:[],7:[],8:[],9:[],10:[],}
state.emergencyHandling = {'r1': False, 'r2': False}
state.view = {}
for l in rv.LOCATIONS:
state.view[l] = False
tasks = {
7: [['fetch', 'r1', 'o1']],
}
eventsEnv = {
}
| 22.627451
| 117
| 0.496534
|
0a27cfc3d3eab85186210fc345bcb1d7aaa76a08
| 25,721
|
py
|
Python
|
source/images.py
|
peppy0510/PyMusicPlayer
|
6ac4779a137191700506629202eb596beacce021
|
[
"MIT"
] | 18
|
2019-04-22T10:42:14.000Z
|
2022-02-13T14:21:18.000Z
|
source/images.py
|
peppy0510/PyMusicPlayer
|
6ac4779a137191700506629202eb596beacce021
|
[
"MIT"
] | null | null | null |
source/images.py
|
peppy0510/PyMusicPlayer
|
6ac4779a137191700506629202eb596beacce021
|
[
"MIT"
] | 5
|
2020-01-11T19:15:40.000Z
|
2021-09-27T20:11:23.000Z
|
# encoding: utf-8
# author: Taehong Kim
# email: peppy0510@hotmail.com
from wx.lib.embeddedimage import PyEmbeddedImage
SmallUpArrow = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAADxJ"
"REFUOI1jZGRiZqAEMFGke2gY8P/f3/9kGwDTjM8QnAaga8JlCG3CAJdt2MQxDCAUaOjyjKMp"
"cRAYAABS2CPsss3BWQAAAABJRU5ErkJggg==")
SmallDnArrow = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAEhJ"
"REFUOI1jZGRiZqAEMFGke9QABgYGBgYWdIH///7+J6SJkYmZEacLkCUJacZqAD5DsInTLhDR"
"bcPlKrwugGnCFy6Mo3mBAQChDgRlP4RC7wAAAABJRU5ErkJggg==")
checkmark_icon14 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAABCUlEQVR4nNWSv0rEQBDGv9UD"
"g0WQSPC2CaZJtW0IWCxpLJKnsPEJ0sfCPIAgSIr1AdLZpbBLYREsLNIES0ml2yRimnBjJ3cc"
"enJWDkwxML+Zb/4wIsI2trMV9Rdwthx4ngff9xEEAYQQGIZh1nXd6TiOr0mSPP6qI2MMRHRQ"
"FMVtnudXm6TuATgEANM0oZS6qaqKA9hdq0xEX26a5hPnfFRKIcuycwAURdF9XdfHy3lEtApK"
"Ke8syyIpZTufz99c131pmuZIa40fwTiO9x3HuQZAhmEsyrI86fsebduugSszTtP0wRi7MAzj"
"PU3TyzAMH7TWm8/BOYdt2z2AMyHE8zAM3y0d7P+83CeHypR+yP8P/AAAAABJRU5ErkJggg==")
listbox_brokenlink_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAQAAAAMCAYAAABFohwTAAAAaklEQVR4nE3OvQnCYBSF4SfX"
"rxFBiGQph0hl6QKu4ViS0s4hxCIxGNBYeEVP+XL+YIsed7QFS6x89AyMfnpE2qcEr8D855gC"
"QxbCEKhQEiwKbrhgg+s3u06gQoN9fjnCLpdmHAo6nFHj9AYfShbMZoiymQAAAABJRU5ErkJg"
"gg==")
listbox_brokenlink_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAQAAAAMCAYAAABFohwTAAAAcUlEQVR4nE3OsQkCQQBE0Xfr"
"JiIIik0ZWYEV2IQtWJYYmlnEYXCnKOgYuKA//HyGkWSdZEhyS7KtmGLmy6vg7sejYMCziXdB"
"/opnwYhbE2NBh9rEpOKKC5bouyQwb1VfscKufTlUbLBvG33FCWcscPwAHbsliUqV3YQAAAAA"
"SUVORK5CYII=")
listbox_brokenlink_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAQAAAAMCAYAAABFohwTAAAAb0lEQVR4nE3OMQrCUBRE0eP3"
"NyIICW7KRVhZugG34bIkpZ2LEIskYkCfRR7owDSXyzCCXdAHY7CvWGFtzrvg6ZdXQY8pwacg"
"/oypYMCYYChYoCZYVjxwQ4u7mEc2QRupbnHML2fBIYjsqaLDFQ0uX4+rIsNUxKskAAAAAElF"
"TkSuQmCC")
listbox_tab_add = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAMElEQVR4nGP8//8/AwycPHny"
"PwMDA4O5uTkjTIyJgQAgqIDxxIkT//EpIGwC7R0JAIW3EV/jFS/AAAAAAElFTkSuQmCC")
listbox_playing_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAPklEQVR4nJWQwQ0AIAwC6+tG"
"6Y8VdR6XxAE0VUn4XQgQkrrtqByAAUuaV6iCN+gEN8BRKDPHU9Jfp3Ldy08LPz2cvZ85YukA"
"AAAASUVORK5CYII=")
listbox_playing_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAUUlEQVR4nI2QsQ2AMAwEnyit"
"dwqbwKQw03uApwkBRcbipG98VxkkN0nIBpLqa19RwcPh7nL3hokyH6J4IakgfLPXRK5mdgJA"
"FA15UzM5+POnCxcsagppPRu0AAAAAElFTkSuQmCC")
listbox_stop_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAKklEQVR4nGP8//8/AyHARFAF"
"AwMDC4zBwcGBYeSPHz8YiTaJzooYqRYEAGtJCg2iIiDzAAAAAElFTkSuQmCC")
listbox_stop_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAAP0lEQVR4nGP8//8/AyHAwsDA"
"wNDb29uGS0FxcXEVC4yTlpZWia5g1qxZ7QwMDAxMBO2ivyK4w2GOxAYYiQknAPKrEGYaakYq"
"AAAAAElFTkSuQmCC")
listbox_tab_add = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAMElEQVR4nGP8//8/AwycPHny"
"PwMDA4O5uTkjTIyJgQAgqIDxxIkT//EpIGwC7R0JAIW3EV/jFS/AAAAAAElFTkSuQmCC")
listbox_tab_close = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAUElEQVR4nIWPQQrAMAgEJ3nX"
"+vrsv7aXJthesiDIyKCSBElJQq/NZlVFElUV3nQ2khxgG4Ddr7XGSPKxgDMEmFwyu20b2/Sb"
"hqT0nX+B25sPaylfC9YsisEAAAAASUVORK5CYII=")
playbox_button_agc_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABFklEQVR4nO2UIYsDMRCFX49V"
"keFMIG5aDipjloq1MSeqI6vzG6L6G6orT1ec2X9QU79QuRAoZeXqE9lty5Z2I46I6z01vAl8"
"YfIyEyJCcmUAvPcpkUKILFRt26ZBMsYAvKWBDfRP/ZvULO6YMu6jWn8dAABS21XOAeD4Hbx7"
"Z0REFNL8RFJbY4xR/QWslp3tjAKUcS40e+exGGNEFEOV2hoFFbBX6E174IxSIyYs57OmKnF6"
"54XCZXrdUJv9dhcJvNF4muR8xqefzq1yPl1oeaiOPC8U6nKz3u4bAPW54XkxHP9TTYjIe/94"
"I0ptl9htyvpa4pKdPjzxaWKMCSGi0vSLCu/6Slvilajdf00ZqI4qhEiJBPADbSJZ+8/BNTwA"
"AAAASUVORK5CYII=")
playbox_button_agc_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABQElEQVR4nO2VIWvDQBiG34y4"
"qQwijrleU+hUxGDQ2orEVdVVx7WT9wMmTnZ11XVRE2sTEbtAISIukKZxJaKwU9WZyEQYY82g"
"nFj3quPh4HkPPr5TKKWQHhVAURQylYQQtTodJxM5yuv5HMCVHNmX/Fv/plVtdi13eDpklg0A"
"yKKlEewBwByVVutb8nMavTWL3mDiJa8K+EbS3TJWMmd2cJ0cyH0j0Nd1cg6rWCX60OogzQF4"
"adzr0jYAaNMxW7SQvR96gwe7Rs5hFTtX79jQ7g6pV6PPS65w3o9Eg96/t2a7JIxdhS8e9/FT"
"JOyOGQYbD9p0zLaDWwDtGz0MNlUhz2/U4+Q0iVWCmcOmGiCi/usuG1vbwdLgHABgrpkG1Ig5"
"Ku+1k1aFUloUhcw9TAi5pC1xSdbPGa4+W6lWQohMJYAPvr973A2aPgoAAAAASUVORK5CYII=")
playbox_button_agc_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABFUlEQVR4nO3WMYqEMBgF4DeL"
"l/gZFoQ0HkAWm+ktAhYD3iCdpQfwAG5nlwsMwhRCCnsbkRzA0mb5r7GFCrJbOAwSFnZeFV/z"
"xZCEnIQQcB4PADO7JInIm0d1Xbsh0zQF8OYG+5GX+q9VW8nKrh/c5HLO2v1uDlC5uSFGb5cJ"
"qO6ijTFGq6moLGArpf1i2xyh8tD5URbNrO3b4PJBAEBJabIQ/DUF6hpumiNUHjo/CnF+n/rN"
"byyLmjfP3Gv7Kg/d2BZSKj22t4bDKB713YKS0mgVAKCzP+r7uvwPzcN7AIXSJiGAm/xz4DLT"
"KldSAgDiwhCwaeLCJLSrnoQQzOzyHiaiP31eX+rTWXaTS3J5SxDt7/Vj8w3i63uTwpeYJAAA"
"AABJRU5ErkJggg==")
playbox_button_ff_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAmklEQVR4nO3Uuw0DIRAE0DmL"
"aDuYeFqjDTqhv+2A2AEny0KyzaHz6oKbiBUST8tvk4TwJADuHkmSTH3UWoshzQzAIwYbcqsX"
"UkspOedP5b9UACTfsaGczybJ3WdeTinly6y711p/LmJmJE8710N9p7PUyV7X1Q68NvyQt6IO"
"wIK3R1L/G2NiZpIu/0vc6lL2Oxx5oXaVZCQJ4AmyrTqy4lZ/dgAAAABJRU5ErkJggg==")
playbox_button_ff_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAmUlEQVR4nO3VsQ2AIBAF0K9h"
"iauxtrZ2lhvCERziZrGmtrZnDQstCEYDRC4W/opLSB4/IdBYa6EeA8B7r0kSkTlWy7LqkOPY"
"A2h1sCi/+iF1GDqR+W6spQIQkRCLxlpqiN0dpZb6cJRE27ylMjPzlLi5pCszO7dFYzqJ3K5R"
"oax+hWrY7zpm5fOvxK8W5bxNx2erqhKRJglgB3yPM9oSP3qWAAAAAElFTkSuQmCC")
playbox_button_fr_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAjklEQVR4nO2UsQ3AIAwEnYjK"
"G7j+1ViDijW8HxtQpwhFRBQFCLISKVfh6vTo9QsAMscRUUrJUikibn/lnG2UzExEq42s4re+"
"1Oq9DyFcnbe4AZ+IXJ3zrWdBV75u61igp1ZVnevuaJOqxhiP81md7XS3qco9+A0A9m20gZkB"
"fGolfmszpcOWhSrWubvTwgaF0CjkaPX9pQAAAABJRU5ErkJggg==")
playbox_button_fr_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAl0lEQVR4nGNUVlZmoDtgYWBg"
"eP78OT2tlJSUZIGw9u27RB8rnZz0GBgYmOhjGRoYtXWQ2jpnToeFhQouLkHAQoZ9c+bMwcWl"
"vq2YFpDkP5JtJc9DeABR8ZqSUnHixJ2UlBS62orLbrKdQnIaRrObvGBgVFZWfv78OT3LYUlJ"
"ySFVSozaSjSAlhKQypautkpKStLTSgYGBgB9NjiK7ILTQgAAAABJRU5ErkJggg==")
playbox_button_highlight_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADoAAAAVCAIAAACYI2qcAAABLElEQVR4nO2VIW+EMBiG3y2o"
"SnamCa5b5mvIiVnMxDQSzW9A9TegK9EnzmBPLBj8csgmmB0SPUG5SxgbZTnGkfCopv1e+pCP"
"ljvGGJaDBaAsy7k1jKCUWs2orut5VQYhhAC4n1tjHKvulKy6U2L1T3M/2p5knCo4Xhg8vIvD"
"JnzDLk6V44WBawNAsRdJ/r0ywXkGABzPMChPW71+qTHV/QnuB09HKfQePs+TqwVVGou0feUe"
"0wFd2w0itxkW502fH6vjQbVPBwDeX9m1/WvQWLfKLp3qrunGVpmMP3+vvFZQM+6o5R+F7b5w"
"qDQWMqv+Idhh5LebJ3ITBlH0CgDFXijwvrK2y1Umd6OCgzDGmt/xjUMIYYwt7N5ddadkYbr6"
"ZljEaUOjSymdW8OULyeWp0mUz7SHAAAAAElFTkSuQmCC")
playbox_button_highlight_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADoAAAAVCAIAAACYI2qcAAABIklEQVR4nO2XMW6DMBSG/1Rc"
"4s1eOABDluwMlthQL+CNrT5AD8DI5hsgdbDkoXuXDj5A9g7vGh0gTUSg4DbITcU3Wc/v2R96"
"NoKdEAL3QwKAmWNrLIKIkm7Utm1clVnKsgTwEFsjjE13TTbdNZnQ9Y3UlgGArZaNB1vdBdhq"
"2dH40cyLSB9dVnieP+dckYQ9nW/U28G4msBWq8a76maFVNSuYKvVx6OrsollpnWPRknTDfOv"
"Td9f04Oh0+oA4Mczh7Y/LRwwfXZTZZxzzqj0eq5vXN/M7zJvVTirO0a2z4/mxYOKOmSTXxQO"
"CDy7WWWUVlICAPJnRxi9Eqcup8o8BRXOsRNCMPNdfDMQ0f947/5VNt016a9abI1F9H8TRBTb"
"ZCmfU0mpwpmLP04AAAAASUVORK5CYII=")
playbox_button_highlight_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADoAAAAVCAIAAACYI2qcAAABRElEQVR4nO2WsU6DQACGfwyb"
"E26XbqWYdGNwwpWhPgSrt7WO9wg3SreO5jYfoGVg1cSEwUkTIWzNDQ5MzjgAxhCw0Ii0hm+6"
"HP9/fIHL5RRd13E8qACklH1rNIIQomajj/m8X5WdnC6XAE761mjHoNslg26X1OjGniKCCAAS"
"V3AaIwrEZZAAiAKhcK5wrnhxZfLbDLJ8s2LiCp4HvjJNdeuIPeN1GjKWMnr7fk+r19yvqC0c"
"llLbgrlmLJ2NK5dRa1+w9Q3uZ8PrYm7z9mxN6aRYHQDi6mSJvYuNdUd26FxMkLhi9VJ+lrhi"
"dbOFZdOHs5+Tv1XMabcZrs7NR/9pA23hsNAe/UGxRP3XrWQ8C21hcA4AMNdMQ+X2Lf6yZdO7"
"VsVdKLquSymP4s5ACPkf5+6hMuh2SX4yZJffw0cFQAjpW6Mpn55grasoRjO8AAAAAElFTkSu"
"QmCC")
playbox_button_loop_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABA0lEQVR4nO2WLW4DMRCFX6q9"
"xIAikz2AFYWEG6xUtjcwC+wBfIDCZb5AtVJBpQHmJVHkAywMieYaAWsQZVvHilSDpg9Zzz+f"
"3vhHXimlUF0NABGpiSSiZm6N41gH2fc9gKc6sCv9U/8mtfmxJw7d+7N/e6ELwwUAgHG800tH"
"Pl+tnwAArb2cuVBx1ji4o/XMzN4e3RC/c2Y8s7fwHzGzWCk17kO7XRMA0Hrbhn1cOqUBchW+"
"T8F1AUBrvc6MKs2qN2ZKVZPD12Q2eukAqcLMuU3FjayTt51HOhs7Z7o5CIxjDeDakVNhBKyU"
"UiJS8x0mokd6JR6Jmk5TTWT6SxBlL9cv6Awd/3NgvHR6zAAAAABJRU5ErkJggg==")
playbox_button_loop_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABB0lEQVR4nO2UoY6EMBRFz25Q"
"lbgmuLIhWVlDRozF8AUj0fMNqH7DaiRfgMGuGlM5yWSQJKhFoleAIBB2GVOxs1c1t2nPu+3L"
"e1FK4Vwe0HWdS6SU0htXwzC4QQohgFc3sIX+qX+T6m3u6FN++Co+6nZmpCEATWVKu3aC5JzF"
"PgD9ZX7yAeqqiNS/FKZuCZJzdtK2ZOl8zvFHXZd267K9L6yjsL9fW4D2eu/DSK+dvQEeyLpT"
"YZrnKfSXYjMo+7PaW+PHRw0QvL/5zc2uHYCmMsYY89On8ktWP87ymKk3yirKxyA0lbHA0gmS"
"nRFAKTXORjcSQiilnmlKPBN16mGXDTVRpZQukcA3XUttf1c7/3cAAAAASUVORK5CYII=")
playbox_button_loop_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAABGklEQVR4nO2VIU/DQBiGn5I6"
"1BLMpa63LZmrmJquGMXjZqkbyPsJlc0kFjcPragdyRJEHcm61DUVS0Chi1gxKysN4gTjU5f3"
"Lu+T97vLd4aUEu1lAmVZ6kQKIcz96mM+14M8XyyAMz2wg/qn/k2qeXQnj43VRTYb97+EKA68"
"FADnupraTWX78jBICgCs0J/d9o56d86ax97OzZSqlB/uln7+nbLHK5W53K3zFrOu1GiTTkay"
"D9C7Gln3m7ypdA3Q1uHfVbo0UsAKfbvlVNesl0PnOVlHAO+Pr8XN0G4qUHe4Um2Xyg9Zi2QQ"
"JMDE9Vfj6ZMTeEFaW9vAobJ96xgBQ0pZlqXOOSyEOKUpcUrU+g3vP1utVCGETiTwCVLMcMKP"
"a6mHAAAAAElFTkSuQmCC")
playbox_button_play_black = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAh0lEQVR4nO3UsQnFMAwE0MvH"
"lTa4+lbzGt7E+2kD17+IqxQhsYIgkKtUCB5IQpskpKcAcPdMkmTZqzFGDmlmAH452CGfepLW"
"Wq01WwVAMmivTzhiR/e6Zj9zTXft8ojq7r336/1R9a4XVde8dTXizUjaP3JOzEzSSz7ii9V5"
"TZmrnSrJTBLAH2jAKJc1SonSAAAAAElFTkSuQmCC")
playbox_button_play_white = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAiElEQVR4nO3WMQrAIAwF0G/x"
"EplzHQcnT+CxPIGTg3fyGh3cOpSaSEqhf8ogPEgkxDEzzOMBjDEsSSLys6q12pApJQCHDXbJ"
"r94khNBas1YBlFKUtrzDGls7V5m95zet2n6LmnOOMT5/r1VXPa0q8+SqxhOqvXeNN/ORjfhh"
"1THzO7cEEVmqAE79lyr2lsSv4wAAAABJRU5ErkJggg==")
playbox_button_play_red = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACcAAAAVCAIAAABKc2DEAAAAi0lEQVR4nO3VIQ6AMAwF0A+Z"
"w85U72rDoneE6Vl2vxksGsRQCMLapQkJX1U0ecnadINzDuoxAEopmiQRmVrty6JDTikBGHWw"
"W371IUOM87ZpqwByzkKb/8ISWzpXnt1nm1pt00X13q/Wvu+Xqq2eVOV5fFXiMdUjBIlX85GL"
"+GH1mmv9bFVVItIkAZwWKCmglIyMIgAAAABJRU5ErkJggg==")
apicoverlapmask = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAJYAAACWCAYAAAA8AXHiAAABmklEQVR4nO3SsQnAMBDAQNvN"
"t9l/Wi8REQh3E6jQnplnwcvO1wH8k7FIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFI"
"GIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4Sx"
"SBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuE"
"sUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiL"
"hLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgY"
"i4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFI"
"GIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4SxSBiLhLFIGIuEsUgYi4Sx"
"SBiLhLFIGIuEsUgYi8QFqpwBUYyXa/gAAAAASUVORK5CYII=")
macrobox_icon16 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACrElEQVR4nGWSPYicVRSGn3Pv"
"/e58m/nZn5nsYiCyGhFMZ0QQi6hNGm0XsRCLYKEI1toYaxFMqRHFQmHFwk7QJmCpbiGm8ie7"
"LmxiJjOT7M58M/Pdn2OxuxjZFw4HDrwv533PEVX9azro96b3BsGVC4CAAGKOuiBijrqAGHKK"
"NJrtotFs9aXq39brV94m1HP84hK4Aik8ai1Yh22UmMIflm8gzqM50Wh1ePKlDTWz0XAYJ/uU"
"7Q7WFdiigfENYlVRj/dBwXqPsRYjBmMMzjeopxPm1XhorPf48hRiBOsKjDHcu/k75y5e4qlX"
"Xqfq3yZWFcY6RMCggCLHYqCQM6SMamI6GHDu4iWeeHEDgDCZsPX153REKJeWyTEc5qCKquJQ"
"EM1IzswGd2mdOcuF197iGOvPvgDGsLX5KaCUi8vkGFHNoIpBFYkR0UTZbDPZ2+XGN1/wINaf"
"eY4LL1/m/t83md8fkWMgzWZozkcWUkRCRDwstDvc2LxGdWePp9985wGR52l2V/nlq0/IoSYr"
"aEo4ASQliAETAtlazHyGpsQJ5Eyu54gImvNhBqIZGwOSMzqfMR/vc37jMudffeN/3O0ff+Dn"
"ax/SObuOyZkwm0BOGDKYGLExEkZ3aPXWTpB3rn/HT1ffp93t4YxFqwOoKkgZA4oNAalrTjWX"
"mO78wa9Xr/xH/v5btj54l6XTa5TOw+QAmc+Q6QQ04USVog6or8EVtBZX2N38jIXlHq1HHue3"
"j96js3oGX3jy5AArYOoA1RjJCUdVIYO7WGvJKaHO0X34MXa//JicEp3eGhYhjwbYo600BvSf"
"W8h4jCt6qysLUlBv71B0uyQjqDE0RVEMjEbAEAOHj6OgwwFtX1Ku9FZEVf8Mu9un61t7tWk2"
"0WPzcvKKx2Mdj/GrD3m3/mj/X0XjRJEoM1EtAAAAAElFTkSuQmCC")
macrobox_icon32 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAGSElEQVR4nMWXW4hdVxnHf9/a"
"ey7nTDK3zMRKHdKaRBPbJFQQhiC04JPUF8H2TSERFSm1lYoQb0QIWkSkoBB98qkPXl5EiA8i"
"BStJbYrQW0KSpm0kETOZM5lJMplz9l7r+3xYa69zZiaY9EHcsFhnrbP2+v/3/7usb4mZXQG2"
"8v95VsTMDMB3b3PtzOsUI6OIc/FvkdiQgd9xXhCQ/hoZXCOksYtrECS9qxqYvO9+iqFhAErg"
"ZnXrxtZTP/0unQtnGJ6YjgScw1yBlCU4B1IgRZzHFeAc4orYirhOXBl/Oxf7okTSWpfmgvdM"
"3Hc/D3/mcwyNjq6UAMvvnmPp/NuMzz0YNxdJQAmsKEBcIlbk/0QiCYr1gJlAIhjJOlyau7Hw"
"b24uXmX6Izsok6YMt8aSuglcHOAGZI1zjZRmhhRJ7mQCSe/Hcf/JIzMwpShLwLIJojzRQOAs"
"LkQjsKWxxF5E6K3eIqhneGycofYYqGGiYBL3aPzE4pyZIOZQNRyKmZKcA5eZAWIWmaVxBlfN"
"4CuXL/GhPft45IlDhLqiu9wBJ6DNOsXMMFXQBGaa9jRUFULIGCWNGGbxRyYBNF9lhgArl99j"
"9uP7OfDkIVxRMtRq8/df/5zu9Q6tqRlMDSREM5lgagiKmWRDmAgaQlKhUYA+sGhDQBtmCMLy"
"Py8yu/sTHHzqCK6IrjO7+yHmv/wsvtdlbXkREcFUseBjb40CAdOQVVGzLLKLTpIA1fqyJ0VE"
"hJtX3mf7xx7m4NPfxxXFukwys3MP84eewXe7rF1fREgk1GfQCBzyHCFkJ8wKuMbxzBBt7BZY"
"61xjbv5RPv2tY5vAM4lde5g/vIFECLE1JEK0vQWPBZ99IDlhNEF2Qu037fUYnZq5I/AmJQ5/"
"A99dSySI4AlQNaBBE7FNBBSCImYxdEi9Ge3Z7Zz/429448VffTASS4uIGeYbEnU0QQio95ht"
"MAEWQCOJJmxEFVFjy/YPc+HE73jjxV/eA4m9yRxr3F7qRCW8T61GfY35OmKwMQpCgKDRHxoi"
"wSMiTMw9wIUTv/8ASjxD6N2mu7IEZv0v9zVaV9ExGfCBbP9MQnM0SAg4YHLuAS6c+C2njz9P"
"qHr/ncSuvTz2zaMU5RDV6q3sC+Y96uvoY6REJPTlximEuIkrYhbIOQmg8eJ7eERc3+FCAIl7"
"ma9zIiob+UVDcjwX83pI80UBhWHec+PSRXY9/gQHDj97V/DFd87yyvHnKUZajIxPZgKYgu9H"
"QZnZqsb4VwVxmEuJSQTRwMqli3z08SfvCbzzzllOvfBD3PAwI+0xrK5yMWPYOieMJsge3xAA"
"wWECaODm5ffZ+dkvsO8rz90d/PzbnPzZD3BDw7THJ9Gql6uifLpWVT41swJukIAjHiQirF69"
"ws7Pf5G9X3rq7uDn3uLkT47gRkZoTU6hdZXkHagO1KDurVeg8XTRAOoQM9Q5TITCFVSdhbuC"
"L517k1M//jbF0DCtian45U0hkurESEBhUxQ0JggKEmINRzw629tm+def/0Bv8SqfOnY8V0Tr"
"wM++zivHnqMcHmF0ahtWVfn9xGAdAanvkIhcCDjTaIoQYuyHgNQ143MPsnj6ZU5/52s5ha4D"
"P/o0ToTW5BR0u4iv4/veI94joUbq2Fxd4epeNHdDQNRwPiA+mkEGSIgPSO2Z3LGbpdMv89qR"
"r8YjFei8+Rqnv/d1SnG0p7ZB1UOCj2Hma/B1JFPXON+QqJCqB2mPnIgKDRRBMQmYM0wEcy7V"
"gvE8n9ixi6VX/8qZF44yO/8YZ37xIwoRWtMzaNWLBQnNnWBAplTYOMBCoKiqVPgMOKFLkuMk"
"1nQiqCrmYkVsEgvOiR27WfzbX1h46U+UW8YZmZ5Fe72+Le/gIw2GpDOh7PVw68LQB1wd4mQI"
"8etTeW0aM6OlW46FwEhrDGu1kaLEul3cIOimL++DN9Fm3W50xGyCEJBOh2J6e6xeEgFzsY85"
"XGJ92oxFwIe+p8sguuUuA6fmzNCFBdwggfa+R9iydz+r/3gVt20WK1wGz326B1qKaWsyWwO3"
"AVuwdJWIha6o4oKii9eYeugAW/Z/Mq5rLqdh+Tq3Tr6EjLbjzYgGcL22ttHEdzL5QKTK4J0j"
"KHZ7la0HH8VNz2QCl4nXc9u80//0EWDlP6ozIjEdXJd7AAAAAElFTkSuQmCC")
macrobox_icon53 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAADUAAAA1CAYAAADh5qNwAAAL+0lEQVR4nN2a+W9dxRXHv2fu"
"c7Adl+fg2BEIMF7ipNkIQq1IkPpjUdUfuqjih6qtWopNBG0FOEE0NBJtKa2qItoCbX8pFfwD"
"lVqhFqkLixqykYSEBIFYYhqHOIuXxPaz/e7M6Q9zzty5970Xgl2pKk+6nrvOnc/ZZ66JmUcB"
"fAIfn98UMTP/r0fx3/6VAFxEQVOT770FU2oCDAGg/BOkx1Ro4uPomZpjyrXhL0UP1Nmv1z87"
"i7aOriLTVCk+ujg6ggNPPYrZ8bNImltBxoCJQEQAGYAIbAzI+H1QIi1ARvcJiJ4jec4/YwDj"
"9/39Jpw3Rp7Vc8b4Z+VdRAQTnvfvcs6heXkb1n7ms1i+oiOnKU9tLV57+pcYf+cNlLtXg5m9"
"pki0ZYy0HsyQAZMJIGHQhOzecF73E2llXwdOxgvAiKBM1h8JCCBCMpkgkqZlmB4/h7f3vIBN"
"t33R9xlDTZ8+iQv/PoHydb2gxIBgclBkyEMAgElkAB6MCL41ej0CqQOuz1KSeLMSOCpqS1oi"
"Cpo3YgkqrNb2FZidnMDM5ATarlrpX69QLq3ClEpAiBsMsLSyT3qN2V/Sa8V9jlt9nvPnZePQ"
"H3vryN3rz+m7stNyLzPYsXcLa4P5BShjEu877EAFGOky8r7iAJEfdHQ9cAHZQHL3xCB6bzZo"
"3XRMXOcag6MAFkEB3h04AEUDjqVbkDTJi0gHHR5nsHMAu7wm2QUh+OucSb+gLd1ndtG+3u88"
"DDPg8lmpBiovmYJWwHkTLChPnzUmgZ2fx/yFSSzMzmRS5EhbKjznIk3JeVcww2BumRbZAXAO"
"zC5vDjGUUmcSz5sfcucVyokAXNAWEaEyNQ5bXcBNt9+BVWs3Ymr0BAgmEpoL/fk+Iqkj01ys"
"TdWIF7aDC2aoYNk4c3mKlVI7JRIwgu+NMpMjAolkQz41CSqTHuiWwWF0rtmAVes3Y+b8GZx7"
"5y20X9vtnxVB+Ld5ZycjZuwc2Bh4N3EAG38/OYE1oGD0Mk7nYozI/CLTINVaLho11hYzwxhC"
"ZfI87Pwcbhnajs41G/wLkhJuHdqBlX0DmBodgXhu0LL6B3LvKpihcz7KiZ9mJsxwqs165kfB"
"CDkLGJEdB+0JjPoWMYPIYG7iPOxcBbdsewCdA+tzkqMkwa1DO9DRuwaToyMSZRVGzNBZP3j1"
"M3aeKfY52c+ZpIJF5pcPFJErBTMpRi3VlgQTYwwqE+dQnatgy90P1gDlwO7ajq6B9Zgafd8n"
"VGaAbQTmMjDxlwCmEbAOWM56aqAEIDi9mEA+b3EAo8SgMnEetjKLLXfvxMqBDXWBQv8mwdbB"
"YazsXYMp0Zg6u2qOA4CrD+aie8L+pUK6mlWcg2rykwekxKAyfhZ2roIt39uFzrUbLwkU/7YO"
"DqOjd02msQAiYNZF4TozRe87cs1l1zxwAyjVVgajZuiioOEAB6SzM1jWdiW23vswOtduumwg"
"QExxcBgdvQO4UAeM2WU+piYn5seOA4xqip1FQ/NTTWjibWiGzJibOI9rbt6KjjWXNrkPA7uq"
"dyAzRZdJvW7wcBGYc4BeL5hgFNI5KzcamqGq2aG1owsjL/4V7/3zuUVBBbChYXT2r4t8TAfL"
"AYxtKqnDX2N2EvEYzNbfH/3ymnIu5AaSkkh9yoSk6cFNqYQkacKhp3+Fd//2p8WDmQRbBu9D"
"R89ADixoSbTCNvVAqplgelHuqgsF9tSSk2r8S5ItOS/BUksL2rquweFnn1waGBncOrTdg50c"
"kam7hngrA2ewtQLrW/UnbqypKElpNIk0w/A+FcDENErNzWjrvAaHn3kSJ178y+LBksSD9a7G"
"5MkTIEgZ5vJBg53zcNYCTrRnbQOfUjD1Ha2nmGG0pInBXB5sedfVOPSHX2PkpeeXCLYDnX1r"
"MXnyRFbbqe9YC9go6lkLl3qfa5x8o+gGdoBVMDQGE1Nsam5B61WdOPj7x/Hu3/+8NLBtD6Br"
"YB2mTr4n73cSOCJzE/NTTdX3qTphOwPjPBgXTFHBWlqxfOUqvPbMU3j7+T/C2XSRZIQtdw7j"
"6nWbMX9xMvKfLNmywgTzyx43UT/efxyH4rYhGDIwE8AcYD1Yc3kFDvz2Z5h4983FQQEgY/Dp"
"b34XsBZ2fl5qRDE7diEJs61NvtkSGTMIztd/jv3qUZgkOsAiLH0ZODgiMBPIrzt5P02AdHYa"
"c+NncNMd96K9e/WioQDgwLO/ARhIksSnGj/LDAuaTORhC8k3P0lkhpGIxxayZBXNeaBgOr3z"
"m1/qM6jOTmN67BQ2fuMe9H/+9iUB7fndz/HBkQMoX9eTWUxudZd9q5VF9AtQvhzypmTgZ6V+"
"ckogklmmSsQYwGR9wxhUKzOYHTuFzd++Dz23fWlJQLuf+DHGjh9G+dqeMGDWURIJW7b0XAzp"
"OU1plc7OT5+JCM7pYqNMoJmk6kgA4zWUVmYxPTqCG4e2Lx3o8Ydx+thBlK/vlVCNbDFU19M5"
"eL0XdEOoEP28CTqRhSFf8rMsCwc/sxaEEuz8LKZPjWDTt+5F7+e+sjSgx3Zh7PWDWHF9n49s"
"ekGgWPfjVsN6vUAB+CSrhawsiYAhS8fBrGURxhiksxcxfep9bLhzGH1f+OrSgH7xEMaOvopy"
"dx/YpeKr8tUj1kz81YUAWAtOq43ND8wg0VbolBkspuhXk3zUYbuAtDKDjUM70LtEoFce24Wx"
"w/tQ7u7PLR9znc86Po9mq7FsLcjaaJ2yxvx8ciPnI034HAONcuy/bDAw+8Eoer789SUD7f7p"
"Axg7sh/lG/qB2Ie01cHKPhc70OK2fpkkWnK6QJmF91DYOq35HJrbV+Ds3hcx+ebriwZ65dHt"
"OHNoD9qv6wWlvvQh50BSBmWbBAPdz222JlDklsgIAhRmoE5eIpuAgh2alrdh7txp7N05hInj"
"r31koD2P3I8zB19BubtPpjvRQK28z1qQ9ZDkxMxsGrXRfqMqnVymHRL/Qu7Yv5QcA9UqWjq6"
"YJIE+36wDRPHD1820N4f3Yczr+5Ge3e/H5RowAvOgorClC3WTjgnPhX/cgUtOSmR2JsYCh1m"
"JimFZbWK1o5VSJIE+x66PLB9j9yPsf0vo3x9H7ha9X4UtFG0DA+JGDpsNgiEbNpoPsX+Blkt"
"yrRWB84xjPpfdQGtHatQShLs33nXJcH27roHY7v/4YOCS/1mrYBJm0bHqd8ojcwt9RvSFIiP"
"G/mUcQyyDkbhohDfSHPEDK4uoEU0duD7g5g4dqhWQw9tw7n9L6O9ZyDzFVar0ADlpEC1GbQG"
"C4WW82Rt3rcazaeCeQmcj/+Zf4VB5OBsTmOJKeHVBwcxfuSA79Y57N85hHP7X0a5Z7WXrgyU"
"bL4vEzaONj1ns32bNz/TuPbLfMlI8vXR3IYyJZQrJDmLNTnK+eoCWld2oXL2NI7+ZDvW3/9D"
"nNnzAs7vfQnl/k/6RKlVipoHR/9jAeQ+c8YCz2coWQiCX4wxaSpL5kUohkhAggRTyH3MABPn"
"vpKzlksctwAvOLR0rEJ15gKOProDpmkZyj0D4DTNkng8cKLahFr3F33dZA4f8DhNYRqVSQRE"
"ZueigQtcDKYSjQF1fgMGqvNouqIZSanJT1MEqEYTuf96+TAmmXyE5W+BS1OvqUa1n9E1Csde"
"+oQ8nGqD/IRSpZzTYABmJGTk45nmEYqay0JRomjtJAMj0VRSTXMfszOoNAVVqzDLroB+yfOD"
"lQ4ifwpzGzVP4uxYpK9tBur/5Hnqm144W8fcSBdWBQqpBc3P+1xVhKJSCVSZAzUvB6UWbBSC"
"RDPItBcBkKxV+Gk/SdEbz1SLU4b8cY2+FKB4rCtYyFc5sA6YrcDIv/DkoK64oR8t3b24uO9f"
"aF63ya/UiKT9lD7TkmpCg1dw/CgShhPFURNdwvCyMlxBANRoh8RNiAgLbxxD2/ob0dK/NnsF"
"M1+A/Gvc3PEjOPGdr6H6wSlQayvYJCLpzK/YIGhQ4WLNhAidM7sGgAUe3Sn6D2lQiPKlsRY8"
"M4NlHZ3ofuIZtGz+lHYwlYMCAFSrmDm0F9S0DGSSaEpdZyAfFo4/SiyogSt0w7Gfyb8mLCyg"
"9cabgeaW+PaP739mngRwJerK6P/uRwAm/wPqLWN88pC3HAAAAABJRU5ErkJggg==")
macrobox_icon64 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAPSElEQVR4nOVbXWxcx3X+zlz+"
"KuIuRVKUI1q0ZBuNndhRbBSJrMpo0AJ962ubpyBF7NqyE6QNbImWYiVuURd2EtvJU1+CPATI"
"Qwu0r31sq1iJ5b/EiWI4jpNY0Q8p2eSKSy7J3Z05eZgzM2fu3iXlmIUffIHLvT+zc+d85zvf"
"OTN3Scx8AUANH86tQczMH/QoPsjNAGh+0IP4ALfGQNXVyy89j8Vfvw4zNAQyBUDhDgEkJ0Rg"
"IhBRz3UgXPe3wjUmgEptiUzqnwzIpP5S38ifQ0jP0G1023AdgHMOtalpTO2/tcfWHgBe/4/v"
"49y/fw/FyCiKwRHA+E4ZMlhD4DAQY0DGCBgmGUNyPQAibfxu0gCJBGDyhpPx58a38X2HYwJg"
"5Jm+bXyOAErGwIS2pgBBxssMdhazBz+Nm//0cH8AFn91Dm/81w9Q23cAAyM7wMGFJnmfyICD"
"B8MATfCwAQWw1MASAABQAoD8YJPnClBhkoeNASEApAA00qbUtyHyjy2MPMuACgN2Fhd+8Qom"
"brwJ4zfMRJuNBuDKay8CjjEwPApmBwR9DDLJDIBBpfP4mRqqY5T6UW10P6y+x+r7zHKr+pxL"
"fcdvcnoOOwcqBkBEWLp4XpucA8DWwgwOAs5VG1I2gjkNSN0nPbBKQNIBZ8amPmP/sR1K1xVY"
"zBLtnJ7rOJ1zAKEAO9sfACqK+EAqj18GyT2AJO+RBiMbcMkY7X3VR3xCBDaxIXlV7qk+ObIi"
"gcWAOFIxx3EKy0oA9IC0l5P9CfGyFzMKA6S9kQ0+60yMYQAu3SuzQBlWDjuqZIHL2BQAjcD1"
"AyCAEJ4ROuwJA30tGpsMZd0uY0F5ULpLVs23YoE8x4VQ8+11ny62d6o/xYh+APQir44j5YJx"
"2ujghZw9gQXGFHDtDbQWr6Kz1oIpwqMrWKA85rthFdOIgKTvcfYs7W3frQZhCwAkoSnkZZC5"
"u6Lnyw9MhicGGWOwvrwEZ7u4468/h7Hpj6K5cMmnsB4WhH6cGnByAKt2ebhUh4K/5L2f21QB"
"QECQgs0opTwVv6wHBogA9rLAGINW41101tfwqb/5IvYf/gvc9bdfxMjYOJoLF0GmiHGsM4I/"
"dODgPRUKLrLAIYpbCAXn+6EYSk6asspufQCIhgXvZZTJDUZmbNl7/pxMgfXGErprazh031cx"
"fdudAIDR8Qnc+/BjGK3vqgDB5WBn9GdF54iS0hQPStADDYKTUGDOQegFAIEFwZhc8au0INwn"
"NRBTGKw1FtFZa+GeBx7B7o/dkT1npL4LR47OYaRWx/L8JV8CR5yDZ+ENci4zNmiO1zS57rTa"
"bwJCHgG9aZDYg0CsChotLj1akIsesQNMgfXGIrprqzj0wKPY/bE7yzjnINTrWF645IUxE7YE"
"cgCBxdgEQvJ8DgJXglCe/ecAaErFHBsMCzaXWSChEPTDGKwvves9f/Q4pm//ZKXxYRsdn8C9"
"R+cwWhtHc/5yAsE5hKwQRSyC4JKRyOmfQHC9TNCZogqABIK30bMgdRiQTFqQC6IRz3dWmzh0"
"dA67b9vc+LB5JhzH8FgtgQCIwRUgQGuFeDpjgsog8p3oqNKz8yyAEAaaBUoQteCVBJGMwXrj"
"XXTXWrjnSycxffvB6zK+DMJIrY7mwmUYmU5rUYwgWJcEszIcvFhq7WCngOgHQFDigFLKCEl1"
"IwtUKJiiwPrSIjqtVdzz5ccx/Ym73pPxYRsdn8CRh45jpDaO5sIlDwIDzLYXBB0OSh84prsg"
"pE4xSc1wqwDIPBpDgbcMhfZqE0NjNRw59iR2f/y9eb68jdR24ciDxzC8s14BQj9NEKOdL3Yc"
"Q9ggRgfvByD6AgDEuKIAgg6FKH4hFHw82vUNjNR3YeKW296X8RGE+i4ceegYhsc0CFyhCRZs"
"bTQ81glB/DI2XEcIZBThVOr2hIJLDAEzRuq7sPTWGzjzzOPbAgAAjNYnPBPG6mjOl0BwovYO"
"YpgAwVLouCSEiQ0esC1CICAoCDBgSqFgVNrR+XXH5DTmf3oWP372VM9D/mgQxgMINS+MsU5I"
"8cxZSJTYwDkbYlj0BQAUYyaluRQKTuuBU3ogyI/P3oLLr76wvUwYn8C9D81heGcNyyUmsBNd"
"cInuZTZwZEMIgbz/EgAqthXFy3pgWFeJCgRnMT57CxZeexlnvv21bQPBa8JxjErZbIxJcwel"
"8JENzoJtV/TBJrACYJsCEAx3ViYhOQgcQBA2ZOEgINRv3J9A2K5wqE+oucNFvyzOrNjgZMwl"
"IJwFu27KBJtXglQCwQFqfZSkPI7lZQTB5SCwQ33fAcz/7EWceWb7NCEWS2M1mUWaxNZKIFRo"
"WM8KbLYomoogXVFZAUEyQXkOkIFQEQ4/O7v9mvDwCZk7XPDvKaLgaSBUigwaYK0HZDMA4FAN"
"QpYZXAZCHg4ahC7q+27Gwmsv/T9owmMYqdWxevUyjCFZCElVYQJDNMDarQshX/M7oXsJBNsL"
"gg4Hw6IRMTvIANiifuMBXPnFq/iff/oHrC1e3TYQ/vLRJ1HfO4v1xqJKi6WqL6Z2AaMEQPZq"
"jCB53zl5P+fzf5wlWetfSRkokCRZEmCYwMRwLr7Fko79OoHb2OgRofezmYFB/5osGptenAIk"
"z5JrTIkd/QAI01oCPIJVIDgHyEtHMPv3DHLbEUBMMBQqRoAKg2vn38LkbZ/Ekbmn1cuX97e1"
"V5s4/dwTaDXexc7pvT7eZSw+HhUYMnZfN2yaBTyFCOxvSMlZLYx96oSQEQCQIVz73ZuY+pM7"
"cGTuqe0zfqWJ089+AytX5rFzcg9cpx3L31AYhbFXXlNbHgIs4DkHMgQDAxeE0ZAABM9vZwE2"
"8pY2rBswmHyyNES49ps3MfWJu3B47ilQUflThPe8bTSv4fQzp9B65wpqe/eBbQcUZi3M0esU"
"aO9P/R/bWwj1jIqimAFEDoZIVlQBGErTYpKy2cK/iqYwYfIPv/a7NzF5+0H82YlvggYHt8X4"
"9soyTn/ra1i5chm1mZu85+UVOyVLEWgfVzbko6oQygEQ48MKUAh7CXewFRAIqSNygGURR//u"
"fvntX2Pq9oM4fOq57aN98xpOP30CrSuXUZ+Z9cYDCL9UyX+hkj5JaQBVlMKlLJDyJ3krw4TY"
"//oDQHjDmocEAGdBZLD8mzcwecfduOfr39lW4//vX49h9eo8ajM3gTud9BMcMJiV5+OhKJd6"
"GxwKo74AgAFy4TW3B0GY7cXREAgqJIhAJEwxBZbfeh1TBz+Nz5x6bvtifrmB008ew+o786jt"
"nYXrdtJYiQAWBdCKjxD+pSVQ290CAPh8TeJWFlEhyS8s+T2GBEN+tmPQmr+AyTvvxqEnvrut"
"xv/oyUewunAJtZn9YnzyMoeUJ4CEe3H1qgTAliEAp0IgCJ307dnAMi3gyAZ2hPWlqxi/9eM4"
"9C//ti2GR+P/+R+xsnAJtX37wd1Oj5elSEEChWNNEosChQHb3jTY+2Yo5FPJ7WEPVVRe8voa"
"YGBwCO2lRSz/9s1tMb7dvIbnv/EVtC5fRH3vLLjT8RoTZnpqsdMf21QH6HqAS9eudz3AG6zZ"
"4BIQYrSJy1AOQzvH0F66ih8/8gUs/fKn79v4H339y1iZv4CxmX3gbifqEjnnj62TY6eObZwG"
"R2PDbsPeqwE9r8aMCoHIBqfYEKopmf8TM7jbwcjEFMxAgbOPP4Slc6/8ccYvL+H5Uw+jdek8"
"6jPe86yXtqIzBIiS1wMQERjrQNbKNevPNy2FvdwLzV0CgnNtMJzCIhZOnQ5GJ6dhigJnTx7F"
"0i9ffY/GN/D8iaNoXTiP2sx+cLcbn2XE6B56RzD67TY/5y1CIKO5rvGlAz3XJucSECFzdLvY"
"MbUHxeAQzp58EEvnrg+E9nIDZ078PVqXzmNsZhauveEp6ywgwuXDQAB3igkBnGy3ANu8jXMw"
"VnSkPwPYU4ZZ1QQuAkH9gAjtnPNMmJpGUQzixZMPYPHnL29q/EZjEWfm7sPqxfMY2yepjl1v"
"7Fpbsbu4U8UOa4Gu7LYLdK9DA7yhFsa5/kBwFRBKIzodz4RiEC+dfKAvE9rLDfxk7j60LryN"
"2o0HfIUXvIywl5VeC1t3iz0IYVd9brEi5L0JJSplIPSe2oSVl6QJbYxOTWNgcAgvPXZ/jzC2"
"ry3hJ4/+HVoX30Zt9gC4287EjYT2if55Ntp6T2MzzsFYv9OmaTAyQNb+Fe0TEKV4VBqhlZmY"
"gU4Ho5N7UBQDePn4/Vh87UVP+6V38MJXP4+13/8WtX3ieSVUfvA25v7r30uK7yyMsykT2G6P"
"BpTWAzgZCr+qQhRKYqkMZWeQVIthNiTlM/yqUKwiO23smNqDtavzOPfUY7jl8w9j/n//G2sX"
"38bY/lu98ZqC7H+kB6SJXVb9XdcWpuaIK0DELCK4xXTYx75DvuIns4FQe8v8gOM/TYRmYSnK"
"f7IskXG7jdHJPei2VvD6c09gYOcYarM3w2njo8Wcnkx6DFtv2uBgj/61m+l2PbP6AUAhzuOq"
"JiWD1JIDy+InybQ5zMfj9IEh99V3O20UQ8MY3X2Db9tui8NJMaA0eSmdR7uAHLg0AYiGe3s4"
"ARIB2CoEmEEuoCHeJ29wAESWCjwz1YzMt4P6DiIzQshQoBJLuJSnrZsYHVpWtoier6a/B8DC"
"2C0AMFLg+AF7T7IMPHo7rLhWgBFXkYNhYWWGZN0urtiwDyNW5hGUJ0ts2AyW+PXABAEhFnX+"
"eKDbhdl0PaDTgelKIWFUDAu9vadZFkJQAYZfnQnrkSSGptlrGYSgLsiva6v6bsrTwXAV835N"
"IAegsBbUbvcHgEDgtTX/3sO6SP3gee/htFASRhD+ByjeB2L8B0Ag64hRScKiRgmIzcynngas"
"hpG8T/E81AS+f9NcQVHqOKsDaoc/C2O74NVVXwdYB7J++mmsFBRhDiC1gnHIp6iqnS9CrJ/M"
"SMkcJjdGChzjGMZy6lsXL7IXsuuCpnDWX5NjPz6LwloYa2G6FkXXoug6FNbBrLZQNFdQP/Tn"
"/Rnwkc8cwZ77v4L5Z59GMf4RYGjYYxzojqD4SMpPkLpAfybP+u8EF6oFyiqxo/IB91Ahm86K"
"d6PXQ8UaijXnwcbGBtxiEzc8+CXUPvtX5f54GcCYvtj4zx9i9ZUXQMPD8i9tlMZBKlMrgyPP"
"K4xLYGxlcNWWC10aeGZE+gxaEMBxXfD6OnbceTd2fe4L5c4blQB8iLbGh/6fpwcA/B7+3+c/"
"bEAQgMYfADrwx8s5l7JzAAAAAElFTkSuQmCC")
| 64.95202
| 79
| 0.878737
|
f1701d278c42a1ff9f8eaab17e6a105de6f37a16
| 18,950
|
py
|
Python
|
intersight/models/iaas_ucsd_info_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/models/iaas_ucsd_info_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/models/iaas_ucsd_info_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from intersight.configuration import Configuration
class IaasUcsdInfoAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'device_id': 'str',
'guid': 'str',
'host_name': 'str',
'ip': 'str',
'last_backup': 'datetime',
'node_type': 'str',
'product_name': 'str',
'product_vendor': 'str',
'product_version': 'str',
'status': 'str',
'connector_pack': 'list[IaasConnectorPack]',
'device_status': 'list[IaasDeviceStatus]',
'license_info': 'IaasLicenseInfo',
'most_run_tasks': 'list[IaasMostRunTasks]',
'registered_device': 'AssetDeviceRegistration',
'ucsd_managed_infra': 'IaasUcsdManagedInfra'
}
attribute_map = {
'device_id': 'DeviceId',
'guid': 'Guid',
'host_name': 'HostName',
'ip': 'Ip',
'last_backup': 'LastBackup',
'node_type': 'NodeType',
'product_name': 'ProductName',
'product_vendor': 'ProductVendor',
'product_version': 'ProductVersion',
'status': 'Status',
'connector_pack': 'ConnectorPack',
'device_status': 'DeviceStatus',
'license_info': 'LicenseInfo',
'most_run_tasks': 'MostRunTasks',
'registered_device': 'RegisteredDevice',
'ucsd_managed_infra': 'UcsdManagedInfra'
}
def __init__(self,
device_id=None,
guid=None,
host_name=None,
ip=None,
last_backup=None,
node_type=None,
product_name=None,
product_vendor=None,
product_version=None,
status=None,
connector_pack=None,
device_status=None,
license_info=None,
most_run_tasks=None,
registered_device=None,
ucsd_managed_infra=None,
local_vars_configuration=None): # noqa: E501
"""IaasUcsdInfoAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._device_id = None
self._guid = None
self._host_name = None
self._ip = None
self._last_backup = None
self._node_type = None
self._product_name = None
self._product_vendor = None
self._product_version = None
self._status = None
self._connector_pack = None
self._device_status = None
self._license_info = None
self._most_run_tasks = None
self._registered_device = None
self._ucsd_managed_infra = None
self.discriminator = None
if device_id is not None:
self.device_id = device_id
if guid is not None:
self.guid = guid
if host_name is not None:
self.host_name = host_name
if ip is not None:
self.ip = ip
if last_backup is not None:
self.last_backup = last_backup
if node_type is not None:
self.node_type = node_type
if product_name is not None:
self.product_name = product_name
if product_vendor is not None:
self.product_vendor = product_vendor
if product_version is not None:
self.product_version = product_version
if status is not None:
self.status = status
if connector_pack is not None:
self.connector_pack = connector_pack
if device_status is not None:
self.device_status = device_status
if license_info is not None:
self.license_info = license_info
if most_run_tasks is not None:
self.most_run_tasks = most_run_tasks
if registered_device is not None:
self.registered_device = registered_device
if ucsd_managed_infra is not None:
self.ucsd_managed_infra = ucsd_managed_infra
@property
def device_id(self):
"""Gets the device_id of this IaasUcsdInfoAllOf. # noqa: E501
Moid of the UCSD device connector's asset.DeviceRegistration. # noqa: E501
:return: The device_id of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._device_id
@device_id.setter
def device_id(self, device_id):
"""Sets the device_id of this IaasUcsdInfoAllOf.
Moid of the UCSD device connector's asset.DeviceRegistration. # noqa: E501
:param device_id: The device_id of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._device_id = device_id
@property
def guid(self):
"""Gets the guid of this IaasUcsdInfoAllOf. # noqa: E501
Unique ID of UCSD getting registered with Intersight. # noqa: E501
:return: The guid of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._guid
@guid.setter
def guid(self, guid):
"""Sets the guid of this IaasUcsdInfoAllOf.
Unique ID of UCSD getting registered with Intersight. # noqa: E501
:param guid: The guid of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._guid = guid
@property
def host_name(self):
"""Gets the host_name of this IaasUcsdInfoAllOf. # noqa: E501
The UCSD host name. # noqa: E501
:return: The host_name of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._host_name
@host_name.setter
def host_name(self, host_name):
"""Sets the host_name of this IaasUcsdInfoAllOf.
The UCSD host name. # noqa: E501
:param host_name: The host_name of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._host_name = host_name
@property
def ip(self):
"""Gets the ip of this IaasUcsdInfoAllOf. # noqa: E501
The UCSD IP address. # noqa: E501
:return: The ip of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""Sets the ip of this IaasUcsdInfoAllOf.
The UCSD IP address. # noqa: E501
:param ip: The ip of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._ip = ip
@property
def last_backup(self):
"""Gets the last_backup of this IaasUcsdInfoAllOf. # noqa: E501
Last successful backup created for this UCS Director appliance if backup is configured. # noqa: E501
:return: The last_backup of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: datetime
"""
return self._last_backup
@last_backup.setter
def last_backup(self, last_backup):
"""Sets the last_backup of this IaasUcsdInfoAllOf.
Last successful backup created for this UCS Director appliance if backup is configured. # noqa: E501
:param last_backup: The last_backup of this IaasUcsdInfoAllOf. # noqa: E501
:type: datetime
"""
self._last_backup = last_backup
@property
def node_type(self):
"""Gets the node_type of this IaasUcsdInfoAllOf. # noqa: E501
NodeType specifies if UCSD is deployed in Stand-alone or Multi Node. # noqa: E501
:return: The node_type of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._node_type
@node_type.setter
def node_type(self, node_type):
"""Sets the node_type of this IaasUcsdInfoAllOf.
NodeType specifies if UCSD is deployed in Stand-alone or Multi Node. # noqa: E501
:param node_type: The node_type of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._node_type = node_type
@property
def product_name(self):
"""Gets the product_name of this IaasUcsdInfoAllOf. # noqa: E501
The UCSD product name. # noqa: E501
:return: The product_name of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._product_name
@product_name.setter
def product_name(self, product_name):
"""Sets the product_name of this IaasUcsdInfoAllOf.
The UCSD product name. # noqa: E501
:param product_name: The product_name of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._product_name = product_name
@property
def product_vendor(self):
"""Gets the product_vendor of this IaasUcsdInfoAllOf. # noqa: E501
The UCSD product vendor. # noqa: E501
:return: The product_vendor of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._product_vendor
@product_vendor.setter
def product_vendor(self, product_vendor):
"""Sets the product_vendor of this IaasUcsdInfoAllOf.
The UCSD product vendor. # noqa: E501
:param product_vendor: The product_vendor of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._product_vendor = product_vendor
@property
def product_version(self):
"""Gets the product_version of this IaasUcsdInfoAllOf. # noqa: E501
The UCSD product/platform version. # noqa: E501
:return: The product_version of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._product_version
@product_version.setter
def product_version(self, product_version):
"""Sets the product_version of this IaasUcsdInfoAllOf.
The UCSD product/platform version. # noqa: E501
:param product_version: The product_version of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._product_version = product_version
@property
def status(self):
"""Gets the status of this IaasUcsdInfoAllOf. # noqa: E501
The UCSD status. Possible values are Active, Inactive, Unknown. # noqa: E501
:return: The status of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this IaasUcsdInfoAllOf.
The UCSD status. Possible values are Active, Inactive, Unknown. # noqa: E501
:param status: The status of this IaasUcsdInfoAllOf. # noqa: E501
:type: str
"""
self._status = status
@property
def connector_pack(self):
"""Gets the connector_pack of this IaasUcsdInfoAllOf. # noqa: E501
A reference to an iaasConnectorPack resource. When the $expand query parameter is specified, the referenced resource is returned inline. Relationship to a collection of connector packs installed on the UCSD. # noqa: E501
:return: The connector_pack of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: list[IaasConnectorPack]
"""
return self._connector_pack
@connector_pack.setter
def connector_pack(self, connector_pack):
"""Sets the connector_pack of this IaasUcsdInfoAllOf.
A reference to an iaasConnectorPack resource. When the $expand query parameter is specified, the referenced resource is returned inline. Relationship to a collection of connector packs installed on the UCSD. # noqa: E501
:param connector_pack: The connector_pack of this IaasUcsdInfoAllOf. # noqa: E501
:type: list[IaasConnectorPack]
"""
self._connector_pack = connector_pack
@property
def device_status(self):
"""Gets the device_status of this IaasUcsdInfoAllOf. # noqa: E501
A reference to an iaasDeviceStatus resource. When the $expand query parameter is specified, the referenced resource is returned inline. Relationship to a collection of infra accounts managed by the UCSD. # noqa: E501
:return: The device_status of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: list[IaasDeviceStatus]
"""
return self._device_status
@device_status.setter
def device_status(self, device_status):
"""Sets the device_status of this IaasUcsdInfoAllOf.
A reference to an iaasDeviceStatus resource. When the $expand query parameter is specified, the referenced resource is returned inline. Relationship to a collection of infra accounts managed by the UCSD. # noqa: E501
:param device_status: The device_status of this IaasUcsdInfoAllOf. # noqa: E501
:type: list[IaasDeviceStatus]
"""
self._device_status = device_status
@property
def license_info(self):
"""Gets the license_info of this IaasUcsdInfoAllOf. # noqa: E501
:return: The license_info of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: IaasLicenseInfo
"""
return self._license_info
@license_info.setter
def license_info(self, license_info):
"""Sets the license_info of this IaasUcsdInfoAllOf.
:param license_info: The license_info of this IaasUcsdInfoAllOf. # noqa: E501
:type: IaasLicenseInfo
"""
self._license_info = license_info
@property
def most_run_tasks(self):
"""Gets the most_run_tasks of this IaasUcsdInfoAllOf. # noqa: E501
A reference to an iaasMostRunTasks resource. When the $expand query parameter is specified, the referenced resource is returned inline. Relationship to a collection of MostRunTasks objects with cascade on delete of UcsdInfo object. # noqa: E501
:return: The most_run_tasks of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: list[IaasMostRunTasks]
"""
return self._most_run_tasks
@most_run_tasks.setter
def most_run_tasks(self, most_run_tasks):
"""Sets the most_run_tasks of this IaasUcsdInfoAllOf.
A reference to an iaasMostRunTasks resource. When the $expand query parameter is specified, the referenced resource is returned inline. Relationship to a collection of MostRunTasks objects with cascade on delete of UcsdInfo object. # noqa: E501
:param most_run_tasks: The most_run_tasks of this IaasUcsdInfoAllOf. # noqa: E501
:type: list[IaasMostRunTasks]
"""
self._most_run_tasks = most_run_tasks
@property
def registered_device(self):
"""Gets the registered_device of this IaasUcsdInfoAllOf. # noqa: E501
:return: The registered_device of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: AssetDeviceRegistration
"""
return self._registered_device
@registered_device.setter
def registered_device(self, registered_device):
"""Sets the registered_device of this IaasUcsdInfoAllOf.
:param registered_device: The registered_device of this IaasUcsdInfoAllOf. # noqa: E501
:type: AssetDeviceRegistration
"""
self._registered_device = registered_device
@property
def ucsd_managed_infra(self):
"""Gets the ucsd_managed_infra of this IaasUcsdInfoAllOf. # noqa: E501
:return: The ucsd_managed_infra of this IaasUcsdInfoAllOf. # noqa: E501
:rtype: IaasUcsdManagedInfra
"""
return self._ucsd_managed_infra
@ucsd_managed_infra.setter
def ucsd_managed_infra(self, ucsd_managed_infra):
"""Sets the ucsd_managed_infra of this IaasUcsdInfoAllOf.
:param ucsd_managed_infra: The ucsd_managed_infra of this IaasUcsdInfoAllOf. # noqa: E501
:type: IaasUcsdManagedInfra
"""
self._ucsd_managed_infra = ucsd_managed_infra
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict()
if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IaasUcsdInfoAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IaasUcsdInfoAllOf):
return True
return self.to_dict() != other.to_dict()
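A brief, hedged usage sketch for the generated model above (not part of the generated file); the import path follows the repository path in this row's metadata and the field values are placeholders:
from intersight.models.iaas_ucsd_info_all_of import IaasUcsdInfoAllOf
# Placeholder values purely for illustration; any subset of fields may be set.
ucsd = IaasUcsdInfoAllOf(
    host_name="ucsd01.example.com",
    ip="10.0.0.5",
    node_type="Stand-alone",
    status="Active",
)
print(ucsd.to_dict()["status"])   # 'Active'
print(ucsd.to_str())              # pretty-printed dict of all attributes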
| 34.392015
| 1,052
| 0.637309
|
c178b219def8f7fbfe6d00f7822a8e61293d003a
| 1,775
|
py
|
Python
|
lib/python3.4/site-packages/pip/_vendor/html5lib/trie/py.py
|
LChristakis/chalice-hunter
|
6bffea4620e23ce9ff12ac30526ebafcb9c10058
|
[
"MIT"
] | 38,667
|
2015-01-01T00:15:34.000Z
|
2022-03-31T22:57:03.000Z
|
env/lib/python3.6/site-packages/pip/_vendor/html5lib/_trie/py.py
|
amogh-gulati/corona_dashboard
|
ce1a20ad56bdfb758d41513b4706fe3a47764c32
|
[
"MIT"
] | 8,417
|
2015-01-01T13:03:16.000Z
|
2022-03-31T17:40:27.000Z
|
pip/_vendor/html5lib/trie/py.py
|
alex/pip
|
d51a4b345b31ec4c8defbefe7f12b996c00c67fa
|
[
"MIT"
] | 11,269
|
2015-01-01T08:41:17.000Z
|
2022-03-31T16:12:52.000Z
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
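A hedged usage sketch (not part of the vendored html5lib sources) showing the behaviour of the class above: keys() narrows the candidate range with bisect_left and caches the (start, end) window of the last prefix, so lookups with extending prefixes reuse the cache.
# Keys must be text (unicode) strings; values can be anything.
trie = Trie({u"amp": u"&", u"and": u"\u2227", u"angle": u"\u2220", u"apos": u"'"})
print(sorted(trie.keys(u"an")))           # ['and', 'angle']
print(trie.has_keys_with_prefix(u"am"))   # True
print(trie[u"amp"])                       # '&'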
| 26.102941
| 66
| 0.588732
|
25d97682ff7a25ed7df27b1dfcb02829b32b6fa7
| 12,420
|
py
|
Python
|
klab/rosetta/map_pdb_residues.py
|
Kortemme-Lab/klab
|
68f028a4d7f97b9009bff45799b5602824052dd1
|
[
"MIT"
] | 2
|
2016-06-14T00:32:19.000Z
|
2021-07-04T01:56:17.000Z
|
klab/rosetta/map_pdb_residues.py
|
Kortemme-Lab/klab
|
68f028a4d7f97b9009bff45799b5602824052dd1
|
[
"MIT"
] | 2
|
2019-01-17T18:52:17.000Z
|
2019-01-17T18:52:56.000Z
|
klab/rosetta/map_pdb_residues.py
|
Kortemme-Lab/klab
|
68f028a4d7f97b9009bff45799b5602824052dd1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# encoding: utf-8
# The MIT License (MIT)
#
# Copyright (c) 2015 Shane O'Connor
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
map_pdb_residues.py
Python functions to map PDB residue IDs to Rosetta/pose IDs by using the features database.
Warning: The inputs to subprocess.getstatusoutput (one of these is an executable) are unsanitized. Only use these functions if you trust the caller.
Sample command line:
python map_pdb_residues.py -d ~/rosetta/main/database -e ~/rosetta/main/source/bin/rosetta_scripts.static.linuxgccrelease -f 1QG8.pdb -c A
Created by Shane O'Connor 2013
"""
import sys
import os
import tempfile
import subprocess
import traceback
from optparse import OptionParser # todo: deprecated since Python 2.7
from klab.fs.fsio import write_temp_file
script = '''<ROSETTASCRIPTS>
<MOVERS>
<SavePoseMover name="init_struct" reference_name="init_struct"/>
<ReportToDB name="features_reporter" database_name="%s">
<ResidueFeatures/>
<PdbDataFeatures/>
</ReportToDB>
</MOVERS>
<PROTOCOLS>
<Add mover_name="init_struct"/>
<Add mover_name="features_reporter"/>
</PROTOCOLS>
</ROSETTASCRIPTS>'''
def get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path = None, pdb_id = None, extra_flags = ''):
'''Takes a string containing a PDB file, the RosettaScripts executable, and the Rosetta database and then uses the features database to map PDB residue IDs to pose residue IDs.
On success, (True, the residue mapping) is returned. On failure, (False, a list of errors) is returned.
Note: extra_flags should typically include '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.'''
filename = write_temp_file("/tmp", pdb_file_contents)
success, mapping = get_pdb_to_pose_residue_map(filename, rosetta_scripts_path, rosetta_database_path = rosetta_database_path, pdb_id = pdb_id, extra_flags = extra_flags)
os.remove(filename)
return success, mapping
def get_pdb_to_pose_residue_map(pdb_path, rosetta_scripts_path, rosetta_database_path = None, pdb_id = None, extra_flags = ''):
'''Takes a path to a PDB file, the RosettaScripts executable, and the Rosetta database and then uses the features database to map PDB residue IDs to pose residue IDs.
On success, (True, the residue mapping) is returned. On failure, (False, a list of errors) is returned.
The mapping maps residue IDs to a dict with the three letter residue code and the Rosetta pose id e.g.
mapping = {
u'B 435 ': {'name3': u'GLN', 'pose_residue_id': 370, 'res_type': u'GLN'},
...
}
Note: extra_flags should typically include '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.'''
errors = []
exit_code = 0
F, script_path = tempfile.mkstemp(dir=".")
script_handle = os.fdopen(F, "w")
try:
db_path = script_path + ".db3"
script_handle.write(script % db_path)
script_handle.close()
if rosetta_database_path:
command_line = '%s -database %s -constant_seed -in:file:s %s -parser:protocol %s -overwrite -out:nooutput %s' % (rosetta_scripts_path, rosetta_database_path, pdb_path, script_path, extra_flags)
else:
command_line = '%s -constant_seed -in:file:s %s -parser:protocol %s -overwrite -out:nooutput %s' % (rosetta_scripts_path, pdb_path, script_path, extra_flags)
exit_code, stdout = subprocess.getstatusoutput(command_line)
if exit_code != 0:
errors.append("An error occured during execution. The exit code was %d. The output was:\n\n%s" % (exit_code, stdout))
else:
try:
mapping = get_mapping_from_db3_file( db_path )
except Exception as e:
errors.append(str(e))
errors.append(traceback.format_exc())
errors.append("The features database does not seem to have been correctly created. Check to see if the command '%s' is correct." % command_line)
except Exception as e:
errors.append(str(e))
errors.append(traceback.format_exc())
exit_code = 1
if errors and ((extra_flags.find('-ignore_zero_occupancy false') == -1) or (extra_flags.find('-ignore_unrecognized_res') == -1)):
errors.append("Note: extra_flags should typically include both '-ignore_zero_occupancy false' and '-ignore_unrecognized_res'.")
if os.path.exists(script_path):
os.remove(script_path)
if os.path.exists(db_path):
os.remove(db_path)
if exit_code or errors:
return False, errors
return True, mapping
def get_mapping_from_db3_file( db_path ):
'''
Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping
'''
import sqlite3 # should be moved to the top but we do this here for CentOS 5 support
conn = sqlite3.connect(db_path)
results = conn.cursor().execute('''
SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type
FROM residue_pdb_identification
INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum
''')
# Create the mapping from PDB residues to Rosetta residues
rosetta_residue_ids = []
mapping = {}
for r in results:
mapping["%s%s%s" % (r[0], str(r[1]).rjust(4), r[2])] = {'pose_residue_id' : r[4], 'name3' : r[5], 'res_type' : r[6]}
rosetta_residue_ids.append(r[4])
# Ensure that the range of the map is exactly the set of Rosetta residues i.e. the map from (a subset of) the PDB residues to the Rosetta residues is surjective
raw_residue_list = [r for r in conn.cursor().execute('''SELECT resNum, name3 FROM residues ORDER BY resNum''')]
assert(sorted([r[0] for r in raw_residue_list]) == sorted(rosetta_residue_ids))
return mapping
def strip_pdb(pdb_path, chains = [], strip_hetatms = False):
'''Takes a PDB file and strips all lines except ATOM and HETATM records. If chains is specified, only those chains are kept. If strip_hetatms is True then HETATM lines are also stripped.
Returns (True, a path to the stripped PDB file) on success and (False, a list of errors) on failure.'''
chains = set(chains)
contents = open(pdb_path).read().split("\n") # file handle should get garbage collected
if strip_hetatms:
if chains:
atom_lines = [l for l in contents if l.startswith("ATOM ") and l[21] in chains]
else:
atom_lines = [l for l in contents if l.startswith("ATOM ")]
else:
if chains:
atom_lines = [l for l in contents if (l.startswith("ATOM ") or l.startswith("HETATM")) and l[21] in chains]
else:
atom_lines = [l for l in contents if (l.startswith("ATOM ") or l.startswith("HETATM"))]
existing_chains = set([l[21] for l in atom_lines])
if chains.difference(existing_chains):
return False, ["Error: The following chains do not exist in the PDB file - %s" % ", ".join(list(chains.difference(existing_chains)))]
F, temp_pdb_path = tempfile.mkstemp(dir=".")
temp_pdb_handle = os.fdopen(F, "w")
temp_pdb_handle.write("\n".join(atom_lines))
temp_pdb_handle.close()
return True, temp_pdb_path
def get_stripped_pdb_to_pose_residue_map(input_pdb_path, rosetta_scripts_path, rosetta_database_path, chains = [], strip_hetatms = False):
'''Takes a path to an input PDB file, the path to the RosettaScripts executable and Rosetta database, an optional list of chains to strip the PDB down to, and an optional flag specifying whether HETATM lines should be stripped from the PDB.
On success, a pair (True, mapping between PDB and pose residues) is returned. On failure, a pair (False, a list of errors) is returned.'''
success, result = strip_pdb(input_pdb_path, chains = chains, strip_hetatms = strip_hetatms)
if success:
assert(os.path.exists(result))
success, mapping = get_pdb_to_pose_residue_map(result, rosetta_scripts_path, rosetta_database_path)
os.remove(result)
if success:
return True, mapping
else:
return False, mapping
return False, result
if __name__ == '__main__':
chains = []
parser = OptionParser()
parser.add_option("-e", "--executable", dest="rosetta_scripts_path", help="The location of the RosettaScripts executable e.g. ~/bin/rosetta_scripts.linuxgccrelease", metavar="EXECUTABLE")
parser.add_option("-d", "--database", dest="rosetta_database_path", help="The location of the Rosetta database", metavar="DATABASE")
parser.add_option("-f", "--file", dest="filename", help="The input PDB", metavar="FILE")
parser.add_option("-c", "--chains", dest="chains", default=[], help="A comma-separated list of chains to keep (all other chains will be discarded). The default behavior is to keep all chains.")
parser.add_option("-s", "--strip_hetatms", dest="strip_hetatms", action="store_true", default=False, help="Use this option to strip HETATM lines from the input PDB file. The default behavior is to keep HETATM lines.")
(options, args) = parser.parse_args()
parser.set_usage(None)
filename = options.filename
rosetta_database_path = options.rosetta_database_path
rosetta_scripts_path = options.rosetta_scripts_path
chains = options.chains
strip_hetatms = options.strip_hetatms
if not filename:
print("\nError: A filename must be specified.\n")
parser.print_help()
sys.exit(1)
elif not(os.path.exists(filename)):
print(("\nError: File '%s' does not exist.\n" % filename))
sys.exit(1)
if not rosetta_database_path:
print("\nError: The path to the Rosetta database corresponding with the RosettaScripts executable must be specified.\n")
parser.print_help()
sys.exit(1)
elif not(os.path.exists(rosetta_database_path)):
print(("\nError: The path '%s' does not exist.\n" % rosetta_database_path))
sys.exit(1)
if not rosetta_scripts_path:
print("\nError: The path to the RosettaScripts executable must be specified.\n")
parser.print_help()
sys.exit(1)
elif not(os.path.exists(rosetta_scripts_path)):
if os.path.exists(os.path.join(os.getcwd(), rosetta_scripts_path)):
rosetta_scripts_path = "./%s" % os.path.join(os.getcwd(), rosetta_scripts_path)
if not os.path.exists(rosetta_scripts_path):
print(("\nError: The path '%s' does not exist.\n" % rosetta_scripts_path))
sys.exit(1)
rosetta_scripts_path = os.path.abspath(rosetta_scripts_path)
if chains:
chains = chains.split(",")
for c in chains:
if not len(c) == 1:
print(("\nError: Chain ID '%s' is invalid. PDB chain identifiers are one character in length.\n" % c))
sys.exit(1)
success, result = get_stripped_pdb_to_pose_residue_map(filename, rosetta_scripts_path, rosetta_database_path, chains = chains, strip_hetatms = strip_hetatms)
if success:
print("{")
for k, v in sorted(result.items()):
print(("'%s': %s," % (k, v)))
print("}")
else:
print(("\n".join(result)))
sys.exit(1)
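A hedged Python-level usage sketch to complement the command-line example in the module docstring; the paths below are placeholders and assume a working Rosetta installation:
from klab.rosetta.map_pdb_residues import get_stripped_pdb_to_pose_residue_map

success, result = get_stripped_pdb_to_pose_residue_map(
    "1QG8.pdb",                                                   # input PDB (placeholder)
    "~/rosetta/main/source/bin/rosetta_scripts.linuxgccrelease",  # RosettaScripts binary (placeholder)
    "~/rosetta/main/database",                                    # Rosetta database (placeholder)
    chains=["A"],
    strip_hetatms=True,
)
if success:
    # result maps PDB residue IDs to dicts with 'pose_residue_id', 'name3', 'res_type'
    for pdb_res, rosetta_res in sorted(result.items()):
        print(pdb_res, "->", rosetta_res["pose_residue_id"])
else:
    print("\n".join(result))   # list of error messages on failure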
| 49.285714
| 244
| 0.696296
|
330c5dc2edef6bdc065594ad2bf5f7a85be9fc8a
| 3,334
|
py
|
Python
|
tests/test_late_swap.py
|
coltonfischer/pydfs-lineup-optimizer
|
ea28946bb7d8c28394468b49d08ff0216148cefd
|
[
"MIT"
] | 326
|
2015-12-23T16:30:18.000Z
|
2022-03-19T17:48:07.000Z
|
tests/test_late_swap.py
|
coltonfischer/pydfs-lineup-optimizer
|
ea28946bb7d8c28394468b49d08ff0216148cefd
|
[
"MIT"
] | 335
|
2016-02-06T05:36:58.000Z
|
2022-03-26T02:19:02.000Z
|
tests/test_late_swap.py
|
coltonfischer/pydfs-lineup-optimizer
|
ea28946bb7d8c28394468b49d08ff0216148cefd
|
[
"MIT"
] | 178
|
2016-02-06T05:30:20.000Z
|
2022-03-27T19:05:42.000Z
|
from __future__ import absolute_import, division
import unittest
from datetime import datetime, timedelta
from pytz import timezone
from unittest.mock import patch, PropertyMock
from pydfs_lineup_optimizer import get_optimizer
from pydfs_lineup_optimizer.constants import Site, Sport
from pydfs_lineup_optimizer.lineup import Lineup
from pydfs_lineup_optimizer.player import LineupPlayer, GameInfo
from .utils import create_players
class LateSwapTestCase(unittest.TestCase):
def setUp(self):
self.future_game_info = GameInfo(home_team='H', away_team='A', game_started=False,
starts_at=datetime.now(timezone('EST')) + timedelta(days=1))
self.finished_game_info = GameInfo(home_team='H2', away_team='A2', game_started=False,
starts_at=datetime.now(timezone('EST')) - timedelta(days=1))
self.lineup_optimizer = get_optimizer(Site.DRAFTKINGS, Sport.BASKETBALL)
self.lineup_optimizer.settings.min_games = None
positions = ['PG', 'SG', 'SF', 'PF', 'C', 'PG/SG', 'SF/PF', 'C']
self.active_players = create_players(positions, game_info=self.future_game_info, salary=5000, fppg=20)
self.inactive_players = create_players(positions, game_info=self.finished_game_info, salary=4500, fppg=10)
self.lineup_optimizer.player_pool.load_players(self.active_players + self.inactive_players)
self.lineup = Lineup([
LineupPlayer(self.active_players[0], 'PG'),
LineupPlayer(self.inactive_players[1], 'SG'),
LineupPlayer(self.active_players[2], 'SF'),
LineupPlayer(self.inactive_players[3], 'PF'),
LineupPlayer(self.active_players[4], 'C'),
LineupPlayer(self.inactive_players[5], 'G'),
LineupPlayer(self.active_players[6], 'F'),
LineupPlayer(self.inactive_players[7], 'UTIL'),
])
def test_late_swap_optimize(self):
players_in_action = {player: player.lineup_position for player in self.lineup if player.is_game_started}
lineup = next(self.lineup_optimizer.optimize_lineups([self.lineup]))
for player in lineup:
if not player.is_game_started:
continue
self.assertIn(player, players_in_action)
position = players_in_action[player]
self.assertEqual(position, player.lineup_position)
def test_late_swap_optimize_with_all_inactive_players(self):
with patch('pydfs_lineup_optimizer.player.Player.is_game_started', new_callable=PropertyMock) as \
mock_is_game_started:
mock_is_game_started.return_value = False
lineup = next(self.lineup_optimizer.optimize_lineups([self.lineup]))
for player in lineup:
self.assertNotIn(player, self.inactive_players)
def test_late_swap_optimize_with_all_active_players(self):
with patch('pydfs_lineup_optimizer.player.Player.is_game_started', new_callable=PropertyMock) as \
mock_is_game_started:
mock_is_game_started.return_value = True
lineup = next(self.lineup_optimizer.optimize_lineups([self.lineup]))
for player, new_lineup_player in zip(self.lineup, lineup):
self.assertEqual(player, new_lineup_player)
| 54.655738
| 114
| 0.691962
|
eaafeb77145a372f0c99aa7701cba3ed1e466cdd
| 4,141
|
py
|
Python
|
pandora/cost_volume_confidence/std_intensity.py
|
njimenezd/Pandora
|
9e3c2054415301edac6da7510056af0136790277
|
[
"Apache-2.0"
] | 14
|
2020-09-18T14:11:59.000Z
|
2020-11-18T14:10:07.000Z
|
pandora/cost_volume_confidence/std_intensity.py
|
njimenezd/Pandora
|
9e3c2054415301edac6da7510056af0136790277
|
[
"Apache-2.0"
] | 1
|
2020-09-29T10:35:45.000Z
|
2020-09-29T10:35:45.000Z
|
pandora/cost_volume_confidence/std_intensity.py
|
njimenezd/Pandora
|
9e3c2054415301edac6da7510056af0136790277
|
[
"Apache-2.0"
] | 1
|
2020-09-29T09:29:41.000Z
|
2020-09-29T09:29:41.000Z
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora_pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions for estimating confidence from image.
"""
from typing import Dict, Tuple
import numpy as np
from json_checker import Checker, And
import xarray as xr
from pandora.img_tools import compute_std_raster
from . import cost_volume_confidence
@cost_volume_confidence.AbstractCostVolumeConfidence.register_subclass("std_intensity")
class StdIntensity(cost_volume_confidence.AbstractCostVolumeConfidence):
"""
StdIntensity class estimates a confidence measure from the left image by calculating the standard
deviation of the intensity
"""
# Method name
_method = "stereo_pandora_intensityStd"
# Indicator
_indicator = ""
def __init__(self, **cfg: str) -> None:
"""
:param cfg: optional configuration, {'confidence_method': 'std_intensity'}
:type cfg: dict
:return: None
"""
self.cfg = self.check_conf(**cfg)
# Indicator
self._indicator = self._method + self.cfg["indicator"]
def check_conf(self, **cfg: str) -> Dict[str, str]:
"""
Add default values to the dictionary if there are missing elements and check if the dictionary is correct
:param cfg: std_intensity configuration
:type cfg: dict
:return cfg: std_intensity configuration updated
:rtype: dict
"""
if "indicator" not in cfg:
cfg["indicator"] = self._indicator
schema = {"confidence_method": And(str, lambda input: "std_intensity"), "indicator": str}
checker = Checker(schema)
checker.validate(cfg)
return cfg
def desc(self) -> None:
"""
Describes the confidence method
:return: None
"""
print("Intensity confidence method")
def confidence_prediction(
self,
disp: xr.Dataset,
img_left: xr.Dataset = None,
img_right: xr.Dataset = None,
cv: xr.Dataset = None,
) -> Tuple[xr.Dataset, xr.Dataset]:
"""
Computes a confidence measure that evaluates the standard deviation of intensity of the left image
:param disp: the disparity map dataset
:type disp: xarray.Dataset
:param img_left: left Dataset image
:type img_left: xarray.Dataset
:param img_right: right Dataset image
:type img_right: xarray.Dataset
:param cv: cost volume dataset
:type cv: xarray.Dataset
:return: the disparity map and the cost volume with the new intensity standard deviation indicator added to the DataArray
confidence_measure
:rtype: Tuple(xarray.Dataset, xarray.Dataset) with the data variables:
- confidence_measure 3D xarray.DataArray (row, col, indicator)
"""
nb_row, nb_col = img_left["im"].shape
window_size = cv.attrs["window_size"]
confidence_measure = np.full((nb_row, nb_col), np.nan, dtype=np.float32)
offset_row_col = int((window_size - 1) / 2)
if offset_row_col != 0:
confidence_measure[offset_row_col:-offset_row_col, offset_row_col:-offset_row_col] = compute_std_raster(
img_left, window_size
)
else:
confidence_measure = compute_std_raster(img_left, window_size)
disp, cv = self.allocate_confidence_map(self._indicator, confidence_measure, disp, cv)
return disp, cv
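For illustration only (this is not Pandora code): the confidence value written by StdIntensity is simply the standard deviation of the left-image intensity inside the matching window centred on each pixel, which a naive NumPy version makes explicit:
import numpy as np

def naive_std_raster(image, window_size=3):
    # Per-pixel intensity standard deviation over a window_size x window_size
    # neighbourhood; border pixels without a full window stay NaN.
    half = window_size // 2
    rows, cols = image.shape
    out = np.full((rows, cols), np.nan, dtype=np.float32)
    for r in range(half, rows - half):
        for c in range(half, cols - half):
            out[r, c] = image[r - half:r + half + 1, c - half:c + half + 1].std()
    return out

left = np.random.rand(10, 12).astype(np.float32)   # stand-in for img_left['im'].values
conf = naive_std_raster(left, window_size=3)       # low values = homogeneous, harder-to-match areas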
| 33.942623
| 116
| 0.666989
|
6fc8a2012f78d840220fd298815645cda18ce37b
| 1,551
|
py
|
Python
|
tests/helpers/misc/test_get_repository_name.py
|
bdraco/integration
|
b30e799bb27fdd978bd68f21909c82005d0dd3ea
|
[
"MIT"
] | null | null | null |
tests/helpers/misc/test_get_repository_name.py
|
bdraco/integration
|
b30e799bb27fdd978bd68f21909c82005d0dd3ea
|
[
"MIT"
] | null | null | null |
tests/helpers/misc/test_get_repository_name.py
|
bdraco/integration
|
b30e799bb27fdd978bd68f21909c82005d0dd3ea
|
[
"MIT"
] | null | null | null |
"""Helpers: Misc: get_repository_name."""
from custom_components.hacs.const import ELEMENT_TYPES
# pylint: disable=missing-docstring
from custom_components.hacs.helpers.functions.misc import get_repository_name
from custom_components.hacs.helpers.classes.manifest import HacsManifest
from tests.dummy_repository import dummy_repository_base
ELEMENT_TYPES = ELEMENT_TYPES + ["appdaemon", "python_script", "theme"]
def test_everything():
repository = dummy_repository_base()
repository.data.full_name = "test/TEST-REPOSITORY-NAME"
repository.repository_manifest = HacsManifest.from_dict(
{"name": "TEST-HACS_MANIFEST"}
)
repository.integration_manifest = {"name": "TEST-MANIFEST"}
for category in ELEMENT_TYPES:
repository.data.category = category
name = get_repository_name(repository)
assert name == "TEST-HACS_MANIFEST"
def test_integration_manifest():
repository = dummy_repository_base()
repository.data.category = "integration"
repository.data.full_name = "test/TEST-REPOSITORY-NAME"
repository.repository_manifest = HacsManifest.from_dict({})
repository.integration_manifest = {"name": "TEST-MANIFEST"}
name = get_repository_name(repository)
assert name == "TEST-MANIFEST"
def test_repository_name():
repository = dummy_repository_base()
repository.data.full_name = "test/TEST-REPOSITORY-NAME"
repository.repository_manifest = HacsManifest.from_dict({})
name = get_repository_name(repository)
assert name == "Test Repository Name"
| 35.25
| 77
| 0.756286
|
9f4641a5ad058d1a16a7bd81054c1ed5ba8b3260
| 1,205
|
py
|
Python
|
emotion-classification/src/text/__init__.py
|
nur-ag/emotion-core
|
35df8e2d39b72425c88ff986e8a9be901e77688a
|
[
"MIT"
] | 4
|
2021-12-15T21:50:40.000Z
|
2022-03-22T10:20:24.000Z
|
emotion-classification/src/text/__init__.py
|
nur-ag/emotion-core
|
35df8e2d39b72425c88ff986e8a9be901e77688a
|
[
"MIT"
] | null | null | null |
emotion-classification/src/text/__init__.py
|
nur-ag/emotion-core
|
35df8e2d39b72425c88ff986e8a9be901e77688a
|
[
"MIT"
] | 1
|
2021-11-14T17:36:15.000Z
|
2021-11-14T17:36:15.000Z
|
from .bow import BOWExtractor
from .tfidf import TfidfExtractor
from .fasttext import FastTextTokenEmbeddingExtractor
from .bert import BertEmbeddingExtractor
from .xlmroberta import XLMRobertaEmbeddingExtractor
def extractor_factory(extractor_type, dataset=None, **kwargs):
if extractor_type == 'fasttext':
return FastTextTokenEmbeddingExtractor(kwargs['ft_model_path'],
max_length=kwargs['max_length'])
if extractor_type == 'bow':
return BOWExtractor(dataset, num_words=kwargs['num_words'])
if extractor_type == 'tfidf':
return TfidfExtractor(dataset, num_words=kwargs['num_words'])
if extractor_type == 'bert':
return BertEmbeddingExtractor(kwargs['bert_model'],
freeze_output=kwargs['freeze_output'],
max_length=kwargs['max_length'])
if extractor_type == 'xlmroberta':
return XLMRobertaEmbeddingExtractor(kwargs['xlm_roberta_model'],
freeze_output=kwargs['freeze_output'],
max_length=kwargs['max_length'])
return None
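A hedged usage sketch for the factory above (not part of the original __init__.py); the model names and paths are placeholders, and the keyword arguments simply mirror the keys each branch reads:
# BERT branch: needs 'bert_model', 'freeze_output' and 'max_length'.
bert = extractor_factory('bert',
                         bert_model='bert-base-uncased',   # assumed model identifier
                         freeze_output=True,
                         max_length=128)

# fastText branch: needs 'ft_model_path' and 'max_length'.
ft = extractor_factory('fasttext',
                       ft_model_path='cc.en.300.bin',      # placeholder path
                       max_length=64)

# Unrecognised extractor types fall through and return None.
assert extractor_factory('word2vec') is None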
| 46.346154
| 83
| 0.628216
|
653e5553ca75d7146967374c7b35373902047f75
| 30,094
|
py
|
Python
|
salt/modules/win_useradd.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | 1
|
2020-04-09T03:25:10.000Z
|
2020-04-09T03:25:10.000Z
|
salt/modules/win_useradd.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/win_useradd.py
|
springborland/salt
|
bee85e477d57e9a171884e54fefb9a59d0835ed0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Module for managing Windows Users
.. important::
If you feel that Salt should be using this module to manage users on a
minion, and it is using a different module (or gives an error similar to
*'user.info' is not available*), see :ref:`here
<module-provider-override>`.
:depends:
- pywintypes
- win32api
- win32con
- win32net
- win32netcon
- win32profile
- win32security
- win32ts
- wmi
.. note::
This currently only works with local user accounts, not domain accounts
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
from datetime import datetime
# Import Salt libs
import salt.utils.args
import salt.utils.dateutils
import salt.utils.platform
import salt.utils.winapi
from salt.exceptions import CommandExecutionError
from salt.ext import six
from salt.ext.six import string_types
try:
from shlex import quote as _cmd_quote # pylint: disable=E0611
except Exception: # pylint: disable=broad-except
from pipes import quote as _cmd_quote
log = logging.getLogger(__name__)
try:
import pywintypes
import wmi
import win32api
import win32con
import win32net
import win32netcon
import win32profile
import win32security
import win32ts
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Define the module's virtual name
__virtualname__ = "user"
def __virtual__():
"""
Requires Windows and Windows Modules
"""
if not salt.utils.platform.is_windows():
return False, "Module win_useradd: Windows Only"
if not HAS_WIN32NET_MODS:
return False, "Module win_useradd: Missing Win32 Modules"
return __virtualname__
def _to_unicode(instr):
"""
Internal function for converting to Unicode Strings
The NetUser* series of API calls in this module requires input parameters to
be Unicode Strings. This function ensures the parameter is a Unicode String.
This only seems to be an issue in Python 2. All calls to this function
should be gated behind an ``if six.PY2`` check.
Args:
instr (str): String to convert
Returns:
str: Unicode type string
"""
if instr is None or isinstance(instr, six.text_type):
return instr
else:
return six.text_type(instr, "utf-8")
def add(
name,
password=None,
fullname=None,
description=None,
groups=None,
home=None,
homedrive=None,
profile=None,
logonscript=None,
):
"""
Add a user to the minion.
Args:
name (str): User name
password (str, optional): User's password in plain text.
fullname (str, optional): The user's full name.
description (str, optional): A brief description of the user account.
groups (str, optional): A list of groups to add the user to.
(see chgroups)
home (str, optional): The path to the user's home directory.
homedrive (str, optional): The drive letter to assign to the home
directory. Must be the Drive Letter followed by a colon. ie: U:
profile (str, optional): An explicit path to a profile. Can be a UNC or
a folder on the system. If left blank, Windows uses its default
profile directory.
logonscript (str, optional): Path to a login script to run when the user
logs on.
Returns:
bool: True if successful. False is unsuccessful.
CLI Example:
.. code-block:: bash
salt '*' user.add name password
"""
if six.PY2:
name = _to_unicode(name)
password = _to_unicode(password)
fullname = _to_unicode(fullname)
description = _to_unicode(description)
home = _to_unicode(home)
homedrive = _to_unicode(homedrive)
profile = _to_unicode(profile)
logonscript = _to_unicode(logonscript)
user_info = {}
if name:
user_info["name"] = name
else:
return False
user_info["password"] = password
user_info["priv"] = win32netcon.USER_PRIV_USER
user_info["home_dir"] = home
user_info["comment"] = description
user_info["flags"] = win32netcon.UF_SCRIPT
user_info["script_path"] = logonscript
try:
win32net.NetUserAdd(None, 1, user_info)
except win32net.error as exc:
log.error("Failed to create user %s", name)
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
return False
update(name=name, homedrive=homedrive, profile=profile, fullname=fullname)
ret = chgroups(name, groups) if groups else True
return ret
def update(
name,
password=None,
fullname=None,
description=None,
home=None,
homedrive=None,
logonscript=None,
profile=None,
expiration_date=None,
expired=None,
account_disabled=None,
unlock_account=None,
password_never_expires=None,
disallow_change_password=None,
):
# pylint: disable=anomalous-backslash-in-string
"""
Updates settings for the Windows user. Name is the only required parameter.
Settings will only be changed if the parameter is passed a value.
.. versionadded:: 2015.8.0
Args:
name (str): The user name to update.
password (str, optional): New user password in plain text.
fullname (str, optional): The user's full name.
description (str, optional): A brief description of the user account.
home (str, optional): The path to the user's home directory.
homedrive (str, optional): The drive letter to assign to the home
directory. Must be the Drive Letter followed by a colon. ie: U:
logonscript (str, optional): The path to the logon script.
profile (str, optional): The path to the user's profile directory.
expiration_date (date, optional): The date and time when the account
expires. Can be a valid date/time string. To set to never expire
pass the string 'Never'.
expired (bool, optional): Pass `True` to expire the account. The user
will be prompted to change their password at the next logon. Pass
`False` to mark the account as 'not expired'. You can't use this to
negate the expiration if the expiration was caused by the account
expiring. You'll have to change the `expiration_date` as well.
account_disabled (bool, optional): True disables the account. False
enables the account.
unlock_account (bool, optional): True unlocks a locked user account.
False is ignored.
password_never_expires (bool, optional): True sets the password to never
expire. False allows the password to expire.
disallow_change_password (bool, optional): True blocks the user from
changing the password. False allows the user to change the password.
Returns:
bool: True if successful. False is unsuccessful.
CLI Example:
.. code-block:: bash
salt '*' user.update bob password=secret profile=C:\\Users\\Bob
home=\\server\homeshare\bob homedrive=U:
"""
# pylint: enable=anomalous-backslash-in-string
if six.PY2:
name = _to_unicode(name)
password = _to_unicode(password)
fullname = _to_unicode(fullname)
description = _to_unicode(description)
home = _to_unicode(home)
homedrive = _to_unicode(homedrive)
logonscript = _to_unicode(logonscript)
profile = _to_unicode(profile)
# Make sure the user exists
# Return an object containing current settings for the user
try:
user_info = win32net.NetUserGetInfo(None, name, 4)
except win32net.error as exc:
log.error("Failed to update user %s", name)
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
return False
# Check parameters to update
# Update the user object with new settings
if password:
user_info["password"] = password
if home:
user_info["home_dir"] = home
if homedrive:
user_info["home_dir_drive"] = homedrive
if description:
user_info["comment"] = description
if logonscript:
user_info["script_path"] = logonscript
if fullname:
user_info["full_name"] = fullname
if profile:
user_info["profile"] = profile
if expiration_date:
if expiration_date == "Never":
user_info["acct_expires"] = win32netcon.TIMEQ_FOREVER
else:
try:
dt_obj = salt.utils.dateutils.date_cast(expiration_date)
except (ValueError, RuntimeError):
return "Invalid Date/Time Format: {0}".format(expiration_date)
user_info["acct_expires"] = time.mktime(dt_obj.timetuple())
if expired is not None:
if expired:
user_info["password_expired"] = 1
else:
user_info["password_expired"] = 0
if account_disabled is not None:
if account_disabled:
user_info["flags"] |= win32netcon.UF_ACCOUNTDISABLE
else:
user_info["flags"] &= ~win32netcon.UF_ACCOUNTDISABLE
if unlock_account is not None:
if unlock_account:
user_info["flags"] &= ~win32netcon.UF_LOCKOUT
if password_never_expires is not None:
if password_never_expires:
user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
else:
user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
if disallow_change_password is not None:
if disallow_change_password:
user_info["flags"] |= win32netcon.UF_PASSWD_CANT_CHANGE
else:
user_info["flags"] &= ~win32netcon.UF_PASSWD_CANT_CHANGE
# Apply new settings
try:
win32net.NetUserSetInfo(None, name, 4, user_info)
except win32net.error as exc:
log.error("Failed to update user %s", name)
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
return False
return True
def delete(name, purge=False, force=False):
"""
Remove a user from the minion
Args:
name (str): The name of the user to delete
purge (bool, optional): Boolean value indicating that the user profile
should also be removed when the user account is deleted. If set to
True the profile will be removed. Default is False.
force (bool, optional): Boolean value indicating that the user account
should be deleted even if the user is logged in. True will log the
user out and delete the user.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.delete name
"""
if six.PY2:
name = _to_unicode(name)
# Check if the user exists
try:
user_info = win32net.NetUserGetInfo(None, name, 4)
except win32net.error as exc:
log.error("User not found: %s", name)
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
return False
# Check if the user is logged in
# Return a list of logged in users
try:
sess_list = win32ts.WTSEnumerateSessions()
except win32ts.error as exc:
log.error("No logged in users found")
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
# Is the user one that is logged in
logged_in = False
session_id = None
for sess in sess_list:
if (
win32ts.WTSQuerySessionInformation(
None, sess["SessionId"], win32ts.WTSUserName
)
== name
):
session_id = sess["SessionId"]
logged_in = True
# If logged in and set to force, log the user out and continue
# If logged in and not set to force, return false
if logged_in:
if force:
try:
win32ts.WTSLogoffSession(
win32ts.WTS_CURRENT_SERVER_HANDLE, session_id, True
)
except win32ts.error as exc:
log.error("User not found: %s", name)
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
return False
else:
log.error("User %s is currently logged in.", name)
return False
# Remove the User Profile directory
if purge:
try:
sid = getUserSid(name)
win32profile.DeleteProfile(sid)
except pywintypes.error as exc:
(number, context, message) = exc.args
if number == 2: # Profile Folder Not Found
pass
else:
log.error("Failed to remove profile for %s", name)
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
return False
# And finally remove the user account
try:
win32net.NetUserDel(None, name)
except win32net.error as exc:
log.error("Failed to delete user %s", name)
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
return False
return True
def getUserSid(username):
"""
Get the Security ID for the user
Args:
username (str): The user name for which to look up the SID
Returns:
str: The user SID
CLI Example:
.. code-block:: bash
salt '*' user.getUserSid jsnuffy
"""
if six.PY2:
username = _to_unicode(username)
domain = win32api.GetComputerName()
if username.find("\\") != -1:
domain = username.split("\\")[0]
username = username.split("\\")[-1]
domain = domain.upper()
return win32security.ConvertSidToStringSid(
win32security.LookupAccountName(None, domain + "\\" + username)[0]
)
def setpassword(name, password):
"""
Set the user's password
Args:
name (str): The user name for which to set the password
password (str): The new password
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.setpassword jsnuffy sup3rs3cr3t
"""
return update(name=name, password=password)
def addgroup(name, group):
"""
Add user to a group
Args:
name (str): The user name to add to the group
group (str): The name of the group to which to add the user
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.addgroup jsnuffy 'Power Users'
"""
if six.PY2:
name = _to_unicode(name)
group = _to_unicode(group)
name = _cmd_quote(name)
group = _cmd_quote(group).lstrip("'").rstrip("'")
user = info(name)
if not user:
return False
if group in user["groups"]:
return True
cmd = 'net localgroup "{0}" {1} /add'.format(group, name)
ret = __salt__["cmd.run_all"](cmd, python_shell=True)
return ret["retcode"] == 0
def removegroup(name, group):
"""
Remove user from a group
Args:
name (str): The user name to remove from the group
group (str): The name of the group from which to remove the user
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.removegroup jsnuffy 'Power Users'
"""
if six.PY2:
name = _to_unicode(name)
group = _to_unicode(group)
name = _cmd_quote(name)
group = _cmd_quote(group).lstrip("'").rstrip("'")
user = info(name)
if not user:
return False
if group not in user["groups"]:
return True
cmd = 'net localgroup "{0}" {1} /delete'.format(group, name)
ret = __salt__["cmd.run_all"](cmd, python_shell=True)
return ret["retcode"] == 0
def chhome(name, home, **kwargs):
"""
Change the home directory of the user. Pass True for persist to move files
to the new home directory if the old home directory exists.
Args:
name (str): The name of the user whose home directory you wish to change
home (str): The new location of the home directory
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo \\\\fileserver\\home\\foo True
"""
if six.PY2:
name = _to_unicode(name)
home = _to_unicode(home)
kwargs = salt.utils.args.clean_kwargs(**kwargs)
persist = kwargs.pop("persist", False)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if persist:
log.info("Ignoring unsupported 'persist' argument to user.chhome")
pre_info = info(name)
if not pre_info:
return False
if home == pre_info["home"]:
return True
if not update(name=name, home=home):
return False
post_info = info(name)
if post_info["home"] != pre_info["home"]:
return post_info["home"] == home
return False
def chprofile(name, profile):
"""
Change the profile directory of the user
Args:
name (str): The name of the user whose profile you wish to change
profile (str): The new location of the profile
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.chprofile foo \\\\fileserver\\profiles\\foo
"""
return update(name=name, profile=profile)
def chfullname(name, fullname):
"""
Change the full name of the user
Args:
name (str): The user name for which to change the full name
fullname (str): The new value for the full name
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.chfullname user 'First Last'
"""
return update(name=name, fullname=fullname)
def chgroups(name, groups, append=True):
"""
Change the groups this user belongs to, add append=False to make the user a
member of only the specified groups
Args:
name (str): The user name for which to change groups
groups (str, list): A single group or a list of groups to assign to the
user. For multiple groups this can be a comma delimited string or a
list.
append (bool, optional): True adds the passed groups to the user's
current groups. False sets the user's groups to the passed groups
only. Default is True.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.chgroups jsnuffy Administrators,Users True
"""
if six.PY2:
name = _to_unicode(name)
if isinstance(groups, string_types):
groups = groups.split(",")
groups = [x.strip(" *") for x in groups]
if six.PY2:
groups = [_to_unicode(x) for x in groups]
ugrps = set(list_groups(name))
if ugrps == set(groups):
return True
name = _cmd_quote(name)
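    # With append=False, first drop the user from any current group that is not in the requested list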
if not append:
for group in ugrps:
group = _cmd_quote(group).lstrip("'").rstrip("'")
if group not in groups:
cmd = 'net localgroup "{0}" {1} /delete'.format(group, name)
__salt__["cmd.run_all"](cmd, python_shell=True)
for group in groups:
if group in ugrps:
continue
group = _cmd_quote(group).lstrip("'").rstrip("'")
cmd = 'net localgroup "{0}" {1} /add'.format(group, name)
out = __salt__["cmd.run_all"](cmd, python_shell=True)
if out["retcode"] != 0:
log.error(out["stdout"])
return False
agrps = set(list_groups(name))
return len(ugrps - agrps) == 0
def info(name):
"""
Return user information
Args:
name (str): Username for which to display information
Returns:
dict: A dictionary containing user information
- fullname
- username
- SID
- passwd (will always return None)
- comment (same as description, left here for backwards compatibility)
- description
- active
- logonscript
- profile
- home
- homedrive
- groups
- password_changed
- successful_logon_attempts
- failed_logon_attempts
- last_logon
- account_disabled
- account_locked
- password_never_expires
- disallow_change_password
- gid
CLI Example:
.. code-block:: bash
salt '*' user.info jsnuffy
"""
if six.PY2:
name = _to_unicode(name)
ret = {}
items = {}
try:
items = win32net.NetUserGetInfo(None, name, 4)
except win32net.error:
pass
if items:
groups = []
try:
groups = win32net.NetUserGetLocalGroups(None, name)
except win32net.error:
pass
ret["fullname"] = items["full_name"]
ret["name"] = items["name"]
ret["uid"] = win32security.ConvertSidToStringSid(items["user_sid"])
ret["passwd"] = items["password"]
ret["comment"] = items["comment"]
ret["description"] = items["comment"]
ret["active"] = not bool(items["flags"] & win32netcon.UF_ACCOUNTDISABLE)
ret["logonscript"] = items["script_path"]
ret["profile"] = items["profile"]
ret["failed_logon_attempts"] = items["bad_pw_count"]
ret["successful_logon_attempts"] = items["num_logons"]
secs = time.mktime(datetime.now().timetuple()) - items["password_age"]
ret["password_changed"] = datetime.fromtimestamp(secs).strftime(
"%Y-%m-%d %H:%M:%S"
)
if items["last_logon"] == 0:
ret["last_logon"] = "Never"
else:
ret["last_logon"] = datetime.fromtimestamp(items["last_logon"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
ret["expiration_date"] = datetime.fromtimestamp(items["acct_expires"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
ret["expired"] = items["password_expired"] == 1
if not ret["profile"]:
ret["profile"] = _get_userprofile_from_registry(name, ret["uid"])
ret["home"] = items["home_dir"]
ret["homedrive"] = items["home_dir_drive"]
if not ret["home"]:
ret["home"] = ret["profile"]
ret["groups"] = groups
if items["flags"] & win32netcon.UF_DONT_EXPIRE_PASSWD == 0:
ret["password_never_expires"] = False
else:
ret["password_never_expires"] = True
if items["flags"] & win32netcon.UF_ACCOUNTDISABLE == 0:
ret["account_disabled"] = False
else:
ret["account_disabled"] = True
if items["flags"] & win32netcon.UF_LOCKOUT == 0:
ret["account_locked"] = False
else:
ret["account_locked"] = True
if items["flags"] & win32netcon.UF_PASSWD_CANT_CHANGE == 0:
ret["disallow_change_password"] = False
else:
ret["disallow_change_password"] = True
ret["gid"] = ""
return ret
else:
return {}
def _get_userprofile_from_registry(user, sid):
"""
In case net user doesn't return the userprofile we can get it from the
registry
Args:
user (str): The user name, used in debug message
sid (str): The sid to lookup in the registry
Returns:
str: Profile directory
"""
profile_dir = __salt__["reg.read_value"](
"HKEY_LOCAL_MACHINE",
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\{0}".format(sid),
"ProfileImagePath",
)["vdata"]
log.debug('user %s with sid=%s profile is located at "%s"', user, sid, profile_dir)
return profile_dir
def list_groups(name):
"""
Return a list of groups the named user belongs to
Args:
name (str): The user name for which to list groups
Returns:
list: A list of groups to which the user belongs
CLI Example:
.. code-block:: bash
salt '*' user.list_groups foo
"""
if six.PY2:
name = _to_unicode(name)
ugrp = set()
try:
user = info(name)["groups"]
except KeyError:
return False
for group in user:
ugrp.add(group.strip(" *"))
return sorted(list(ugrp))
def getent(refresh=False):
"""
Return the list of all info for all users
Args:
refresh (bool, optional): Refresh the cached user information. Useful
when used from within a state function. Default is False.
Returns:
dict: A dictionary containing information about all users on the system
CLI Example:
.. code-block:: bash
salt '*' user.getent
"""
if "user.getent" in __context__ and not refresh:
return __context__["user.getent"]
ret = []
for user in __salt__["user.list_users"]():
stuff = {}
user_info = __salt__["user.info"](user)
stuff["gid"] = ""
stuff["groups"] = user_info["groups"]
stuff["home"] = user_info["home"]
stuff["name"] = user_info["name"]
stuff["passwd"] = user_info["passwd"]
stuff["shell"] = ""
stuff["uid"] = user_info["uid"]
ret.append(stuff)
__context__["user.getent"] = ret
return ret
def list_users():
"""
Return a list of all users on Windows
Returns:
list: A list of all users on the system
CLI Example:
.. code-block:: bash
salt '*' user.list_users
"""
res = 0
user_list = []
dowhile = True
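    # NetUserEnum returns accounts in pages; res is the resume handle used to fetch the next page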
try:
while res or dowhile:
dowhile = False
(users, _, res) = win32net.NetUserEnum(
None,
0,
win32netcon.FILTER_NORMAL_ACCOUNT,
res,
win32netcon.MAX_PREFERRED_LENGTH,
)
for user in users:
user_list.append(user["name"])
return user_list
except win32net.error:
pass
def rename(name, new_name):
"""
Change the username for a named user
Args:
name (str): The user name to change
new_name (str): The new name for the current user
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' user.rename jsnuffy jshmoe
"""
if six.PY2:
name = _to_unicode(name)
new_name = _to_unicode(new_name)
# Load information for the current name
current_info = info(name)
if not current_info:
raise CommandExecutionError("User '{0}' does not exist".format(name))
# Look for an existing user with the new name
new_info = info(new_name)
if new_info:
raise CommandExecutionError("User '{0}' already exists".format(new_name))
# Rename the user account
# Connect to WMI
with salt.utils.winapi.Com():
c = wmi.WMI(find_classes=0)
# Get the user object
try:
user = c.Win32_UserAccount(Name=name)[0]
except IndexError:
raise CommandExecutionError("User '{0}' does not exist".format(name))
# Rename the user
result = user.Rename(new_name)[0]
# Check the result (0 means success)
        if result != 0:
# Define Error Dict
error_dict = {
0: "Success",
1: "Instance not found",
2: "Instance required",
3: "Invalid parameter",
4: "User not found",
5: "Domain not found",
6: "Operation is allowed only on the primary domain controller of the domain",
7: "Operation is not allowed on the last administrative account",
8: "Operation is not allowed on specified special groups: user, admin, local, or guest",
9: "Other API error",
10: "Internal error",
}
raise CommandExecutionError(
"There was an error renaming '{0}' to '{1}'. Error: {2}".format(
name, new_name, error_dict[result]
)
)
return info(new_name).get("name") == new_name
def current(sam=False):
"""
Get the username that salt-minion is running under. If salt-minion is
running as a service it should return the Local System account. If salt is
running from a command prompt it should return the username that started the
command prompt.
.. versionadded:: 2015.5.6
Args:
sam (bool, optional): False returns just the username without any domain
notation. True returns the domain with the username in the SAM
format. Ie: ``domain\\username``
Returns:
str: Returns username
CLI Example:
.. code-block:: bash
salt '*' user.current
"""
try:
if sam:
user_name = win32api.GetUserNameEx(win32con.NameSamCompatible)
else:
user_name = win32api.GetUserName()
except pywintypes.error as exc:
log.error("Failed to get current user")
log.error("nbr: %s", exc.winerror)
log.error("ctx: %s", exc.funcname)
log.error("msg: %s", exc.strerror)
raise CommandExecutionError("Failed to get current user", info=exc)
if not user_name:
raise CommandExecutionError("Failed to get current user")
return user_name
# Source: ooobuild/lo/i18n/k_parse_type.py (repo: Amourspirit/ooo_uno_tmpl, license: Apache-2.0)
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.i18n
class KParseType(object):
"""
Const Class
Constants to specify the type of a parsed token.
Set by XCharacterClassification.parseAnyToken() and XCharacterClassification.parsePredefinedToken() in ParseResult.TokenType.
See Also:
`API KParseType <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1i18n_1_1KParseType.html>`_
"""
__ooo_ns__: str = 'com.sun.star.i18n'
__ooo_full_ns__: str = 'com.sun.star.i18n.KParseType'
__ooo_type_name__: str = 'const'
ONE_SINGLE_CHAR = 1
"""
One single character like ! # ; : $ et al.
"""
BOOLEAN = 2
"""
A Boolean operator like <, >, <>, =, <=, >=.
"""
IDENTNAME = 4
"""
A name matching the conditions passed.
"""
SINGLE_QUOTE_NAME = 8
"""
\"A single-quoted name matching the conditions passed ( 'na\\'me' ).\" \"Dequoted name in ParseResult.DequotedNameOrString ( na'me ).\"
"""
DOUBLE_QUOTE_STRING = 16
"""
A double-quoted string ( \"str\\\"i\"\"ng\" ).
Dequoted string in ParseResult.DequotedNameOrString ( str\"i\"ng ).
"""
ASC_NUMBER = 32
"""
A number where all digits are ASCII characters.
Numerical value in ParseResult.Value.
"""
UNI_NUMBER = 64
"""
A number where at least some digits are Unicode (and maybe ASCII) characters.
    Numerical value in ParseResult.Value.
"""
MISSING_QUOTE = 1073741824
"""
Set (ored) if SINGLE_QUOTE_NAME or DOUBLE_QUOTE_STRING has no closing quote.
"""
ANY_NUMBER = ASC_NUMBER | UNI_NUMBER
"""
Any ASCII or Unicode number.
"""
__all__ = ['KParseType']
# Source: dispike/server.py (repo: cgeopapa/dispike, license: MIT)
from fastapi import APIRouter, Request, Response
from fastapi.responses import PlainTextResponse
from loguru import logger
from .middlewares.verification import DiscordVerificationMiddleware
from .models.incoming import (
IncomingDiscordInteraction,
IncomingDiscordOptionList,
SubcommandIncomingDiscordOptionList,
IncomingDiscordOption,
)
from .eventer import EventHandler
from .eventer_helpers.determine_event_information import determine_event_information
from .response import DiscordResponse
import json
import typing
router = APIRouter()
interaction = EventHandler()
_RAISE_FOR_TESTING = False
@router.get("/ping")
async def ping():
return PlainTextResponse(
"If you see this, Your instance is working and accepting requests."
)
@router.post("/interactions")
async def handle_interactions(request: Request) -> Response:
logger.info("interaction recieved.")
_get_request_body = json.loads(request.state._cached_body.decode())
logger.info(_get_request_body)
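    # Discord sends a type-1 PING when verifying the endpoint; acknowledge it with a type-1 PONG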
if _get_request_body["type"] == 1:
logger.info("handling ACK Ping.")
return {"type": 1}
_parse_to_object = IncomingDiscordInteraction(**_get_request_body)
_event_name, arguments = determine_event_information(_parse_to_object)
logger.info(f"event name: {_event_name}")
    if not interaction.check_event_exists(_event_name):
logger.debug("discarding event not existing.")
return {"type": 5}
# _event_settings = interaction.return_event_settings(_event_name)
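    # Pass the parsed interaction object to the handler under the configured context argument name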
arguments[router._user_defined_setting_ctx_value] = _parse_to_object
    # Check the type hint for the return type, falling back to checking the value's type if no hint is provided
try:
_type_hinted_request = interaction.view_event_function_return_type(_event_name)
_type_hinted_returned_value = _type_hinted_request["return"]
if _type_hinted_returned_value == DiscordResponse:
_get_res = await interaction.emit(_event_name, **arguments)
logger.debug(_get_res.response)
return _get_res.response
elif _type_hinted_returned_value == dict:
return await interaction.emit(_event_name, **arguments)
except KeyError:
logger.error(
"unable to find return value for type hint.. resorting to guessing.."
)
        if _RAISE_FOR_TESTING:
raise AssertionError("No hinting!") # pragma: no cover
except Exception:
logger.exception("unhandled exception for returning hinted value")
raise
interaction_data = await interaction.emit(_event_name, **arguments)
if isinstance(interaction_data, DiscordResponse):
interaction_data: DiscordResponse
return interaction_data.response
if isinstance(interaction_data, dict):
return interaction_data
# Backup response, simply acknowledge. (Type 5)
return {"type": 5}
# Source: rust/private/rustc.bzl (repo: cfredric/rules_rust, license: Apache-2.0)
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for constructing actions that invoke the Rust compiler"""
load(
"@bazel_tools//tools/build_defs/cc:action_names.bzl",
"CPP_LINK_EXECUTABLE_ACTION_NAME",
)
load("//rust/private:common.bzl", "rust_common")
load("//rust/private:providers.bzl", _BuildInfo = "BuildInfo")
load("//rust/private:stamp.bzl", "is_stamping_enabled")
load(
"//rust/private:utils.bzl",
"abs",
"expand_dict_value_locations",
"expand_list_element_locations",
"find_cc_toolchain",
"get_lib_name",
"get_preferred_artifact",
"is_exec_configuration",
"make_static_lib_symlink",
"relativize",
)
BuildInfo = _BuildInfo
AliasableDepInfo = provider(
doc = "A provider mapping an alias name to a Crate's information.",
fields = {
"dep": "CrateInfo",
"name": "str",
},
)
_error_format_values = ["human", "json", "short"]
ErrorFormatInfo = provider(
doc = "Set the --error-format flag for all rustc invocations",
fields = {"error_format": "(string) [" + ", ".join(_error_format_values) + "]"},
)
ExtraRustcFlagsInfo = provider(
doc = "Pass each value as an additional flag to non-exec rustc invocations",
fields = {"extra_rustc_flags": "List[string] Extra flags to pass to rustc in non-exec configuration"},
)
ExtraExecRustcFlagsInfo = provider(
doc = "Pass each value as an additional flag to exec rustc invocations",
fields = {"extra_exec_rustc_flags": "List[string] Extra flags to pass to rustc in exec configuration"},
)
def _get_rustc_env(attr, toolchain, crate_name):
"""Gathers rustc environment variables
Args:
attr (struct): The current target's attributes
toolchain (rust_toolchain): The current target's rust toolchain context
crate_name (str): The name of the crate to be compiled
Returns:
dict: Rustc environment variables
"""
version = attr.version if hasattr(attr, "version") else "0.0.0"
major, minor, patch = version.split(".", 2)
if "-" in patch:
patch, pre = patch.split("-", 1)
else:
pre = ""
return {
"CARGO_CFG_TARGET_ARCH": toolchain.target_arch,
"CARGO_CFG_TARGET_OS": toolchain.os,
"CARGO_CRATE_NAME": crate_name,
"CARGO_PKG_AUTHORS": "",
"CARGO_PKG_DESCRIPTION": "",
"CARGO_PKG_HOMEPAGE": "",
"CARGO_PKG_NAME": attr.name,
"CARGO_PKG_VERSION": version,
"CARGO_PKG_VERSION_MAJOR": major,
"CARGO_PKG_VERSION_MINOR": minor,
"CARGO_PKG_VERSION_PATCH": patch,
"CARGO_PKG_VERSION_PRE": pre,
}
def get_compilation_mode_opts(ctx, toolchain):
"""Gathers rustc flags for the current compilation mode (opt/debug)
Args:
ctx (ctx): The current rule's context object
toolchain (rust_toolchain): The current rule's `rust_toolchain`
Returns:
struct: See `_rust_toolchain_impl` for more details
"""
comp_mode = ctx.var["COMPILATION_MODE"]
if not comp_mode in toolchain.compilation_mode_opts:
fail("Unrecognized compilation mode {} for toolchain.".format(comp_mode))
return toolchain.compilation_mode_opts[comp_mode]
def _are_linkstamps_supported(feature_configuration, has_grep_includes):
# Are linkstamps supported by the C++ toolchain?
return (cc_common.is_enabled(feature_configuration = feature_configuration, feature_name = "linkstamps") and
# Is Bazel recent enough to support Starlark linkstamps?
hasattr(cc_common, "register_linkstamp_compile_action") and
            # Does the current rule define the _grep_includes attribute? This
            # attribute is required for compiling linkstamps.
has_grep_includes)
def _should_use_pic(cc_toolchain, feature_configuration, crate_type):
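    # Only dynamic-library crate types need position-independent code; defer to the C++ toolchain's policy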
if crate_type in ("cdylib", "dylib"):
return cc_toolchain.needs_pic_for_dynamic_libraries(feature_configuration = feature_configuration)
return False
def collect_deps(
deps,
proc_macro_deps,
aliases,
are_linkstamps_supported = False):
"""Walks through dependencies and collects the transitive dependencies.
Args:
deps (list): The deps from ctx.attr.deps.
proc_macro_deps (list): The proc_macro deps from ctx.attr.proc_macro_deps.
aliases (dict): A dict mapping aliased targets to their actual Crate information.
        are_linkstamps_supported (bool): Whether the current rule and the toolchain support building linkstamps.
Returns:
tuple: Returns a tuple of:
DepInfo,
BuildInfo,
linkstamps (depset[CcLinkstamp]): A depset of CcLinkstamps that need to be compiled and linked into all linked binaries.
"""
direct_crates = []
transitive_crates = []
transitive_noncrates = []
transitive_build_infos = []
transitive_link_search_paths = []
build_info = None
linkstamps = []
transitive_crate_outputs = []
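    # Re-key aliases by label so they can be looked up by the owning crate's label below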
aliases = {k.label: v for k, v in aliases.items()}
for dep in depset(transitive = [deps, proc_macro_deps]).to_list():
(crate_info, dep_info) = _get_crate_and_dep_info(dep)
cc_info = _get_cc_info(dep)
dep_build_info = _get_build_info(dep)
if cc_info and are_linkstamps_supported:
linkstamps.append(cc_info.linking_context.linkstamps())
if crate_info:
# This dependency is a rust_library
# When crate_info.owner is set, we use it. When the dep type is Target we get the
# label from dep.label
owner = getattr(crate_info, "owner", dep.label if type(dep) == "Target" else None)
direct_crates.append(AliasableDepInfo(
name = aliases.get(owner, crate_info.name),
dep = crate_info,
))
transitive_crates.append(
depset(
[crate_info],
transitive = [] if "proc-macro" in [
crate_info.type,
crate_info.wrapped_crate_type,
] else [dep_info.transitive_crates],
),
)
transitive_crate_outputs.append(
depset(
[crate_info.output],
transitive = [] if "proc-macro" in [
crate_info.type,
crate_info.wrapped_crate_type,
] else [dep_info.transitive_crate_outputs],
),
)
if "proc-macro" not in [crate_info.type, crate_info.wrapped_crate_type]:
transitive_noncrates.append(dep_info.transitive_noncrates)
transitive_build_infos.append(dep_info.transitive_build_infos)
transitive_link_search_paths.append(dep_info.link_search_path_files)
elif cc_info:
# This dependency is a cc_library
transitive_noncrates.append(cc_info.linking_context.linker_inputs)
elif dep_build_info:
if build_info:
fail("Several deps are providing build information, " +
"only one is allowed in the dependencies")
build_info = dep_build_info
transitive_build_infos.append(depset([build_info]))
transitive_link_search_paths.append(depset([build_info.link_search_paths]))
else:
fail("rust targets can only depend on rust_library, rust_*_library or cc_library " +
"targets.")
transitive_crates_depset = depset(transitive = transitive_crates)
return (
rust_common.dep_info(
direct_crates = depset(direct_crates),
transitive_crates = transitive_crates_depset,
transitive_noncrates = depset(
transitive = transitive_noncrates,
order = "topological", # dylib link flag ordering matters.
),
transitive_crate_outputs = depset(transitive = transitive_crate_outputs),
transitive_build_infos = depset(transitive = transitive_build_infos),
link_search_path_files = depset(transitive = transitive_link_search_paths),
dep_env = build_info.dep_env if build_info else None,
),
build_info,
depset(transitive = linkstamps),
)
def _collect_libs_from_linker_inputs(linker_inputs, use_pic):
# TODO: We could let the user choose how to link, instead of always preferring to link static libraries.
return [
get_preferred_artifact(lib, use_pic)
for li in linker_inputs
for lib in li.libraries
]
def _get_crate_and_dep_info(dep):
if type(dep) == "Target" and rust_common.crate_info in dep:
return (dep[rust_common.crate_info], dep[rust_common.dep_info])
elif type(dep) == "struct" and hasattr(dep, "crate_info"):
return (dep.crate_info, dep.dep_info)
return (None, None)
def _get_cc_info(dep):
if type(dep) == "Target" and CcInfo in dep:
return dep[CcInfo]
elif type(dep) == "struct" and hasattr(dep, "cc_info"):
return dep.cc_info
return None
def _get_build_info(dep):
if type(dep) == "Target" and BuildInfo in dep:
return dep[BuildInfo]
elif type(dep) == "struct" and hasattr(dep, "build_info"):
return dep.build_info
return None
def get_cc_user_link_flags(ctx):
"""Get the current target's linkopt flags
Args:
ctx (ctx): The current rule's context object
Returns:
depset: The flags passed to Bazel by --linkopt option.
"""
return ctx.fragments.cpp.linkopts
def get_linker_and_args(ctx, attr, cc_toolchain, feature_configuration, rpaths):
"""Gathers cc_common linker information
Args:
ctx (ctx): The current target's context object
attr (struct): Attributes to use in gathering linker args
cc_toolchain (CcToolchain): cc_toolchain for which we are creating build variables.
feature_configuration (FeatureConfiguration): Feature configuration to be queried.
rpaths (depset): Depset of directories where loader will look for libraries at runtime.
Returns:
tuple: A tuple of the following items:
- (str): The tool path for given action.
- (sequence): A flattened command line flags for given action.
- (dict): Environment variables to be set for given action.
"""
user_link_flags = get_cc_user_link_flags(ctx)
# Add linkopt's from dependencies. This includes linkopts from transitive
# dependencies since they get merged up.
for dep in getattr(attr, "deps", []):
if CcInfo in dep and dep[CcInfo].linking_context:
for linker_input in dep[CcInfo].linking_context.linker_inputs.to_list():
for flag in linker_input.user_link_flags:
user_link_flags.append(flag)
link_variables = cc_common.create_link_variables(
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
is_linking_dynamic_library = False,
runtime_library_search_directories = rpaths,
user_link_flags = user_link_flags,
)
link_args = cc_common.get_memory_inefficient_command_line(
feature_configuration = feature_configuration,
action_name = CPP_LINK_EXECUTABLE_ACTION_NAME,
variables = link_variables,
)
link_env = cc_common.get_environment_variables(
feature_configuration = feature_configuration,
action_name = CPP_LINK_EXECUTABLE_ACTION_NAME,
variables = link_variables,
)
ld = cc_common.get_tool_for_action(
feature_configuration = feature_configuration,
action_name = CPP_LINK_EXECUTABLE_ACTION_NAME,
)
return ld, link_args, link_env
def _process_build_scripts(
build_info,
dep_info,
compile_inputs):
"""Gathers the outputs from a target's `cargo_build_script` action.
Args:
build_info (BuildInfo): The target Build's dependency info.
        dep_info (DepInfo): The DepInfo provider from the target Crate's set of inputs.
compile_inputs (depset): A set of all files that will participate in the build.
Returns:
        tuple: A tuple of the following items:
- (depset[File]): A list of all build info `OUT_DIR` File objects
- (str): The `OUT_DIR` of the current build info
- (File): An optional path to a generated environment file from a `cargo_build_script` target
- (depset[File]): All direct and transitive build flags from the current build info.
"""
extra_inputs, out_dir, build_env_file, build_flags_files = _create_extra_input_args(build_info, dep_info)
compile_inputs = depset(transitive = [extra_inputs, compile_inputs])
return compile_inputs, out_dir, build_env_file, build_flags_files
def _symlink_for_ambiguous_lib(actions, toolchain, crate_info, lib):
"""Constructs a disambiguating symlink for a library dependency.
Args:
actions (Actions): The rule's context actions object.
toolchain: The Rust toolchain object.
crate_info (CrateInfo): The target crate's info.
lib (File): The library to symlink to.
Returns:
(File): The disambiguating symlink for the library.
"""
# FIXME: Once the relative order part of the native-link-modifiers rustc
# feature is stable, we should be able to eliminate the need to construct
# symlinks by passing the full paths to the libraries.
# https://github.com/rust-lang/rust/issues/81490.
# Take the absolute value of hash() since it could be negative.
path_hash = abs(hash(lib.path))
lib_name = get_lib_name(lib)
prefix = "lib"
extension = ".a"
if toolchain.os.startswith("windows"):
prefix = ""
extension = ".lib"
# Ensure the symlink follows the lib<name>.a pattern on Unix-like platforms
# or <name>.lib on Windows.
# Add a hash of the original library path to disambiguate libraries with the same basename.
symlink_name = "{}{}-{}{}".format(prefix, lib_name, path_hash, extension)
# Add the symlink to a target crate-specific _ambiguous_libs/ subfolder,
# to avoid possible collisions with sibling crates that may depend on the
# same ambiguous libraries.
symlink = actions.declare_file("_ambiguous_libs/" + crate_info.output.basename + "/" + symlink_name)
actions.symlink(
output = symlink,
target_file = lib,
progress_message = "Creating symlink to ambiguous lib: {}".format(lib.path),
)
return symlink
def _disambiguate_libs(actions, toolchain, crate_info, dep_info, use_pic):
"""Constructs disambiguating symlinks for ambiguous library dependencies.
The symlinks are all created in a _ambiguous_libs/ subfolder specific to
the target crate to avoid possible collisions with sibling crates that may
depend on the same ambiguous libraries.
Args:
actions (Actions): The rule's context actions object.
toolchain: The Rust toolchain object.
crate_info (CrateInfo): The target crate's info.
dep_info: (DepInfo): The target crate's dependency info.
use_pic: (boolean): Whether the build should use PIC.
Returns:
dict[String, File]: A mapping from ambiguous library paths to their
disambiguating symlink.
"""
# FIXME: Once the relative order part of the native-link-modifiers rustc
# feature is stable, we should be able to eliminate the need to construct
# symlinks by passing the full paths to the libraries.
# https://github.com/rust-lang/rust/issues/81490.
# A dictionary from file paths of ambiguous libraries to the corresponding
# symlink.
ambiguous_libs = {}
# A dictionary maintaining a mapping from preferred library name to the
# last visited artifact with that name.
visited_libs = {}
for link_input in dep_info.transitive_noncrates.to_list():
for lib in link_input.libraries:
# FIXME: Dynamic libs are not disambiguated right now, there are
# cases where those have a non-standard name with version (e.g.,
# //test/unit/versioned_libs). We hope that the link modifiers
# stabilization will come before we need to make this work.
if _is_dylib(lib):
continue
artifact = get_preferred_artifact(lib, use_pic)
name = get_lib_name(artifact)
# On Linux-like platforms, normally library base names start with
# `lib`, following the pattern `lib[name].(a|lo)` and we pass
# -lstatic=name.
# On Windows, the base name looks like `name.lib` and we pass
# -lstatic=name.
# FIXME: Under the native-link-modifiers unstable rustc feature,
# we could use -lstatic:+verbatim instead.
needs_symlink_to_standardize_name = (
(toolchain.os.startswith("linux") or toolchain.os.startswith("mac") or toolchain.os.startswith("darwin")) and
artifact.basename.endswith(".a") and not artifact.basename.startswith("lib")
) or (
toolchain.os.startswith("windows") and not artifact.basename.endswith(".lib")
)
# Detect cases where we need to disambiguate library dependencies
# by constructing symlinks.
if (
needs_symlink_to_standardize_name or
# We have multiple libraries with the same name.
(name in visited_libs and visited_libs[name].path != artifact.path)
):
# Disambiguate the previously visited library (if we just detected
# that it is ambiguous) and the current library.
if name in visited_libs:
old_path = visited_libs[name].path
if old_path not in ambiguous_libs:
ambiguous_libs[old_path] = _symlink_for_ambiguous_lib(actions, toolchain, crate_info, visited_libs[name])
ambiguous_libs[artifact.path] = _symlink_for_ambiguous_lib(actions, toolchain, crate_info, artifact)
visited_libs[name] = artifact
return ambiguous_libs
def collect_inputs(
ctx,
file,
files,
linkstamps,
toolchain,
cc_toolchain,
feature_configuration,
crate_info,
dep_info,
build_info,
stamp = False):
"""Gather's the inputs and required input information for a rustc action
Args:
ctx (ctx): The rule's context object.
file (struct): A struct containing files defined in label type attributes marked as `allow_single_file`.
files (list): A list of all inputs (`ctx.files`).
linkstamps (depset): A depset of CcLinkstamps that need to be compiled and linked into all linked binaries.
toolchain (rust_toolchain): The current `rust_toolchain`.
cc_toolchain (CcToolchainInfo): The current `cc_toolchain`.
feature_configuration (FeatureConfiguration): Feature configuration to be queried.
crate_info (CrateInfo): The Crate information of the crate to process build scripts for.
dep_info (DepInfo): The target Crate's dependency information.
build_info (BuildInfo): The target Crate's build settings.
stamp (bool, optional): Whether or not workspace status stamping is enabled. For more details see
https://docs.bazel.build/versions/main/user-manual.html#flag--stamp
Returns:
        tuple: A tuple of the following items:
- (list): A list of all build info `OUT_DIR` File objects
- (str): The `OUT_DIR` of the current build info
- (File): An optional path to a generated environment file from a `cargo_build_script` target
- (depset[File]): All direct and transitive build flag files from the current build info
- (list[File]): Linkstamp outputs
- (dict[String, File]): Ambiguous libs, see `_disambiguate_libs`.
"""
linker_script = getattr(file, "linker_script") if hasattr(file, "linker_script") else None
linker_depset = cc_toolchain.all_files
use_pic = _should_use_pic(cc_toolchain, feature_configuration, crate_info.type)
# Pass linker inputs only for linking-like actions, not for example where
# the output is rlib. This avoids quadratic behavior where transitive noncrates are
# flattened on each transitive rust_library dependency.
additional_transitive_inputs = []
ambiguous_libs = {}
if crate_info.type in ("staticlib", "proc-macro"):
additional_transitive_inputs = _collect_libs_from_linker_inputs(
dep_info.transitive_noncrates.to_list(),
use_pic,
)
elif crate_info.type in ("bin", "dylib", "cdylib"):
linker_inputs = dep_info.transitive_noncrates.to_list()
ambiguous_libs = _disambiguate_libs(ctx.actions, toolchain, crate_info, dep_info, use_pic)
additional_transitive_inputs = _collect_libs_from_linker_inputs(linker_inputs, use_pic) + [
additional_input
for linker_input in linker_inputs
for additional_input in linker_input.additional_inputs
] + ambiguous_libs.values()
# Compute linkstamps. Use the inputs of the binary as inputs to the
# linkstamp action to ensure linkstamps are rebuilt whenever binary inputs
# change.
linkstamp_outs = []
nolinkstamp_compile_inputs = depset(
getattr(files, "data", []) +
([build_info.rustc_env, build_info.flags] if build_info else []) +
([toolchain.target_json] if toolchain.target_json else []) +
([] if linker_script == None else [linker_script]),
transitive = [
linker_depset,
crate_info.srcs,
dep_info.transitive_crate_outputs,
depset(additional_transitive_inputs),
crate_info.compile_data,
toolchain.all_files,
],
)
if crate_info.type in ("bin", "cdylib"):
# There is no other way to register an action for each member of a depset than
# flattening the depset as of 2021-10-12. Luckily, usually there is only one linkstamp
# in a build, and we only flatten the list on binary targets that perform transitive linking,
# so it's extremely unlikely that this call to `to_list()` will ever be a performance
# problem.
for linkstamp in linkstamps.to_list():
# The linkstamp output path is based on the binary crate
# name and the input linkstamp path. This is to disambiguate
# the linkstamp outputs produced by multiple binary crates
# that depend on the same linkstamp. We use the same pattern
# for the output name as the one used by native cc rules.
out_name = "_objs/" + crate_info.output.basename + "/" + linkstamp.file().path[:-len(linkstamp.file().extension)] + "o"
linkstamp_out = ctx.actions.declare_file(out_name)
linkstamp_outs.append(linkstamp_out)
cc_common.register_linkstamp_compile_action(
actions = ctx.actions,
cc_toolchain = cc_toolchain,
feature_configuration = feature_configuration,
grep_includes = ctx.file._grep_includes,
source_file = linkstamp.file(),
output_file = linkstamp_out,
compilation_inputs = linkstamp.hdrs(),
inputs_for_validation = nolinkstamp_compile_inputs,
label_replacement = str(ctx.label),
output_replacement = crate_info.output.path,
)
# If stamping is enabled include the volatile status info file
stamp_info = [ctx.version_file] if stamp else []
compile_inputs = depset(
linkstamp_outs + stamp_info,
transitive = [
nolinkstamp_compile_inputs,
],
)
build_env_files = getattr(files, "rustc_env_files", [])
compile_inputs, out_dir, build_env_file, build_flags_files = _process_build_scripts(build_info, dep_info, compile_inputs)
if build_env_file:
build_env_files = [f for f in build_env_files] + [build_env_file]
compile_inputs = depset(build_env_files, transitive = [compile_inputs])
return compile_inputs, out_dir, build_env_files, build_flags_files, linkstamp_outs, ambiguous_libs
def construct_arguments(
ctx,
attr,
file,
toolchain,
tool_path,
cc_toolchain,
feature_configuration,
crate_info,
dep_info,
linkstamp_outs,
ambiguous_libs,
output_hash,
rust_flags,
out_dir,
build_env_files,
build_flags_files,
emit = ["dep-info", "link"],
force_all_deps_direct = False,
force_link = False,
stamp = False,
remap_path_prefix = "."):
"""Builds an Args object containing common rustc flags
Args:
ctx (ctx): The rule's context object
attr (struct): The attributes for the target. These may be different from ctx.attr in an aspect context.
file (struct): A struct containing files defined in label type attributes marked as `allow_single_file`.
toolchain (rust_toolchain): The current target's `rust_toolchain`
tool_path (str): Path to rustc
cc_toolchain (CcToolchain): The CcToolchain for the current target.
feature_configuration (FeatureConfiguration): Class used to construct command lines from CROSSTOOL features.
crate_info (CrateInfo): The CrateInfo provider of the target crate
dep_info (DepInfo): The DepInfo provider of the target crate
linkstamp_outs (list): Linkstamp outputs of native dependencies
ambiguous_libs (dict): Ambiguous libs, see `_disambiguate_libs`
output_hash (str): The hashed path of the crate root
rust_flags (list): Additional flags to pass to rustc
out_dir (str): The path to the output directory for the target Crate.
build_env_files (list): Files containing rustc environment variables, for instance from `cargo_build_script` actions.
build_flags_files (depset): The output files of a `cargo_build_script` actions containing rustc build flags
emit (list): Values for the --emit flag to rustc.
force_all_deps_direct (bool, optional): Whether to pass the transitive rlibs with --extern
to the commandline as opposed to -L.
force_link (bool, optional): Whether to add link flags to the command regardless of `emit`.
stamp (bool, optional): Whether or not workspace status stamping is enabled. For more details see
https://docs.bazel.build/versions/main/user-manual.html#flag--stamp
remap_path_prefix (str, optional): A value used to remap `${pwd}` to. If set to a falsey value, no prefix will be set.
Returns:
tuple: A tuple of the following items
- (struct): A struct of arguments used to run the `Rustc` action
- process_wrapper_flags (Args): Arguments for the process wrapper
- rustc_path (Args): Arguments for invoking rustc via the process wrapper
- rustc_flags (Args): Rust flags for the Rust compiler
- all (list): A list of all `Args` objects in the order listed above.
This is to be passed to the `arguments` parameter of actions
- (dict): Common rustc environment variables
"""
output_dir = getattr(crate_info.output, "dirname", None)
linker_script = getattr(file, "linker_script", None)
env = _get_rustc_env(attr, toolchain, crate_info.name)
# Wrapper args first
process_wrapper_flags = ctx.actions.args()
for build_env_file in build_env_files:
process_wrapper_flags.add("--env-file", build_env_file)
process_wrapper_flags.add_all(build_flags_files, before_each = "--arg-file")
# Certain rust build processes expect to find files from the environment
# variable `$CARGO_MANIFEST_DIR`. Examples of this include pest, tera,
# asakuma.
#
# The compiler and by extension proc-macros see the current working
# directory as the Bazel exec root. This is what `$CARGO_MANIFEST_DIR`
# would default to but is often the wrong value (e.g. if the source is in a
# sub-package or if we are building something in an external repository).
# Hence, we need to set `CARGO_MANIFEST_DIR` explicitly.
#
# Since we cannot get the `exec_root` from starlark, we cheat a little and
# use `${pwd}` which resolves the `exec_root` at action execution time.
process_wrapper_flags.add("--subst", "pwd=${pwd}")
# If stamping is enabled, enable the functionality in the process wrapper
if stamp:
process_wrapper_flags.add("--volatile-status-file", ctx.version_file)
# Both ctx.label.workspace_root and ctx.label.package are relative paths
# and either can be empty strings. Avoid trailing/double slashes in the path.
components = "${{pwd}}/{}/{}".format(ctx.label.workspace_root, ctx.label.package).split("/")
env["CARGO_MANIFEST_DIR"] = "/".join([c for c in components if c])
if out_dir != None:
env["OUT_DIR"] = "${pwd}/" + out_dir
# Handle that the binary name and crate name may be different.
#
# If a target name contains a - then cargo (and rules_rust) will generate a
# crate name with _ instead. Accordingly, rustc will generate a output
# file (executable, or rlib, or whatever) with _ not -. But when cargo
# puts a binary in the target/${config} directory, and sets environment
# variables like `CARGO_BIN_EXE_${binary_name}` it will use the - version
# not the _ version. So we rename the rustc-generated file (with _s) to
# have -s if needed.
emit_with_paths = emit
if crate_info.type == "bin" and crate_info.output != None:
generated_file = crate_info.name + toolchain.binary_ext
src = "/".join([crate_info.output.dirname, generated_file])
dst = crate_info.output.path
if src != dst:
emit_with_paths = [("link=" + dst if val == "link" else val) for val in emit]
# Arguments for launching rustc from the process wrapper
rustc_path = ctx.actions.args()
rustc_path.add("--")
rustc_path.add(tool_path)
# Rustc arguments
rustc_flags = ctx.actions.args()
rustc_flags.set_param_file_format("multiline")
rustc_flags.use_param_file("@%s", use_always = False)
rustc_flags.add(crate_info.root)
rustc_flags.add("--crate-name=" + crate_info.name)
rustc_flags.add("--crate-type=" + crate_info.type)
if hasattr(attr, "_error_format"):
rustc_flags.add("--error-format=" + attr._error_format[ErrorFormatInfo].error_format)
# Mangle symbols to disambiguate crates with the same name. This could
# happen only for non-final artifacts where we compute an output_hash,
# e.g., rust_library.
#
# For "final" artifacts and ones intended for distribution outside of
# Bazel, such as rust_binary, rust_static_library and rust_shared_library,
# where output_hash is None we don't need to add these flags.
if output_hash:
extra_filename = "-" + output_hash
rustc_flags.add("--codegen=metadata=" + extra_filename)
rustc_flags.add("--codegen=extra-filename=" + extra_filename)
if output_dir:
rustc_flags.add("--out-dir=" + output_dir)
compilation_mode = get_compilation_mode_opts(ctx, toolchain)
rustc_flags.add("--codegen=opt-level=" + compilation_mode.opt_level)
rustc_flags.add("--codegen=debuginfo=" + compilation_mode.debug_info)
# For determinism to help with build distribution and such
if remap_path_prefix:
rustc_flags.add("--remap-path-prefix=${{pwd}}={}".format(remap_path_prefix))
if emit:
rustc_flags.add("--emit=" + ",".join(emit_with_paths))
rustc_flags.add("--color=always")
rustc_flags.add("--target=" + toolchain.target_flag_value)
if hasattr(attr, "crate_features"):
rustc_flags.add_all(getattr(attr, "crate_features"), before_each = "--cfg", format_each = 'feature="%s"')
if linker_script:
rustc_flags.add(linker_script.path, format = "--codegen=link-arg=-T%s")
# Gets the paths to the folders containing the standard library (or libcore)
rust_std_paths = toolchain.rust_std_paths.to_list()
# Tell Rustc where to find the standard library
rustc_flags.add_all(rust_std_paths, before_each = "-L", format_each = "%s")
rustc_flags.add_all(rust_flags)
# Deduplicate data paths due to https://github.com/bazelbuild/bazel/issues/14681
data_paths = depset(direct = getattr(attr, "data", []) + getattr(attr, "compile_data", [])).to_list()
rustc_flags.add_all(
expand_list_element_locations(
ctx,
getattr(attr, "rustc_flags", []),
data_paths,
),
)
add_edition_flags(rustc_flags, crate_info)
# Link!
if ("link" in emit and crate_info.type not in ["rlib", "lib"]) or force_link:
# Rust's built-in linker can handle linking wasm files. We don't want to attempt to use the cc
# linker since it won't understand.
if toolchain.target_arch != "wasm32":
if output_dir:
use_pic = _should_use_pic(cc_toolchain, feature_configuration, crate_info.type)
rpaths = _compute_rpaths(toolchain, output_dir, dep_info, use_pic)
else:
rpaths = depset([])
ld, link_args, link_env = get_linker_and_args(ctx, attr, cc_toolchain, feature_configuration, rpaths)
env.update(link_env)
rustc_flags.add("--codegen=linker=" + ld)
rustc_flags.add_joined("--codegen", link_args, join_with = " ", format_joined = "link-args=%s")
_add_native_link_flags(rustc_flags, dep_info, linkstamp_outs, ambiguous_libs, crate_info.type, toolchain, cc_toolchain, feature_configuration)
# These always need to be added, even if not linking this crate.
add_crate_link_flags(rustc_flags, dep_info, force_all_deps_direct)
needs_extern_proc_macro_flag = "proc-macro" in [crate_info.type, crate_info.wrapped_crate_type] and \
crate_info.edition != "2015"
if needs_extern_proc_macro_flag:
rustc_flags.add("--extern")
rustc_flags.add("proc_macro")
# Make bin crate data deps available to tests.
for data in getattr(attr, "data", []):
if rust_common.crate_info in data:
dep_crate_info = data[rust_common.crate_info]
if dep_crate_info.type == "bin":
                # Try to make CARGO_BIN_EXE_{} canonical across platforms by stripping out the extension if it exists
env_basename = dep_crate_info.output.basename[:-(1 + len(dep_crate_info.output.extension))] if len(dep_crate_info.output.extension) > 0 else dep_crate_info.output.basename
env["CARGO_BIN_EXE_" + env_basename] = dep_crate_info.output.short_path
# Add environment variables from the Rust toolchain.
env.update(toolchain.env)
# Update environment with user provided variables.
env.update(expand_dict_value_locations(
ctx,
crate_info.rustc_env,
data_paths,
))
# Ensure the sysroot is set for the target platform
env["SYSROOT"] = toolchain.sysroot
if toolchain._rename_first_party_crates:
env["RULES_RUST_THIRD_PARTY_DIR"] = toolchain._third_party_dir
# extra_rustc_flags apply to the target configuration, not the exec configuration.
if hasattr(ctx.attr, "_extra_rustc_flags") and not is_exec_configuration(ctx):
rustc_flags.add_all(ctx.attr._extra_rustc_flags[ExtraRustcFlagsInfo].extra_rustc_flags)
if hasattr(ctx.attr, "_extra_exec_rustc_flags") and is_exec_configuration(ctx):
rustc_flags.add_all(ctx.attr._extra_exec_rustc_flags[ExtraExecRustcFlagsInfo].extra_exec_rustc_flags)
# Create a struct which keeps the arguments separate so each may be tuned or
# replaced where necessary
args = struct(
process_wrapper_flags = process_wrapper_flags,
rustc_path = rustc_path,
rustc_flags = rustc_flags,
all = [process_wrapper_flags, rustc_path, rustc_flags],
)
return args, env
def rustc_compile_action(
ctx,
attr,
toolchain,
crate_info,
output_hash = None,
rust_flags = [],
force_all_deps_direct = False):
"""Create and run a rustc compile action based on the current rule's attributes
Args:
ctx (ctx): The rule's context object
attr (struct): Attributes to use for the rust compile action
toolchain (rust_toolchain): The current `rust_toolchain`
crate_info (CrateInfo): The CrateInfo provider for the current target.
output_hash (str, optional): The hashed path of the crate root. Defaults to None.
rust_flags (list, optional): Additional flags to pass to rustc. Defaults to [].
force_all_deps_direct (bool, optional): Whether to pass the transitive rlibs with --extern
to the commandline as opposed to -L.
Returns:
list: A list of the following providers:
- (CrateInfo): info for the crate we just built; same as `crate_info` parameter.
- (DepInfo): The transitive dependencies of this crate.
- (DefaultInfo): The output file for this crate, and its runfiles.
"""
cc_toolchain, feature_configuration = find_cc_toolchain(ctx)
dep_info, build_info, linkstamps = collect_deps(
deps = crate_info.deps,
proc_macro_deps = crate_info.proc_macro_deps,
aliases = crate_info.aliases,
are_linkstamps_supported = _are_linkstamps_supported(
feature_configuration = feature_configuration,
has_grep_includes = hasattr(ctx.attr, "_grep_includes"),
),
)
# Determine if the build is currently running with --stamp
stamp = is_stamping_enabled(attr)
compile_inputs, out_dir, build_env_files, build_flags_files, linkstamp_outs, ambiguous_libs = collect_inputs(
ctx = ctx,
file = ctx.file,
files = ctx.files,
linkstamps = linkstamps,
toolchain = toolchain,
cc_toolchain = cc_toolchain,
feature_configuration = feature_configuration,
crate_info = crate_info,
dep_info = dep_info,
build_info = build_info,
stamp = stamp,
)
args, env_from_args = construct_arguments(
ctx = ctx,
attr = attr,
file = ctx.file,
toolchain = toolchain,
tool_path = toolchain.rustc.path,
cc_toolchain = cc_toolchain,
feature_configuration = feature_configuration,
crate_info = crate_info,
dep_info = dep_info,
linkstamp_outs = linkstamp_outs,
ambiguous_libs = ambiguous_libs,
output_hash = output_hash,
rust_flags = rust_flags,
out_dir = out_dir,
build_env_files = build_env_files,
build_flags_files = build_flags_files,
force_all_deps_direct = force_all_deps_direct,
stamp = stamp,
)
env = dict(ctx.configuration.default_shell_env)
env.update(env_from_args)
if hasattr(attr, "version") and attr.version != "0.0.0":
formatted_version = " v{}".format(attr.version)
else:
formatted_version = ""
outputs = [crate_info.output]
# For a cdylib that might be added as a dependency to a cc_* target on Windows, it is important to include the
# interface library that rustc generates in the output files.
interface_library = None
if toolchain.os == "windows" and crate_info.type == "cdylib":
# Rustc generates the import library with a `.dll.lib` extension rather than the usual `.lib` one that msvc
# expects (see https://github.com/rust-lang/rust/pull/29520 for more context).
interface_library = ctx.actions.declare_file(crate_info.output.basename + ".lib", sibling = crate_info.output)
outputs.append(interface_library)
# The action might generate extra output that we don't want to include in the `DefaultInfo` files.
action_outputs = list(outputs)
# Rustc generates a pdb file (on Windows) or a dsym folder (on macos) so provide it in an output group for crate
# types that benefit from having debug information in a separate file.
pdb_file = None
dsym_folder = None
if crate_info.type in ("cdylib", "bin") and not crate_info.is_test:
if toolchain.os == "windows":
pdb_file = ctx.actions.declare_file(crate_info.output.basename[:-len(crate_info.output.extension)] + "pdb", sibling = crate_info.output)
action_outputs.append(pdb_file)
elif toolchain.os == "darwin":
dsym_folder = ctx.actions.declare_directory(crate_info.output.basename + ".dSYM", sibling = crate_info.output)
action_outputs.append(dsym_folder)
if ctx.executable._process_wrapper:
# Run as normal
ctx.actions.run(
executable = ctx.executable._process_wrapper,
inputs = compile_inputs,
outputs = action_outputs,
env = env,
arguments = args.all,
mnemonic = "Rustc",
progress_message = "Compiling Rust {} {}{} ({} files)".format(
crate_info.type,
ctx.label.name,
formatted_version,
len(crate_info.srcs.to_list()),
),
)
else:
# Run without process_wrapper
if build_env_files or build_flags_files or stamp:
fail("build_env_files, build_flags_files, stamp are not supported when building without process_wrapper")
ctx.actions.run(
executable = toolchain.rustc,
inputs = compile_inputs,
outputs = action_outputs,
env = env,
arguments = [args.rustc_flags],
mnemonic = "Rustc",
progress_message = "Compiling Rust (without process_wrapper) {} {}{} ({} files)".format(
crate_info.type,
ctx.label.name,
formatted_version,
len(crate_info.srcs.to_list()),
),
)
runfiles = ctx.runfiles(
files = getattr(ctx.files, "data", []),
collect_data = True,
)
# TODO: Remove after some resolution to
# https://github.com/bazelbuild/rules_rust/issues/771
out_binary = getattr(attr, "out_binary", False)
providers = [
DefaultInfo(
# nb. This field is required for cc_library to depend on our output.
files = depset(outputs),
runfiles = runfiles,
executable = crate_info.output if crate_info.type == "bin" or crate_info.is_test or out_binary else None,
),
]
if crate_info.type in ["staticlib", "cdylib"]:
# These rules are not supposed to be depended on by other rust targets, and
# as such they shouldn't provide a CrateInfo. However, one may still want to
# write a rust_test for them, so we provide the CrateInfo wrapped in a provider
# that rust_test understands.
providers.extend([rust_common.test_crate_info(crate = crate_info), dep_info])
else:
providers.extend([crate_info, dep_info])
if toolchain.target_arch != "wasm32":
providers += establish_cc_info(ctx, attr, crate_info, toolchain, cc_toolchain, feature_configuration, interface_library)
if pdb_file:
providers.append(OutputGroupInfo(pdb_file = depset([pdb_file])))
if dsym_folder:
providers.append(OutputGroupInfo(dsym_folder = depset([dsym_folder])))
return providers
def _is_dylib(dep):
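    # Treat a library as dynamic when it provides no static (or PIC static) artifact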
return not bool(dep.static_library or dep.pic_static_library)
def _collect_nonstatic_linker_inputs(cc_info):
shared_linker_inputs = []
for linker_input in cc_info.linking_context.linker_inputs.to_list():
dylibs = [
lib
for lib in linker_input.libraries
if _is_dylib(lib)
]
if dylibs:
shared_linker_inputs.append(cc_common.create_linker_input(
owner = linker_input.owner,
libraries = depset(dylibs),
))
return shared_linker_inputs
def establish_cc_info(ctx, attr, crate_info, toolchain, cc_toolchain, feature_configuration, interface_library):
"""If the produced crate is suitable yield a CcInfo to allow for interop with cc rules
Args:
ctx (ctx): The rule's context object
attr (struct): Attributes to use in gathering CcInfo
crate_info (CrateInfo): The CrateInfo provider of the target crate
toolchain (rust_toolchain): The current `rust_toolchain`
cc_toolchain (CcToolchainInfo): The current `CcToolchainInfo`
feature_configuration (FeatureConfiguration): Feature configuration to be queried.
interface_library (File): Optional interface library for cdylib crates on Windows.
Returns:
list: A list containing the CcInfo provider
"""
# A test will not need to produce CcInfo as nothing can depend on test targets
if crate_info.is_test:
return []
# Only generate CcInfo for particular crate types
if crate_info.type not in ("staticlib", "cdylib", "rlib", "lib"):
return []
# TODO: Remove after some resolution to
# https://github.com/bazelbuild/rules_rust/issues/771
if getattr(attr, "out_binary", False):
return []
if crate_info.type == "staticlib":
library_to_link = cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
static_library = crate_info.output,
# TODO(hlopko): handle PIC/NOPIC correctly
pic_static_library = crate_info.output,
)
elif crate_info.type in ("rlib", "lib"):
# bazel hard-codes a check for endswith((".a", ".pic.a",
# ".lib")) in create_library_to_link, so we work around that
# by creating a symlink to the .rlib with a .a extension.
dot_a = make_static_lib_symlink(ctx.actions, crate_info.output)
# TODO(hlopko): handle PIC/NOPIC correctly
library_to_link = cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
static_library = dot_a,
# TODO(hlopko): handle PIC/NOPIC correctly
pic_static_library = dot_a,
)
elif crate_info.type == "cdylib":
library_to_link = cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
dynamic_library = crate_info.output,
interface_library = interface_library,
)
else:
fail("Unexpected case")
link_input = cc_common.create_linker_input(
owner = ctx.label,
libraries = depset([library_to_link]),
)
linking_context = cc_common.create_linking_context(
# TODO - What to do for no_std?
linker_inputs = depset([link_input]),
)
cc_infos = [
CcInfo(linking_context = linking_context),
toolchain.stdlib_linkflags,
]
for dep in getattr(attr, "deps", []):
if CcInfo in dep:
# A Rust staticlib or shared library doesn't need to propagate linker inputs
# of its dependencies, except for shared libraries.
if crate_info.type in ["cdylib", "staticlib"]:
shared_linker_inputs = _collect_nonstatic_linker_inputs(dep[CcInfo])
if shared_linker_inputs:
linking_context = cc_common.create_linking_context(
linker_inputs = depset(shared_linker_inputs),
)
cc_infos.append(CcInfo(linking_context = linking_context))
else:
cc_infos.append(dep[CcInfo])
if crate_info.type in ("rlib", "lib") and toolchain.libstd_and_allocator_ccinfo:
# TODO: if we already have an rlib in our deps, we could skip this
cc_infos.append(toolchain.libstd_and_allocator_ccinfo)
return [cc_common.merge_cc_infos(cc_infos = cc_infos)]
def add_edition_flags(args, crate):
"""Adds the Rust edition flag to an arguments object reference
Args:
args (Args): A reference to an Args object
crate (CrateInfo): A CrateInfo provider
"""
if crate.edition != "2015":
args.add("--edition={}".format(crate.edition))
def _create_extra_input_args(build_info, dep_info):
"""Gather additional input arguments from transitive dependencies
Args:
build_info (BuildInfo): The BuildInfo provider from the target Crate's set of inputs.
        dep_info (DepInfo): The DepInfo provider from the target Crate's set of inputs.
Returns:
tuple: A tuple of the following items:
- (depset[File]): A list of all build info `OUT_DIR` File objects
- (str): The `OUT_DIR` of the current build info
- (File): An optional generated environment file from a `cargo_build_script` target
- (depset[File]): All direct and transitive build flag files from the current build info.
"""
input_files = []
    # Arguments to the command line wrapper that are going to be used
    # to create the final command line
out_dir = None
build_env_file = None
build_flags_files = []
if build_info:
out_dir = build_info.out_dir.path
build_env_file = build_info.rustc_env
build_flags_files.append(build_info.flags)
build_flags_files.append(build_info.link_flags)
input_files.append(build_info.out_dir)
input_files.append(build_info.link_flags)
return (
depset(input_files, transitive = [dep_info.link_search_path_files]),
out_dir,
build_env_file,
depset(build_flags_files, transitive = [dep_info.link_search_path_files]),
)
def _compute_rpaths(toolchain, output_dir, dep_info, use_pic):
"""Determine the artifact's rpaths relative to the bazel root for runtime linking of shared libraries.
Args:
toolchain (rust_toolchain): The current `rust_toolchain`
output_dir (str): The output directory of the current target
dep_info (DepInfo): The current target's dependency info
use_pic: If set, prefers pic_static_library over static_library.
Returns:
depset: A set of relative paths from the output directory to each dependency
"""
# Windows has no rpath equivalent, so always return an empty depset.
if toolchain.os == "windows":
return depset([])
dylibs = [
get_preferred_artifact(lib, use_pic)
for linker_input in dep_info.transitive_noncrates.to_list()
for lib in linker_input.libraries
if _is_dylib(lib)
]
if not dylibs:
return depset([])
# For darwin, dylibs compiled by Bazel will fail to be resolved at runtime
# without a version of Bazel that includes
# https://github.com/bazelbuild/bazel/pull/13427. This is known to not be
# included in Bazel 4.1 and below.
if toolchain.os != "linux" and toolchain.os != "darwin":
fail("Runtime linking is not supported on {}, but found {}".format(
toolchain.os,
dep_info.transitive_noncrates,
))
# Multiple dylibs can be present in the same directory, so deduplicate them.
return depset([
relativize(lib_dir, output_dir)
for lib_dir in _get_dir_names(dylibs)
])
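# Illustrative note (not from the upstream sources, using hypothetical paths): a
# dependency dylib located in "bazel-out/k8-fastbuild/bin/external/dep/lib" with an
# output_dir of "bazel-out/k8-fastbuild/bin/app" would contribute an rpath entry such
# as "../external/dep/lib", assuming relativize() computes a plain relative path
# between the two directories.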
def _get_dir_names(files):
"""Returns a list of directory names from the given list of File objects
Args:
files (list): A list of File objects
Returns:
list: A list of directory names for all files
"""
dirs = {}
for f in files:
dirs[f.dirname] = None
return dirs.keys()
def add_crate_link_flags(args, dep_info, force_all_deps_direct = False):
"""Adds link flags to an Args object reference
Args:
args (Args): An arguments object reference
dep_info (DepInfo): The current target's dependency info
force_all_deps_direct (bool, optional): Whether to pass the transitive rlibs with --extern
to the commandline as opposed to -L.
"""
if force_all_deps_direct:
args.add_all(
depset(
transitive = [
dep_info.direct_crates,
dep_info.transitive_crates,
],
),
uniquify = True,
map_each = _crate_to_link_flag,
)
else:
# nb. Direct crates are linked via --extern regardless of their crate_type
args.add_all(dep_info.direct_crates, map_each = _crate_to_link_flag)
args.add_all(
dep_info.transitive_crates,
map_each = _get_crate_dirname,
uniquify = True,
format_each = "-Ldependency=%s",
)
def _crate_to_link_flag(crate):
"""A helper macro used by `add_crate_link_flags` for adding crate link flags to a Arg object
Args:
crate (CrateInfo|AliasableDepInfo): A CrateInfo or an AliasableDepInfo provider
Returns:
list: Link flags for the given provider
"""
# This is AliasableDepInfo, we should use the alias as a crate name
if hasattr(crate, "dep"):
name = crate.name
crate_info = crate.dep
else:
name = crate.name
crate_info = crate
return ["--extern={}={}".format(name, crate_info.output.path)]
def _get_crate_dirname(crate):
"""A helper macro used by `add_crate_link_flags` for getting the directory name of the current crate's output path
Args:
crate (CrateInfo): A CrateInfo provider from the current rule
Returns:
        str: The directory name of the output File that will be produced.
"""
return crate.output.dirname
def _portable_link_flags(lib, use_pic, ambiguous_libs):
artifact = get_preferred_artifact(lib, use_pic)
if ambiguous_libs and artifact.path in ambiguous_libs:
artifact = ambiguous_libs[artifact.path]
if lib.static_library or lib.pic_static_library:
        # To ensure appropriate linker library argument order, in the presence
        # of both native libraries that depend on rlibs and rlibs that depend
        # on native libraries, we use an approach where we "sandwich" the
        # rust libraries between two similar sections of all of the native
        # libraries:
        #   n1 n2 ... r1 r2 ... n1 n2 ...
        #   A         B         C
        # This way any dependency from a native library to a rust library
        # is resolved from A to B, and any dependency from a rust library to
        # a native one is resolved from B to C.
        # The question of resolving dependencies from a native library in A
        # to any rust library is addressed in a different place, where we
        # create symlinks to the rlibs, pretending they are native libraries,
        # and adding references to these symlinks in the native section A.
        # We rely on the behavior of -Clink-arg to put the linker args
        # at the end of the linker invocation constructed by rustc.
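        # Illustrative flag shape (hypothetical library name): if get_lib_name()
        # returns "foo" for a static library, the branch below yields
        # ["-lstatic=foo", "-Clink-arg=-lfoo"]; the -lstatic entry is the normal
        # native-library reference, and the -Clink-arg entry becomes the trailing
        # reference (section C above).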
return [
"-lstatic=%s" % get_lib_name(artifact),
"-Clink-arg=-l%s" % get_lib_name(artifact),
]
elif _is_dylib(lib):
return [
"-ldylib=%s" % get_lib_name(artifact),
]
return []
def _make_link_flags_windows(linker_input_and_use_pic_and_ambiguous_libs):
linker_input, use_pic, ambiguous_libs = linker_input_and_use_pic_and_ambiguous_libs
ret = []
for lib in linker_input.libraries:
if lib.alwayslink:
ret.extend(["-C", "link-arg=/WHOLEARCHIVE:%s" % get_preferred_artifact(lib, use_pic).path])
else:
ret.extend(_portable_link_flags(lib, use_pic, ambiguous_libs))
return ret
def _make_link_flags_darwin(linker_input_and_use_pic_and_ambiguous_libs):
linker_input, use_pic, ambiguous_libs = linker_input_and_use_pic_and_ambiguous_libs
ret = []
for lib in linker_input.libraries:
if lib.alwayslink:
ret.extend([
"-C",
("link-arg=-Wl,-force_load,%s" % get_preferred_artifact(lib, use_pic).path),
])
else:
ret.extend(_portable_link_flags(lib, use_pic, ambiguous_libs))
return ret
def _make_link_flags_default(linker_input_and_use_pic_and_ambiguous_libs):
linker_input, use_pic, ambiguous_libs = linker_input_and_use_pic_and_ambiguous_libs
ret = []
for lib in linker_input.libraries:
if lib.alwayslink:
ret.extend([
"-C",
"link-arg=-Wl,--whole-archive",
"-C",
("link-arg=%s" % get_preferred_artifact(lib, use_pic).path),
"-C",
"link-arg=-Wl,--no-whole-archive",
])
else:
ret.extend(_portable_link_flags(lib, use_pic, ambiguous_libs))
return ret
def _libraries_dirnames(linker_input_and_use_pic_and_ambiguous_libs):
link_input, use_pic, _ = linker_input_and_use_pic_and_ambiguous_libs
# De-duplicate names.
return depset([get_preferred_artifact(lib, use_pic).dirname for lib in link_input.libraries]).to_list()
def _add_native_link_flags(args, dep_info, linkstamp_outs, ambiguous_libs, crate_type, toolchain, cc_toolchain, feature_configuration):
"""Adds linker flags for all dependencies of the current target.
Args:
args (Args): The Args struct for a ctx.action
dep_info (DepInfo): Dependency Info provider
linkstamp_outs (list): Linkstamp outputs of native dependencies
ambiguous_libs (dict): Ambiguous libs, see `_disambiguate_libs`
crate_type: Crate type of the current target
toolchain (rust_toolchain): The current `rust_toolchain`
cc_toolchain (CcToolchainInfo): The current `cc_toolchain`
feature_configuration (FeatureConfiguration): feature configuration to use with cc_toolchain
"""
if crate_type in ["lib", "rlib"]:
return
use_pic = _should_use_pic(cc_toolchain, feature_configuration, crate_type)
if toolchain.os == "windows":
make_link_flags = _make_link_flags_windows
elif toolchain.os.startswith("mac") or toolchain.os.startswith("darwin"):
make_link_flags = _make_link_flags_darwin
else:
make_link_flags = _make_link_flags_default
# TODO(hlopko): Remove depset flattening by using lambdas once we are on >=Bazel 5.0
args_and_pic_and_ambiguous_libs = [(arg, use_pic, ambiguous_libs) for arg in dep_info.transitive_noncrates.to_list()]
args.add_all(args_and_pic_and_ambiguous_libs, map_each = _libraries_dirnames, uniquify = True, format_each = "-Lnative=%s")
if ambiguous_libs:
# If there are ambiguous libs, the disambiguation symlinks to them are
# all created in the same directory. Add it to the library search path.
ambiguous_libs_dirname = ambiguous_libs.values()[0].dirname
args.add("-Lnative={}".format(ambiguous_libs_dirname))
args.add_all(args_and_pic_and_ambiguous_libs, map_each = make_link_flags)
for linkstamp_out in linkstamp_outs:
args.add_all(["-C", "link-arg=%s" % linkstamp_out.path])
if crate_type in ["dylib", "cdylib"]:
# For shared libraries we want to link C++ runtime library dynamically
# (for example libstdc++.so or libc++.so).
args.add_all(
cc_toolchain.dynamic_runtime_lib(feature_configuration = feature_configuration),
map_each = _get_dirname,
format_each = "-Lnative=%s",
)
args.add_all(
cc_toolchain.dynamic_runtime_lib(feature_configuration = feature_configuration),
map_each = get_lib_name,
format_each = "-ldylib=%s",
)
else:
# For all other crate types we want to link C++ runtime library statically
# (for example libstdc++.a or libc++.a).
args.add_all(
cc_toolchain.static_runtime_lib(feature_configuration = feature_configuration),
map_each = _get_dirname,
format_each = "-Lnative=%s",
)
args.add_all(
cc_toolchain.static_runtime_lib(feature_configuration = feature_configuration),
map_each = get_lib_name,
format_each = "-lstatic=%s",
)
def _get_dirname(file):
"""A helper function for `_add_native_link_flags`.
Args:
file (File): The target file
Returns:
str: Directory name of `file`
"""
return file.dirname
def _error_format_impl(ctx):
"""Implementation of the `error_format` rule
Args:
ctx (ctx): The rule's context object
Returns:
list: A list containing the ErrorFormatInfo provider
"""
raw = ctx.build_setting_value
if raw not in _error_format_values:
fail("{} expected a value in `{}` but got `{}`".format(
ctx.label,
_error_format_values,
raw,
))
return [ErrorFormatInfo(error_format = raw)]
error_format = rule(
doc = (
"Change the [--error-format](https://doc.rust-lang.org/rustc/command-line-arguments.html#option-error-format) " +
"flag from the command line with `--@rules_rust//:error_format`. See rustc documentation for valid values."
),
implementation = _error_format_impl,
build_setting = config.string(flag = True),
)
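# Illustrative invocation (a sketch, not taken from the upstream documentation; it
# assumes the flag is instantiated as //:error_format in the rules_rust root BUILD
# file, matching the doc string above):
#
#   bazel build //my:crate --@rules_rust//:error_format=json
#
# "json" and "short" are values accepted by rustc's --error-format option.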
def _extra_rustc_flags_impl(ctx):
return ExtraRustcFlagsInfo(extra_rustc_flags = ctx.build_setting_value)
extra_rustc_flags = rule(
doc = (
"Add additional rustc_flags from the command line with `--@rules_rust//:extra_rustc_flags`. " +
"This flag should only be used for flags that need to be applied across the entire build. For options that " +
"apply to individual crates, use the rustc_flags attribute on the individual crate's rule instead. NOTE: " +
"These flags not applied to the exec configuration (proc-macros, cargo_build_script, etc); " +
"use `--@rules_rust//:extra_exec_rustc_flags` to apply flags to the exec configuration."
),
implementation = _extra_rustc_flags_impl,
build_setting = config.string_list(flag = True),
)
def _extra_exec_rustc_flags_impl(ctx):
return ExtraExecRustcFlagsInfo(extra_exec_rustc_flags = ctx.build_setting_value)
extra_exec_rustc_flags = rule(
doc = (
"Add additional rustc_flags in the exec configuration from the command line with `--@rules_rust//:extra_exec_rustc_flags`. " +
"This flag should only be used for flags that need to be applied across the entire build. " +
"These flags only apply to the exec configuration (proc-macros, cargo_build_script, etc)."
),
implementation = _extra_exec_rustc_flags_impl,
build_setting = config.string_list(flag = True),
)
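# Illustrative invocations for the two settings above (hypothetical flag values,
# not taken from the upstream documentation):
#
#   bazel build //my:crate --@rules_rust//:extra_rustc_flags=-Copt-level=3
#   bazel build //my:crate --@rules_rust//:extra_exec_rustc_flags=-Cdebuginfo=0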
| 42.334437
| 187
| 0.666734
|
834ecd6b40ddf2678c02d72588e7faf0160bf72e
| 132
|
py
|
Python
|
dashboard/urls.py
|
makaimc/pfisdi
|
45e897b374d50e2f5385f15cbf318da0e17900f7
|
[
"MIT"
] | 2
|
2015-01-05T21:09:24.000Z
|
2015-07-31T16:52:38.000Z
|
dashboard/urls.py
|
makaimc/pfisdi
|
45e897b374d50e2f5385f15cbf318da0e17900f7
|
[
"MIT"
] | null | null | null |
dashboard/urls.py
|
makaimc/pfisdi
|
45e897b374d50e2f5385f15cbf318da0e17900f7
|
[
"MIT"
] | null | null | null |
from django.conf.urls.defaults import *
urlpatterns = patterns('dashboard.views',
url(r'^$', 'dashboard', name='dashboard'),
)
| 22
| 46
| 0.689394
|
6fea02550bb37131b6c74e7cc03b8a0d9abcc99c
| 117
|
py
|
Python
|
basic/update_upgrade_autoremove.py
|
CiganOliviu/automatisations
|
a4aa01cbb1b70bd837ab747935fb78d63130afc9
|
[
"MIT"
] | null | null | null |
basic/update_upgrade_autoremove.py
|
CiganOliviu/automatisations
|
a4aa01cbb1b70bd837ab747935fb78d63130afc9
|
[
"MIT"
] | null | null | null |
basic/update_upgrade_autoremove.py
|
CiganOliviu/automatisations
|
a4aa01cbb1b70bd837ab747935fb78d63130afc9
|
[
"MIT"
] | null | null | null |
import os
os.system('sudo apt-get update')
os.system('sudo apt-get upgrade')
os.system('sudo apt-get autoremove')
| 14.625
| 36
| 0.726496
|
e79b4c8de00b4f7d169843170e81574fe159af88
| 490
|
py
|
Python
|
src/apps/unit/migrations/0007_topic_description.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
src/apps/unit/migrations/0007_topic_description.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
src/apps/unit/migrations/0007_topic_description.py
|
brainfukk/fiuread
|
7414ec9f580b8bdc78e3ce63bb6ebf1ac7cdc4f8
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-10-11 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("unit", "0006_alter_unitexerciseelementanswer_data"),
]
operations = [
migrations.AddField(
model_name="topic",
name="description",
field=models.CharField(
blank=True, default="", max_length=40, verbose_name="Описание раздела"
),
),
]
| 23.333333
| 86
| 0.593878
|
a1b9676e7d90511b0335b524244d825a68eb09d7
| 23,460
|
py
|
Python
|
unittest/scripts/py_devapi/scripts/mysqlx_column_metadata.py
|
mysql/mysql-shell
|
7a299599a79ef2b2f578ffa41cbc901a88fc6b62
|
[
"Apache-2.0"
] | 119
|
2016-04-14T14:16:22.000Z
|
2022-03-08T20:24:38.000Z
|
unittest/scripts/py_devapi/scripts/mysqlx_column_metadata.py
|
mysql/mysql-shell
|
7a299599a79ef2b2f578ffa41cbc901a88fc6b62
|
[
"Apache-2.0"
] | 9
|
2017-04-26T20:48:42.000Z
|
2021-09-07T01:52:44.000Z
|
unittest/scripts/py_devapi/scripts/mysqlx_column_metadata.py
|
mysql/mysql-shell
|
7a299599a79ef2b2f578ffa41cbc901a88fc6b62
|
[
"Apache-2.0"
] | 51
|
2016-07-20T05:06:48.000Z
|
2022-03-09T01:20:53.000Z
|
# Assumptions: validate_crud_functions available
# Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port>
from __future__ import print_function
from mysqlsh import mysqlx
mySession = mysqlx.get_session(__uripwd)
ensure_schema_does_not_exist(mySession, 'py_shell_test')
schema = mySession.create_schema('py_shell_test')
mySession.set_current_schema('py_shell_test')
server_57 = mySession.sql("select @@version like '5.7%'").execute().fetch_one()[0]
def formatCollation(collation):
if collation == "binary":
return collation
# print collation with normalized output to work in both 5.7 and 8.0
# default utf8mb4 collation in 5.7 is utf8mb4_general_ci
# but in 8.0 it's utf8mb4_0900_ai_ci
if (server_57):
return collation+"//utf8mb4_0900_ai_ci"
else:
return "utf8mb4_general_ci//"+collation
# Metadata Validation On Numeric Types
result = mySession.sql('create table table1 (one bit, two tinyint primary key, utwo tinyint unsigned, three smallint, uthree smallint unsigned, four mediumint, ufour mediumint unsigned, five int, ufive int unsigned, six float, usix float unsigned, csix float(5,3), seven decimal, useven decimal unsigned, cseven decimal(4,2), eight double, ueight double unsigned, ceight double(8,3))').execute()
table = schema.get_table('table1')
result = table.insert().values(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18).execute()
result = table.select().execute()
row = result.fetch_one()
columns = result.get_columns()
column_index = 0
#@ Metadata on Bit Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on TinyInt Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Unsigned TinyInt Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on SmallInt Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Unsigned SmallInt Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on MediumInt Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Unsigned MediumInt Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Int Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Unsigned Int Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Float Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Unsigned Float Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Float Column with length and fractional digits
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Decimal Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Unsigned Decimal Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Decimal Column with length and fractional digits
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Double Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Unsigned Double Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Double Column with length and fractional digits
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
# Metadata Validation On Other Types
result = mySession.sql("create table table2 (one json, two char(5) primary key, three varchar(20), four text, five time, six date, seven timestamp, eight set('1','2','3'), nine enum ('a','b','c'), ten varbinary(15), eleven blob)").execute()
table = schema.get_table('table2')
result = table.insert().values('{"name":"John", "Age":23}', 'test', 'sample', 'a_text', mysqlx.expr('NOW()'), mysqlx.expr('CURDATE()'), mysqlx.expr('NOW()'), '2','c', '¡¡¡¡', '£€¢').execute()
result = table.select().execute()
row = result.fetch_one()
columns = result.get_columns()
column_index = 0
#@ Metadata on Json Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Char Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', formatCollation(column.get_collation_name()))
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Varchar Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', formatCollation(column.get_collation_name()))
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Text Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', formatCollation(column.get_collation_name()))
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Time Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Date Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on DateTime Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', column.get_collation_name())
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Set Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', formatCollation(column.get_collation_name()))
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Enum Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', formatCollation(column.get_collation_name()))
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on VarBinary Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', formatCollation(column.get_collation_name()))
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Metadata on Blob Column
column = columns[column_index]
field = row[column_index]
column_index += 1
print('Field Type: ', type(field))
print('Schema Name:', column.get_schema_name())
print('Table Name:', column.get_table_name())
print('Table Label:', column.get_table_label())
print('Column Name:', column.get_column_name())
print('Column Label:', column.get_column_label())
print('Type:', column.get_type())
print('Length:', column.get_length())
print('Fractional Digits:', column.get_fractional_digits())
print('Is Number Signed:', column.is_number_signed())
print('Collation Name:', formatCollation(column.get_collation_name()))
print('Charset Name:', column.get_character_set_name())
print('Is ZeroFill:', column.is_zero_fill())
#@ Aggregated column type
result = mySession.run_sql("select count(*) from mysql.user")
columns = result.get_columns()
print("Count(*) Type:", columns[0].get_type().data)
mySession.close()
| 40.8
| 395
| 0.749915
|
efc6c9928f805812fb8bc6e7ce87cd18bacc4fcc
| 10,392
|
py
|
Python
|
datasketch/hyperloglog.py
|
kinmanz/lshtry
|
9361b0660239dc9563cfa747f30e1789f3bcf3f4
|
[
"MIT"
] | null | null | null |
datasketch/hyperloglog.py
|
kinmanz/lshtry
|
9361b0660239dc9563cfa747f30e1789f3bcf3f4
|
[
"MIT"
] | null | null | null |
datasketch/hyperloglog.py
|
kinmanz/lshtry
|
9361b0660239dc9563cfa747f30e1789f3bcf3f4
|
[
"MIT"
] | 1
|
2018-04-05T13:51:38.000Z
|
2018-04-05T13:51:38.000Z
|
'''
This module implements the HyperLogLog data sketch for estimating
cardinality of very large dataset in a single pass.
The original HyperLogLog is described here:
http://algo.inria.fr/flajolet/Publications/FlFuGaMe07.pdf
This HyperLogLog implementation is based on:
https://github.com/svpcom/hyperloglog
with enhanced functionalities for serialization and similarities.
'''
import struct, copy
from hashlib import sha1
import numpy as np
try:
from .hyperloglog_const import _thresholds, _raw_estimate, _bias
except ImportError:
# For Python 2
from hyperloglog_const import _thresholds, _raw_estimate, _bias
# Get the number of bits starting from the first non-zero bit to the right
_bit_length = lambda bits : bits.bit_length()
# For < Python 2.7
if not hasattr(int, 'bit_length'):
_bit_length = lambda bits : len(bin(bits)) - 2 if bits > 0 else 0
class HyperLogLog(object):
'''
The HyperLogLog class.
'''
__slots__ = ('p', 'm', 'reg', 'alpha', 'max_rank', 'hashobj')
# The range of the hash values used for HyperLogLog
_hash_range_bit = 32
_hash_range_byte = 4
_struct_fmt_str = '<I'
def _get_alpha(self, p):
if not (4 <= p <= 16):
raise ValueError("p=%d should be in range [4 : 16]" % p)
if p == 4:
return 0.673
if p == 5:
return 0.697
if p == 6:
return 0.709
return 0.7213 / (1.0 + 1.079 / (1 << p))
def __init__(self, p=8, reg=None, hashobj=sha1):
'''
Create a HyperLogLog with precision parameter `p` and (optionally) a
register vector `reg`. If `reg` is specified, the constructor will
        use it as the underlying register, instead of creating a new one, and
the `p` parameter value is ignored.
'''
if reg is None:
self.p = p
self.m = 1 << p
self.reg = np.zeros((self.m,), dtype=np.int8)
else:
# Check if the register has the correct type
if not isinstance(reg, np.ndarray):
raise ValueError("The imported register must be a numpy.ndarray.")
# We have to check if the imported register has the correct length.
self.m = reg.size
self.p = _bit_length(self.m) - 1
if 1 << self.p != self.m:
raise ValueError("The imported register has \
incorrect size. Expect a power of 2.")
# Generally we trust the user to import register that contains
# reasonable counter values, so we don't check for every values.
self.reg = reg
# Common settings
self.hashobj = hashobj
self.alpha = self._get_alpha(self.p)
self.max_rank = self._hash_range_bit - self.p
def __len__(self):
'''
Get the size of the HyperLogLog.
'''
return len(self.reg)
def __eq__(self, other):
'''
Check equivalence between two HyperLogLogs
'''
if self.p != other.p:
return False
if self.m != other.m:
return False
if not np.array_equal(self.reg, other.reg):
return False
return True
def is_empty(self):
'''
Check if the current HyperLogLog is empty - at the state of just
initialized.
'''
if np.any(self.reg):
return False
return True
def clear(self):
'''
Reset the current HyperLogLog.
'''
self.reg = np.zeros((self.m,), dtype=np.int8)
def copy(self):
'''
Create a copy of the current HyperLogLog by exporting its state.
'''
return HyperLogLog(reg=self.digest())
def _get_rank(self, bits):
rank = self.max_rank - _bit_length(bits) + 1
if rank <= 0:
raise ValueError("Hash value overflow, maximum size is %d\
bits" % self.max_rank)
return rank
def update(self, b):
'''
Update the HyperLogLog with a new data value in bytes.
'''
# Digest the hash object to get the hash value
hv = struct.unpack(self._struct_fmt_str,
self.hashobj(b).digest()[:self._hash_range_byte])[0]
# Get the index of the register using the first p bits of the hash
reg_index = hv & (self.m - 1)
# Get the rest of the hash
bits = hv >> self.p
# Update the register
self.reg[reg_index] = max(self.reg[reg_index], self._get_rank(bits))
    def digest(self):
'''
Return the current register.
'''
return copy.copy(self.reg)
def merge(self, other):
'''
Merge the other HyperLogLog with this one, making this the union of the
two.
'''
if self.m != other.m or self.p != other.p:
raise ValueError("Cannot merge HyperLogLog with different\
precisions.")
self.reg = np.maximum(self.reg, other.reg)
def _linearcounting(self, num_zero):
return self.m * np.log(self.m / float(num_zero))
def _largerange_correction(self, e):
return - (1 << 32) * np.log(1.0 - e / (1 << 32))
def count(self):
'''
Estimate the cardinality of the data seen so far.
'''
# Use HyperLogLog estimation function
e = self.alpha * float(self.m ** 2) / np.sum(2.0**(-self.reg))
# Small range correction
if e <= (5.0 / 2.0) * self.m:
num_zero = self.m - np.count_nonzero(self.reg)
return self._linearcounting(num_zero)
# Normal range, no correction
if e <= (1.0 / 30.0) * (1 << 32):
return e
# Large range correction
return self._largerange_correction(e)
@classmethod
def union(cls, *hyperloglogs):
'''
Return the union of all given HyperLogLogs
'''
if len(hyperloglogs) < 2:
raise ValueError("Cannot union less than 2 HyperLogLog\
sketches")
m = hyperloglogs[0].m
if not all(h.m == m for h in hyperloglogs):
raise ValueError("Cannot union HyperLogLog sketches with\
different precisions")
reg = np.maximum.reduce([h.reg for h in hyperloglogs])
h = cls(reg=reg)
return h
def bytesize(self):
'''
Return the size of the HyperLogLog in bytes.
'''
# Since p is no larger than 64, use 8 bits
p_size = struct.calcsize('B')
# Each register value is no larger than 64, use 8 bits
# TODO: is there a way to use 5 bits instead of 8 bits
# to store integer in Python?
reg_val_size = struct.calcsize('B')
return p_size + reg_val_size * self.m
def serialize(self, buf):
'''
Serialize this HyperLogLog into bytes, store in the `buf`.
This is more efficient than using pickle.dumps on the object.
'''
if len(buf) < self.bytesize():
raise ValueError("The buffer does not have enough space\
for holding this HyperLogLog.")
fmt = 'B%dB' % self.m
struct.pack_into(fmt, buf, 0, self.p, *self.reg)
@classmethod
def deserialize(cls, buf):
'''
Reconstruct a HyperLogLog from bytes in `buf`.
This is more efficient than using the pickle.loads on the pickled
bytes.
'''
size = struct.calcsize('B')
try:
p = struct.unpack_from('B', buf, 0)[0]
except TypeError:
p = struct.unpack_from('B', buffer(buf), 0)[0]
h = cls(p)
offset = size
try:
h.reg = np.array(struct.unpack_from('%dB' % h.m,
buf, offset), dtype=np.int8)
except TypeError:
h.reg = np.array(struct.unpack_from('%dB' % h.m,
buffer(buf), offset), dtype=np.int8)
return h
def __getstate__(self):
'''
This function is called when pickling the HyperLogLog object.
Returns a bytearray which will then be pickled.
Note that the bytes returned by the Python pickle.dumps is not
the same as the buffer returned by this function.
'''
buf = bytearray(self.bytesize())
self.serialize(buf)
return buf
def __setstate__(self, buf):
'''
This function is called when unpickling the HyperLogLog object.
Initialize the object with data in the buffer.
Note that the input buffer is not the same as the input to the
Python pickle.loads function.
'''
size = struct.calcsize('B')
try:
p = struct.unpack_from('B', buf, 0)[0]
except TypeError:
p = struct.unpack_from('B', buffer(buf), 0)[0]
self.__init__(p=p)
offset = size
try:
self.reg = np.array(struct.unpack_from('%dB' % self.m,
buf, offset), dtype=np.int8)
except TypeError:
self.reg = np.array(struct.unpack_from('%dB' % self.m,
buffer(buf), offset), dtype=np.int8)
class HyperLogLogPlusPlus(HyperLogLog):
'''
The HyperLogLog++, an enhanced HyperLogLog from Google.
http://research.google.com/pubs/pub40671.html
Main changes:
1) Use 64 bits instead of 32 bits for hash function
2) A new small-cardinality estimation scheme
3) Sparse representation (not implemented here)
'''
_hash_range_bit = 64
_hash_range_byte = 8
_struct_fmt_str = '<Q'
def _get_threshold(self, p):
return _thresholds[p - 4]
def _estimate_bias(self, e, p):
bias_vector = _bias[p - 4]
estimate_vector = _raw_estimate[p - 4]
nearest_neighbors = np.argsort((e - estimate_vector)**2)[:6]
return np.mean(bias_vector[nearest_neighbors])
def count(self):
num_zero = self.m - np.count_nonzero(self.reg)
if num_zero > 0:
# linear counting
lc = self._linearcounting(num_zero)
if lc <= self._get_threshold(self.p):
return lc
# Use HyperLogLog estimation function
e = self.alpha * float(self.m ** 2) / np.sum(2.0**(-self.reg))
if e <= 5 * self.m:
return e - self._estimate_bias(e, self.p)
else:
return e
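if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module):
    # estimate distinct tokens in two small byte strings, merge the sketches, and
    # round-trip the result through serialize()/deserialize(). Counts are approximate.
    h1 = HyperLogLog(p=12)
    h2 = HyperLogLog(p=12)
    for token in b"to be or not to be".split():
        h1.update(token)
    for token in b"that is the question".split():
        h2.update(token)
    h1.merge(h2)  # h1 now approximates the union of both token sets
    print("Approximate distinct tokens:", h1.count())
    buf = bytearray(h1.bytesize())
    h1.serialize(buf)
    restored = HyperLogLog.deserialize(buf)
    print("Round-tripped count matches:", restored.count() == h1.count())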
| 33.522581
| 82
| 0.577463
|
f18924b9a537a34c6294d8e3e7826087a3e6e505
| 966
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/site_config_resource_paged.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-web/azure/mgmt/web/models/site_config_resource_paged.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-web/azure/mgmt/web/models/site_config_resource_paged.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class SiteConfigResourcePaged(Paged):
"""
A paging container for iterating over a list of :class:`SiteConfigResource <azure.mgmt.web.models.SiteConfigResource>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[SiteConfigResource]'}
}
def __init__(self, *args, **kwargs):
super(SiteConfigResourcePaged, self).__init__(*args, **kwargs)
| 34.5
| 129
| 0.587992
|
5c64afc8596ba927f2b9873857943cf1b10d4d82
| 1,145
|
py
|
Python
|
setup.py
|
richard-lane/fourbody
|
9c029ad4d179e7ad7448522166e09c29c7096071
|
[
"MIT"
] | null | null | null |
setup.py
|
richard-lane/fourbody
|
9c029ad4d179e7ad7448522166e09c29c7096071
|
[
"MIT"
] | null | null | null |
setup.py
|
richard-lane/fourbody
|
9c029ad4d179e7ad7448522166e09c29c7096071
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name="fourbody",
packages=["fourbody"],
version="0.2",
license="MIT",
description="Phase space parameterisation for four-body decays X -> h1+ h2- h3- h4+",
long_description="See the projet homepage for details",
author="Richard Lane",
author_email="richard.lane6798@gmail.com",
url="https://github.com/richard-lane/fourbody",
download_url="https://github.com/richard-lane/fourbody/archive/v_02.tar.gz",
keywords=[
"Physics",
"hadron",
"Dalitz",
"phase space",
"phasespace",
"root",
"cern",
],
install_requires=["numpy", "pylorentz"],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| 31.805556
| 89
| 0.593013
|
2b1902673621d0909ad9cc48a79a4cf27adf8af4
| 23,269
|
py
|
Python
|
virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/net_tools/basics/uri.py
|
lakhlaifi/RedHat-Ansible
|
27c5077cced9d416081fcd5d69ea44bca0317fa4
|
[
"Apache-2.0"
] | null | null | null |
virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/net_tools/basics/uri.py
|
lakhlaifi/RedHat-Ansible
|
27c5077cced9d416081fcd5d69ea44bca0317fa4
|
[
"Apache-2.0"
] | null | null | null |
virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/net_tools/basics/uri.py
|
lakhlaifi/RedHat-Ansible
|
27c5077cced9d416081fcd5d69ea44bca0317fa4
|
[
"Apache-2.0"
] | 1
|
2020-02-13T14:24:57.000Z
|
2020-02-13T14:24:57.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Romeo Theriault <romeot () hawaii.edu>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
- For Windows targets, use the M(win_uri) module instead.
version_added: "1.1"
options:
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
type: str
required: true
dest:
description:
- A path of where to download the file to (if desired). If I(dest) is a
directory, the basename of the file on the remote server will be used.
type: path
url_username:
description:
- A username for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ user ]
url_password:
description:
- A password for the module to use for Digest, Basic or WSSE authentication.
type: str
aliases: [ password ]
body:
description:
- The body of the http request/response to the web service. If C(body_format) is set
to 'json' it will take an already formatted JSON string or convert a data structure
into JSON. If C(body_format) is set to 'form-urlencoded' it will convert a dictionary
or list of tuples into an 'application/x-www-form-urlencoded' string. (Added in v2.7)
type: raw
body_format:
description:
- The serialization format of the body. When set to C(json) or C(form-urlencoded), encodes the
body argument, if needed, and automatically sets the Content-Type header accordingly.
As of C(2.3) it is possible to override the `Content-Type` header, when
set to C(json) or C(form-urlencoded) via the I(headers) option.
type: str
choices: [ form-urlencoded, json, raw ]
default: raw
version_added: "2.0"
method:
description:
- The HTTP method of the request or response.
- In more recent versions we do not restrict the method at the module level anymore
but it still must be a valid method accepted by the service handling the request.
type: str
default: GET
return_content:
description:
- Whether or not to return the body of the response as a "content" key in
the dictionary result.
- Independently of this option, if the reported Content-type is "application/json", then the JSON is
always loaded into a key called C(json) in the dictionary results.
type: bool
default: no
force_basic_auth:
description:
- Force the sending of the Basic authentication header upon initial request.
- The library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail.
type: bool
default: no
follow_redirects:
description:
- Whether or not the URI module should follow redirects. C(all) will follow all redirects.
C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
are deprecated and will be removed in some future version of Ansible.
type: str
choices: [ all, 'none', safe ]
default: safe
creates:
description:
    - A filename; when it already exists, this step will not be run.
type: path
removes:
description:
    - A filename; when it does not exist, this step will not be run.
type: path
status_code:
description:
- A list of valid, numeric, HTTP status codes that signifies success of the request.
type: list
default: [ 200 ]
timeout:
description:
- The socket level timeout in seconds
type: int
default: 30
HEADER_:
description:
    - Any parameter starting with "HEADER_" is sent with your request as a header.
For example, HEADER_Content-Type="application/json" would send the header
"Content-Type" along with your request with a value of "application/json".
- This option is deprecated as of C(2.1) and will be removed in Ansible 2.9.
Use I(headers) instead.
type: dict
headers:
description:
- Add custom HTTP headers to a request in the format of a YAML hash. As
of C(2.3) supplying C(Content-Type) here will override the header
generated by supplying C(json) or C(form-urlencoded) for I(body_format).
type: dict
version_added: '2.1'
others:
description:
- All arguments accepted by the M(file) module also work here
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
    - This should only be set to C(no) on personally controlled sites using self-signed certificates.
- Prior to 1.9.2 the code defaulted to C(no).
type: bool
default: yes
version_added: '1.9.2'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, I(client_key) is not required
type: path
version_added: '2.4'
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If I(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '2.4'
src:
description:
- Path to file to be submitted to the remote server.
- Cannot be used with I(body).
type: path
version_added: '2.7'
remote_src:
description:
      - If C(no), the module will search for C(src) on the originating/master machine.
      - If C(yes), the module will use the C(src) path on the remote/target machine.
type: bool
default: no
version_added: '2.7'
force:
description:
- If C(yes) do not get a cached copy.
type: bool
default: no
aliases: [ thirsty ]
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
unix_socket:
description:
      - Path to a Unix domain socket to use for the connection.
version_added: '2.8'
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
notes:
- The dependency on httplib2 was removed in Ansible 2.1.
- The module returns all the HTTP headers in lower-case.
- For Windows targets, use the M(win_uri) module instead.
seealso:
- module: get_url
- module: win_uri
author:
- Romeo Theriault (@romeotheriault)
'''
EXAMPLES = r'''
- name: Check that you can connect (GET) to a page and it returns a status 200
uri:
url: http://www.example.com
- name: Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents
uri:
url: http://www.example.com
return_content: yes
register: this
failed_when: "'AWESOME' not in this.content"
- name: Create a JIRA issue
uri:
url: https://your.jira.example.com/rest/api/2/issue/
user: your_username
password: your_pass
method: POST
body: "{{ lookup('file','issue.json') }}"
force_basic_auth: yes
status_code: 201
body_format: json
- name: Login to a form based webpage, then use the returned cookie to access the app in later tasks
uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
name: your_username
password: your_password
enter: Sign in
status_code: 302
register: login
- name: Login to a form based webpage using a list of tuples
uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body_format: form-urlencoded
body:
- [ name, your_username ]
- [ password, your_password ]
- [ enter, Sign in ]
status_code: 302
register: login
- name: Connect to website using a previously stored cookie
uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: yes
headers:
Cookie: "{{ login.set_cookie }}"
- name: Queue build of a project in Jenkins
uri:
url: http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
method: GET
force_basic_auth: yes
status_code: 201
- name: POST from contents of local file
uri:
url: https://httpbin.org/post
method: POST
src: file.json
- name: POST from contents of remote file
uri:
url: https://httpbin.org/post
method: POST
src: /path/to/my/file.json
remote_src: yes
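# The following task is an illustrative sketch only; the URL is hypothetical.
# It combines follow_redirects, timeout and a list of acceptable status codes
# so a redirect response is treated as success without being followed.
- name: Check a redirecting endpoint without following the redirect
  uri:
    url: https://www.example.com/old-location
    method: GET
    follow_redirects: none
    timeout: 10
    status_code: [ 301, 302 ]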
'''
RETURN = r'''
# The return information includes all the HTTP headers in lower-case.
elapsed:
description: The number of seconds that elapsed while performing the download
returned: on success
type: int
sample: 23
msg:
description: The HTTP message from the request
returned: always
type: str
sample: OK (unknown bytes)
redirected:
description: Whether the request was redirected
returned: on success
type: bool
sample: false
status:
description: The HTTP status code from the request
returned: always
type: int
sample: 200
url:
description: The actual URL used for the request
returned: always
type: str
sample: https://www.ansible.com/
'''
import cgi
import datetime
import json
import os
import re
import shutil
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import PY2, iteritems, string_types
from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.urls import fetch_url, url_argument_spec
JSON_CANDIDATES = ('text', 'json', 'javascript')
def format_message(err, resp):
msg = resp.pop('msg')
return err + (' %s' % msg if msg else '')
def write_file(module, url, dest, content, resp):
    # create a tempfile and write the response content to it
fd, tmpsrc = tempfile.mkstemp(dir=module.tmpdir)
f = open(tmpsrc, 'wb')
try:
f.write(content)
except Exception as e:
os.remove(tmpsrc)
msg = format_message("Failed to create temporary content file: %s" % to_native(e), resp)
module.fail_json(msg=msg, **resp)
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
msg = format_message("Source '%s' does not exist" % tmpsrc, resp)
module.fail_json(msg=msg, **resp)
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
msg = format_message("Source '%s' not readable" % tmpsrc, resp)
module.fail_json(msg=msg, **resp)
checksum_src = module.sha1(tmpsrc)
    # check whether the dest file already exists
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
msg = format_message("Destination '%s' not writable" % dest, resp)
module.fail_json(msg=msg, **resp)
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
msg = format_message("Destination '%s' not readable" % dest, resp)
module.fail_json(msg=msg, **resp)
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
msg = format_message("Destination dir '%s' not writable" % os.path.dirname(dest), resp)
module.fail_json(msg=msg, **resp)
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
except Exception as e:
os.remove(tmpsrc)
msg = format_message("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), resp)
module.fail_json(msg=msg, **resp)
os.remove(tmpsrc)
def url_filename(url):
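    # For example, url_filename('https://host/dir/archive.tar.gz') returns
    # 'archive.tar.gz', while 'https://host/' falls back to 'index.html'.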
fn = os.path.basename(urlsplit(url)[2])
if fn == '':
return 'index.html'
return fn
def absolute_location(url, location):
"""Attempts to create an absolute URL based on initial URL, and
next URL, specifically in the case of a ``Location`` header.
"""
if '://' in location:
return location
elif location.startswith('/'):
parts = urlsplit(url)
base = url.replace(parts[2], '')
return '%s%s' % (base, location)
elif not location.startswith('/'):
base = os.path.dirname(url)
return '%s/%s' % (base, location)
else:
return location
def kv_list(data):
''' Convert data into a list of key-value tuples '''
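    # For example, {'name': 'ansible'} becomes [('name', 'ansible')], while a list
    # input such as [['name', 'ansible']] is simply copied into a new list.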
if data is None:
return None
if isinstance(data, Sequence):
return list(data)
if isinstance(data, Mapping):
return list(data.items())
raise TypeError('cannot form-urlencode body, expect list or dict')
def form_urlencoded(body):
''' Convert data into a form-urlencoded string '''
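    # Illustrative sketch (hypothetical body; dict ordering may vary):
    #   form_urlencoded({'name': 'ansible', 'tags': ['a', 'b']})
    #   -> 'name=ansible&tags=a&tags=b'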
if isinstance(body, string_types):
return body
if isinstance(body, (Mapping, Sequence)):
result = []
        # Turn a list of lists into a list of tuples that urlencode accepts
for key, values in kv_list(body):
if isinstance(values, string_types) or not isinstance(values, (Mapping, Sequence)):
values = [values]
for value in values:
if value is not None:
result.append((to_text(key), to_text(value)))
return urlencode(result, doseq=True)
return body
def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
    # if dest is set and is a directory, check whether we get redirected and
    # set the filename from that url
redirected = False
redir_info = {}
r = {}
src = module.params['src']
if src:
try:
headers.update({
'Content-Length': os.stat(src).st_size
})
data = open(src, 'rb')
except OSError:
module.fail_json(msg='Unable to open source file %s' % src, elapsed=0)
else:
data = body
kwargs = {}
if dest is not None:
        # Stash follow_redirects; in this block we don't want to follow redirects.
        # We'll reset it back to the supplied value soon.
follow_redirects = module.params['follow_redirects']
module.params['follow_redirects'] = False
if os.path.isdir(dest):
# first check if we are redirected to a file download
_, redir_info = fetch_url(module, url, data=body,
headers=headers,
method=method,
timeout=socket_timeout, unix_socket=module.params['unix_socket'])
# if we are redirected, update the url with the location header,
# and update dest with the new url filename
if redir_info['status'] in (301, 302, 303, 307):
url = redir_info['location']
redirected = True
dest = os.path.join(dest, url_filename(url))
        # if the destination file already exists, only download if the remote file is newer
if os.path.exists(dest):
kwargs['last_mod_time'] = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
# Reset follow_redirects back to the stashed value
module.params['follow_redirects'] = follow_redirects
resp, info = fetch_url(module, url, data=data, headers=headers,
method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
**kwargs)
try:
content = resp.read()
except AttributeError:
        # there was no content, but the body of an error response
        # may have been stored in the info dict as 'body'
content = info.pop('body', '')
if src:
# Try to close the open file handle
try:
data.close()
except Exception:
pass
r['redirected'] = redirected or info['url'] != url
r.update(redir_info)
r.update(info)
return r, content, dest
def main():
argument_spec = url_argument_spec()
argument_spec.update(
dest=dict(type='path'),
url_username=dict(type='str', aliases=['user']),
url_password=dict(type='str', aliases=['password'], no_log=True),
body=dict(type='raw'),
body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw']),
src=dict(type='path'),
method=dict(type='str', default='GET'),
return_content=dict(type='bool', default=False),
follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
creates=dict(type='path'),
removes=dict(type='path'),
status_code=dict(type='list', default=[200]),
timeout=dict(type='int', default=30),
headers=dict(type='dict', default={}),
unix_socket=dict(type='path'),
)
module = AnsibleModule(
argument_spec=argument_spec,
# TODO: Remove check_invalid_arguments in 2.9
check_invalid_arguments=False,
add_file_common_args=True,
mutually_exclusive=[['body', 'src']],
)
url = module.params['url']
body = module.params['body']
body_format = module.params['body_format'].lower()
method = module.params['method'].upper()
dest = module.params['dest']
return_content = module.params['return_content']
creates = module.params['creates']
removes = module.params['removes']
status_code = [int(x) for x in list(module.params['status_code'])]
socket_timeout = module.params['timeout']
dict_headers = module.params['headers']
if not re.match('^[A-Z]+$', method):
module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")
if body_format == 'json':
        # Encode the body unless it is a string; in that case assume it is pre-formatted JSON
if not isinstance(body, string_types):
body = json.dumps(body)
if 'content-type' not in [header.lower() for header in dict_headers]:
dict_headers['Content-Type'] = 'application/json'
elif body_format == 'form-urlencoded':
if not isinstance(body, string_types):
try:
body = form_urlencoded(body)
except ValueError as e:
module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
if 'content-type' not in [header.lower() for header in dict_headers]:
dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'
# TODO: Deprecated section. Remove in Ansible 2.9
# Grab all the http headers. Need this hack since passing multi-values is
# currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
for key, value in iteritems(module.params):
if key.startswith("HEADER_"):
module.deprecate('Supplying headers via HEADER_* is deprecated. Please use `headers` to'
' supply headers for the request', version='2.9')
skey = key.replace("HEADER_", "")
dict_headers[skey] = value
# End deprecated section
if creates is not None:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of uri executions.
if os.path.exists(creates):
module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)
if removes is not None:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of uri executions.
if not os.path.exists(removes):
module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)
# Make the request
start = datetime.datetime.utcnow()
resp, content, dest = uri(module, url, dest, body, body_format, method,
dict_headers, socket_timeout)
resp['elapsed'] = (datetime.datetime.utcnow() - start).seconds
resp['status'] = int(resp['status'])
resp['changed'] = False
# Write the file out if requested
if dest is not None:
if resp['status'] in status_code and resp['status'] != 304:
write_file(module, url, dest, content, resp)
# allow file attribute changes
resp['changed'] = True
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = dest
resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
resp['path'] = dest
# Transmogrify the headers, replacing '-' with '_', since variables don't
# work with dashes.
# In python3, the headers are title cased. Lowercase them to be
# compatible with the python2 behaviour.
uresp = {}
for key, value in iteritems(resp):
ukey = key.replace("-", "_").lower()
uresp[ukey] = value
if 'location' in uresp:
uresp['location'] = absolute_location(url, uresp['location'])
# Default content_encoding to try
content_encoding = 'utf-8'
if 'content_type' in uresp:
content_type, params = cgi.parse_header(uresp['content_type'])
if 'charset' in params:
content_encoding = params['charset']
u_content = to_text(content, encoding=content_encoding)
if any(candidate in content_type for candidate in JSON_CANDIDATES):
try:
js = json.loads(u_content)
uresp['json'] = js
except Exception:
if PY2:
sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2
else:
u_content = to_text(content, encoding=content_encoding)
if resp['status'] not in status_code:
uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
module.fail_json(content=u_content, **uresp)
elif return_content:
module.exit_json(content=u_content, **uresp)
else:
module.exit_json(**uresp)
if __name__ == '__main__':
main()
| 35.471037
| 115
| 0.644377
|
1fd252de442c85c8d27b538ca82515856fd3276c
| 102
|
py
|
Python
|
main.py
|
IanLuan/pyHangman
|
476756825825e064baad4b353f0451d82a3afb4b
|
[
"MIT"
] | 1
|
2019-03-08T21:08:52.000Z
|
2019-03-08T21:08:52.000Z
|
main.py
|
IanLuan/pyHangman
|
476756825825e064baad4b353f0451d82a3afb4b
|
[
"MIT"
] | null | null | null |
main.py
|
IanLuan/pyHangman
|
476756825825e064baad4b353f0451d82a3afb4b
|
[
"MIT"
] | 1
|
2021-04-27T02:54:35.000Z
|
2021-04-27T02:54:35.000Z
|
from game.hangman import Hangman
from game.word import Word
from game.game import Game
game = Game()
| 17
| 32
| 0.784314
|
2ead70d7ca23b43666fc4d1b0050ea664e6fd7a9
| 39,303
|
py
|
Python
|
cdap-docs/tools/licenses/licenses.py
|
pa-mahe/cdap
|
17e80b38a17215bced088d7c1c7ee58386d0ae59
|
[
"Apache-2.0"
] | 3
|
2019-06-30T08:14:21.000Z
|
2019-10-14T10:03:40.000Z
|
cdap-docs/tools/licenses/licenses.py
|
pa-mahe/cdap
|
17e80b38a17215bced088d7c1c7ee58386d0ae59
|
[
"Apache-2.0"
] | 21
|
2018-10-31T10:19:47.000Z
|
2020-02-06T12:06:05.000Z
|
cdap-docs/tools/licenses/licenses.py
|
pa-mahe/cdap
|
17e80b38a17215bced088d7c1c7ee58386d0ae59
|
[
"Apache-2.0"
] | 5
|
2019-09-04T19:09:50.000Z
|
2020-04-22T16:06:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2014-2017 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Checks that the license dependencies files used match the dependencies in the product.
# Builds .rst files for output and PDF generation.
# Usage: python licenses.py
import csv
import json
import os
import subprocess
import sys
import traceback
from datetime import date
from optparse import OptionParser
from pprint import pprint
VERSION = '0.1.3'
COPYRIGHT_YEAR = date.today().year
MASTER_CSV = 'cdap-dependencies-master.csv'
MASTER_CSV_COMMENTS = {'bower': """# Bower Dependencies
# dependency,version,type,license,license_url,homepage,license_page
""",
'npm': """#
# NPM Dependencies
# dependency,version,type,license,license_url,homepage,license_page
""",
'jar': """#
# Jar Dependencies
# dependency,version,type,license,license_url
""",
}
MASTER_CSV_TYPES = ('bower', 'npm', 'jar')
ENTERPRISE = 'cdap-enterprise-dependencies'
LEVEL_1 = 'cdap-level-1-dependencies'
LOCAL_SANDBOX = 'cdap-sandbox-dependencies'
CDAP_UI = 'cdap-ui-dependencies'
CASK_REVERSE_DOMAIN = 'co.cask'
LICENSES_SOURCE = '../../reference-manual/source/licenses'
CDAP_BOWER_DEPENDENCIES = ('../../../cdap-ui/bower.json', '#')
CDAP_NPM_DEPENDENCIES = ('../../../cdap-ui/package.json', '@')
CDAP_UI_SOURCES = {'bower': CDAP_BOWER_DEPENDENCIES, 'npm': CDAP_NPM_DEPENDENCIES}
CDAP_UI_CASK_DEPENDENCIES = u'cask-'
CDAP_UI_DEPENDENCIES_KEY = 'dependencies'
MIT_LICENSE = "'MIT'"
SPACE = ' '*3
BACK_DASH = '\-'
SCRIPT_DIR_PATH = os.path.dirname(os.path.abspath(__file__))
DEBUG = False
QUIET = False
def get_cdap_version():
# Sets the Build Version
grep_version_cmd = "grep '<version>' ../../../pom.xml | awk 'NR==1;START{print $1}'"
version = None
try:
# Python 2.6 commands
p1 = subprocess.Popen(['grep' , '<version>', '../../../pom.xml' ], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['awk', 'NR==1;START{print $1}'], stdin=p1.stdout, stdout=subprocess.PIPE)
full_version_temp = p2.communicate()[0]
# Python 2.6 command end
# full_version = subprocess.check_output(grep_version_cmd, shell=True).strip().replace('<version>', '').replace('</version>', '')
full_version = full_version_temp.strip().replace('<version>', '').replace('</version>', '')
version = full_version.replace('-SNAPSHOT', '')
except:
print '\nWARNING: Could not get version using grep\n'
raise
return version
def parse_options():
""" Parses args options.
"""
parser = OptionParser(
usage="%prog [options] [file]",
description='Checks that the license dependencies files used match the dependencies in the product.')
parser.add_option(
'-v', '--version',
action='store_true',
dest='version',
help='Version of this software',
default=False)
parser.add_option(
'-z', '--debug',
action='store_true',
dest='debug',
help='Print debug messages',
default=False)
cdap_version = get_cdap_version()
parser.add_option(
'-w', '--build_version',
dest='build_version',
help='The built version of CDAP '
"(default: %s)" % cdap_version,
default=cdap_version)
parser.add_option(
'-u', '--ui',
action='store_true',
dest='cdap_ui',
help='Process CDAP UI dependencies',
default=False)
parser.add_option(
'-e', '--enterprise',
action='store_true',
dest='enterprise',
help='Process enterprise dependencies',
default=False)
parser.add_option(
'-l', '--level_1',
action='store_true',
dest='level_1',
help='Process level 1 dependencies',
default=False)
parser.add_option(
'-s', '--local_sandbox',
action='store_true',
dest='local_sandbox',
help='Process local sandbox dependencies',
default=False)
parser.add_option(
'-a', '--rst_enterprise',
action='store_true',
dest='rst_enterprise',
help='Print enterprise dependencies to an rst file',
default=False)
parser.add_option(
'-b', '--rst_level_1',
action='store_true',
dest='rst_level_1',
help='Print level1 dependencies to an rst file',
default=False)
parser.add_option(
'-c', '--rst_local_sandbox',
action='store_true',
dest='rst_local_sandbox',
help='Print local sandbox dependencies to an rst file',
default=False)
parser.add_option(
'-d', '--rst_cdap_ui',
action='store_true',
dest='rst_cdap_ui',
help='Print CDAP UI dependencies to an rst file',
default=False)
parser.add_option(
'-m', '--master_print',
action='store_true',
dest='master_print_terminal',
help='Prints to terminal the master dependency file',
default=False)
parser.add_option(
'-p', '--print',
action='store_true',
dest='master_print_file',
help='Prints to file a new master dependency file',
default=False)
parser.add_option(
'-t', '--list_special',
action='store_true',
dest='list_special',
help='Lists dependencies that require special handling (typically those not Apache or MIT licenses)',
default=False)
parser.add_option(
'-y', '--use-next-year',
action='store_true',
dest='use_next_year',
help=("Instead of using the current year " +
"(%d), uses the next year (%d) for copyright year in the PDFs" % (COPYRIGHT_YEAR, COPYRIGHT_YEAR+1)),
default=False)
(options, args) = parser.parse_args()
global DEBUG
DEBUG = options.debug
if options.version:
print "Version: %s" % VERSION
sys.exit(1)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return options, args
def log(message, type):
"""Basic logger, print output directly to stdout and errors to stderr.
"""
(sys.stdout if type == 'notice' else sys.stderr).write(message + '\n')
def process_master():
# Read in the master csv files and create a dictionary of it
# Contains both Jars and Bower dependencies
# Jar dependencies:
# Keys are the jars, Values are the Library instances
# 'jar','Version','Classifier','License','License URL'
# Example:
# 'bonecp-0.8.0.RELEASE.jar','0.8.0','RELEASE','Apache License, Version 2.0','http://www.apache.org/licenses/LICENSE-2.0.html'
# NPM & Bower dependencies:
# Keys are the dependencies, Values are the Library instances
# 'dependency','version','homepage','license','license_url', 'type'
# dependency,version,type,license,license_url,homepage,license_page
# Example:
# 'angular','1.3.15','bower','MIT License','http://opensource.org/licenses/MIT','https://github.com/angular/bower-angular','https://github.com/angular/angular.js/blob/master/LICENSE'
# Get the current dependencies master csv file
#
# As of version 0.1.1
# Jar dependencies can now have a license_source_url, which is the URL where the license was determined.
master_libs_dict = {}
csv_path = os.path.join(SCRIPT_DIR_PATH, MASTER_CSV)
print_quiet("Reading master file: %s" % csv_path)
with open(csv_path, 'rb') as csvfile:
row_count = 0
comment_count = 0
csv_reader = csv.reader(csvfile)
for row in csv_reader:
row_count += 1
dependency = row[0]
if dependency.startswith('#'):
# Comment line
comment_count += 1
elif len(row) in (5, 6, 7):
if len(row) in (5, 6):
license_source_url = row[5] if len(row)==6 else None
lib = Library(dependency, row[3], row[4], license_source_url)
else:
# len(row)==7:
lib = UI_Library(*row)
# Place lib reference in dictionary
if not master_libs_dict.has_key(lib.id):
master_libs_dict[lib.dependency] = lib
else:
lib.print_duplicate(master_libs_dict)
else:
print "%sError with %s\n%srow: %s" % (SPACE, dependency, SPACE, row)
# Print out the results
keys = master_libs_dict.keys()
# keys.sort()
# for k in keys:
# master_libs_dict[k].pretty_print()
print_quiet("Master CSV: Rows read: %s (comments: %s); Unique Keys created: %s" % (row_count, comment_count, len(keys)))
return master_libs_dict
def master_print_terminal():
master_libs_dict = process_master()
# Print out the results
keys = master_libs_dict.keys()
keys.sort()
    max_characters = len("%d" % len(keys))  # number of digits in the key count, used to align the numbering
i = 0
for type in MASTER_CSV_TYPES:
print MASTER_CSV_COMMENTS[type]
for k in keys:
if master_libs_dict[k].type == type:
i += 1
master_libs_dict[k].pretty_print(i, max_characters)
# for k in keys:
# print "key: %s %s" % (k, master_libs_dict[k])
# master_libs_dict[k].pretty_print()
def test_for_application(app):
results = subprocess.call(['which', app])
if results == 0:
print "Found executable for '%s'" % app
else:
message = "No executable for '%s'" % app
if app == 'bower':
print "%s; install bower using: npm install -g bower" % message
elif app == 'npm':
print "%s; need to install npm to run license software" % message
else:
print message
return not results
def process_cdap_ui(options):
# Read in the current master csv file and create a structure with it
# Read in the checked-in dependency files:
# cdap-ui/bower.json
# cdap-ui/package.json
# Create and print to standard out the list of the references
# Make a list of the references for which links are missing and need to be added to the master
# Creates a new master list
# Return a list:
# 'Dependency','Version','homepage','License','License URL','type'
# Versioning syntax: see https://nodesource.com/blog/semver-tilde-and-caret
master_libs_dict = process_master()
cdap_ui_dict = {}
missing_libs_dict = {}
new_versions_dict = {}
old_versions_dict = {}
print_quiet()
def clean_version(version):
        # Cleans up a version: either
        # extracts it from an HTTP(S) URL or
        # strips off any leading '~' or '^'
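        # Illustrative examples: '~1.2.3' and '^1.2.3' both clean to '1.2.3', and
        # 'https://registry.npmjs.org/x/-/x-3.0.1.tgz' cleans to '3.0.1'.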
if version.startswith('http'):
# "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-3.0.1.tgz"
version = version.split('/')[-1]
if version.find('-') != -1:
version = version.split('-')[-1]
if version.split('.')[-1].lower() in ('jar', 'tgz', 'zip'):
version = version[:-4]
elif version[0] in ('~', '^'):
version = version[1:]
return version
def higher_version(v1, v2):
"""Compare v1 and v2 and is true if v2 > v1 (v2 is a higher version than v1)"""
v1 = clean_version(v1)
v2 = clean_version(v2)
if v1 == v2:
return False
else:
v1_list = v1.split('.')
v2_list = v2.split('.')
if len(v1_list) != len(v2_list):
if len(v1_list) > len(v2_list):
v2_list = v2_list + ['0'] * (len(v1_list) - len(v2_list))
else:
v1_list = v1_list + ['0'] * (len(v2_list) - len(v1_list))
return cmp(v2_list, v1_list) == 1
for type in CDAP_UI_SOURCES.keys():
source = CDAP_UI_SOURCES[type][0]
json_path = os.path.join(SCRIPT_DIR_PATH, source)
print_quiet("Reading '%s' dependencies file:\n%s" % (type, json_path))
with open(json_path) as data_file:
data = json.load(data_file)
if CDAP_UI_DEPENDENCIES_KEY in data.keys():
for dependency in data[CDAP_UI_DEPENDENCIES_KEY]:
if not dependency.startswith(CDAP_UI_CASK_DEPENDENCIES):
version = data[CDAP_UI_DEPENDENCIES_KEY][dependency]
version = clean_version(version)
if master_libs_dict.has_key(dependency):
# Look up reference in dictionary
# cdap_ui_dict[dependency] = master_libs_dict[dependency]
# Compare versions
if higher_version(master_libs_dict[dependency].version, version):
if new_versions_dict.has_key(dependency):
print_quiet("Dependency already in new versions: %s current: %s new: %s newer: %s" % (dependency,
master_libs_dict[dependency].version, new_versions_dict[dependency], version))
else:
print_quiet("New version: %s for %s (old %s)" % (version, dependency, master_libs_dict[dependency].version))
new_versions_dict[dependency]=version
old_versions_dict[dependency]=master_libs_dict[dependency].version
master_libs_dict[dependency].version = version
cdap_ui_dict[dependency] = master_libs_dict[dependency]
else:
missing_libs_dict[dependency] = (type, version)
keys = new_versions_dict.keys()
count_new = len(keys)
if count_new:
print_quiet("\nCDAP UI: New Versions: %s" % count_new)
keys.sort()
for key in keys:
print_quiet("%s : current: %s new: %s" % (key, old_versions_dict[key], new_versions_dict[key]))
keys = missing_libs_dict.keys()
count_missing = len(keys)
print_quiet("\nCDAP UI: Missing Artifacts: %s" % count_missing)
if count_missing:
all_apps_available = True
type_keys = CDAP_UI_SOURCES.keys()
for type in type_keys:
if not test_for_application(type):
all_apps_available = False
if all_apps_available:
keys.sort()
missing_list = []
for dependency in keys:
type, version = missing_libs_dict[dependency]
if type in type_keys:
dependency_version = "%s%s%s" % (dependency, CDAP_UI_SOURCES[type][1], version)
print_quiet(dependency_version)
if type == 'bower':
p1 = subprocess.Popen([type, 'info', dependency_version, 'homepage' ], stdout=subprocess.PIPE)
homepage = p1.communicate()[0].strip().split('\n')[-1:][0].replace("'", '')
elif type == 'npm':
p1 = subprocess.Popen([type, '--json', 'view', dependency_version, 'homepage' ], stdout=subprocess.PIPE)
homepage = p1.communicate()[0].strip().split('\n')[-1:][0]
homepage = homepage.split(' ')[-1:][0].replace('"', '')
row = '"%s","%s","%s","","","%s",""' % (dependency, version, type, homepage)
missing_list.append(row)
else:
print_quiet("Unknown type: '%s' for dependency '%s', version '%s'" % (type, dependency, version))
print_quiet('\nCDAP UI: Missing Artifacts List:')
for row in missing_list:
print_quiet(row)
if options.debug:
for dependency in keys:
version = missing_libs_dict[dependency]
dependency_version = "%s#%s" % (dependency, version)
print_quiet(dependency_version)
p1 = subprocess.Popen(['bower', 'info', dependency_version, 'license' ], stdout=subprocess.PIPE)
results = p1.communicate()[0]
if results.count(MIT_LICENSE):
# Includes MIT License
print_quiet('MIT License\n')
else:
# Unknown license
p1 = subprocess.Popen(['bower', 'info', dependency_version], stdout=subprocess.PIPE)
results = p1.communicate()[0]
print_quiet("Debug:\n%s" % results)
p1 = subprocess.Popen(['bower', 'home', dependency_version], stdout=subprocess.PIPE)
print_quiet("\nCDAP UI: Row count: %s" % len(cdap_ui_dict.keys()))
# Return 'Dependency','Version', 'Type','License','License URL', 'License Source URL'
cdap_ui_data = []
keys = cdap_ui_dict.keys()
keys.sort()
for dependency in keys:
lib = cdap_ui_dict[dependency]
row = list(lib.get_row())
cdap_ui_data.append(UILicense(*row))
print_quiet("%s : %s" % (dependency, row))
# Print new master entries
print_quiet("\nNew Master Versions\n")
keys = master_libs_dict.keys()
keys.sort()
for type in ['bower', 'npm']:
print_quiet("# %s Dependencies\n# dependency,version,type,license,license_url,homepage,license_page" % type)
for dependency in keys:
lib = master_libs_dict[dependency]
row = list(lib.get_row())
if row[2] == type:
print_quiet("%s : %s" % (dependency, row))
print_quiet()
    # Write out a new master csv file, but only if one does not already exist
if count_new or count_missing:
write_new_master_csv_file(master_libs_dict)
return cdap_ui_data
def process_level_1(input_file, options):
master_libs_dict = process_master()
level_1_dict = {}
missing_libs_dict = {}
csv_path = os.path.join(SCRIPT_DIR_PATH, LICENSES_SOURCE, LEVEL_1 + '.csv')
print_quiet("Reading dependencies file:\n%s" % csv_path)
import csv
with open(csv_path, 'rb') as csvfile:
row_count = 0
unique_row_count = 0
csv_reader = csv.reader(csvfile)
for row in csv_reader:
row_count +=1
jar,group_id, artifact_id = row
key = jar
if not level_1_dict.has_key(key):
unique_row_count += 1
if master_libs_dict.has_key(jar):
# Look up jar reference in dictionary
lib = master_libs_dict[jar]
print_quiet("lib.jar %s" % lib.jar)
level_1_dict[key] = (group_id, artifact_id, lib.license, lib.license_url)
continue
if not missing_libs_dict.has_key(artifact_id) and not jar.startswith(CASK_REVERSE_DOMAIN):
missing_libs_dict[artifact_id] = jar
print_quiet("Level 1: Row count: %s" % row_count)
print_quiet("Level 1: Unique Row count: %s" % unique_row_count)
print_quiet("Level 1: Missing Artifacts: %s" % len(missing_libs_dict.keys()))
if len(missing_libs_dict.keys()) > 0:
for key in missing_libs_dict.keys():
print_quiet("Missing artifact_id: %s (for %s)" % (key, missing_libs_dict[key]))
print_quiet('Add these lines to the Master file:')
for key in missing_libs_dict.keys():
print_quiet('"%s","","","Apache License, Version 2.0","http://www.apache.org/licenses/LICENSE-2.0.html"' % missing_libs_dict[key])
# Return the 'Package','Artifact','License','License URL'
rst_data = []
keys = level_1_dict.keys()
keys.sort()
for k in keys:
row = level_1_dict[k]
rst_data.append(Level1License(*row))
return rst_data
def process_enterprise(input_file, options):
return _process_dependencies(ENTERPRISE)
def process_local_sandbox(input_file, options):
return _process_dependencies(LOCAL_SANDBOX)
def _process_dependencies(dependency):
# Read in the current master csv file and create a structure with it
# Read in the new dependencies csv file
# Create and print to standard out the list of the references
# Make a list of the references for which links are missing and need to be added to the master
# Make a new master list, if one does not already exist
# Return 'Package','Version','Classifier','License','License URL'
master_libs_dict = process_master()
# Read dependencies
new_libs_dict = {}
missing_libs_dict = {}
csv_path = os.path.join(SCRIPT_DIR_PATH, LICENSES_SOURCE, dependency + '.csv')
print_quiet("Reading dependencies file:\n%s" % csv_path)
import csv
with open(csv_path, 'rb') as csvfile:
row_count = 0
csv_reader = csv.reader(csvfile)
for row in csv_reader:
row_count += 1
jar = row[0]
lib = Library(row[0], '', '')
print_quiet("lib.jar %s" % lib.jar)
# Look up lib reference in master dictionary; if not there, add it
# if a CDAP jar, ignore it
if not master_libs_dict.has_key(lib.jar):
print_quiet(lib.jar + ' Not Present in Master')
if lib.jar.startswith('cdap-'):
print_quiet(' Skipping')
continue
else:
master_libs_dict[lib.jar] = lib
missing_libs_dict[lib.jar] = lib
print_quiet(' Adding to missing libs')
# Place lib reference in dictionary
if not new_libs_dict.has_key(lib.jar):
new_libs_dict[lib.jar] = master_libs_dict[lib.jar]
else:
lib.print_duplicate(new_libs_dict)
missing_entries = len(missing_libs_dict.keys())
for lib_dict in [master_libs_dict]:
keys = lib_dict.keys()
keys.sort()
missing_licenses = []
for k in keys:
if lib_dict[k].license == '':
missing_licenses.append(lib_dict[k])
if DEBUG:
lib_dict[k].pretty_print()
print_debug("Records: %s" % len(keys))
print_quiet("New CSV: Rows: %s" % len(new_libs_dict.keys()))
print_quiet("New Master CSV: Rows: %s" % len(master_libs_dict.keys()))
print_quiet("New Master CSV: Missing License Rows: %s" % len(missing_licenses))
print_quiet("New Master CSV: Missing Entry Rows: %s" % missing_entries)
if missing_licenses:
print_quiet('Missing Licenses')
for miss in missing_licenses:
print " %s" % miss
if missing_entries:
print_quiet('Missing Entries')
i = 0
for key in missing_libs_dict.keys():
i += 1
print " %2d: %s" % (i, missing_libs_dict[key])
    # Write out a new master csv file, but only if one does not already exist
if missing_entries or missing_licenses:
write_new_master_csv_file(master_libs_dict)
# Return the 'Package','Version','Classifier','License','License URL'
rst_data = []
keys = new_libs_dict.keys()
keys.sort()
for k in keys:
lib = new_libs_dict[k]
row = list(lib.get_row())
if row[2] == '':
row[2] = BACK_DASH
print_debug(row)
rst_data.append(License(*row))
return rst_data
def write_new_master_csv_file(lib_dict):
print 'Creating new Master CSV file'
import csv
csv.register_dialect('masterCSV', delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL, lineterminator='\n')
csv_path = os.path.join(SCRIPT_DIR_PATH, MASTER_CSV)
backup_csv_path = os.path.join(SCRIPT_DIR_PATH, MASTER_CSV + '.bu.csv')
if os.path.isfile(backup_csv_path):
print "Backup Master CSV: Exiting, as backup Master file already exists: %s" % backup_csv_path
return
if os.path.isfile(csv_path):
try:
import shutil
shutil.move(csv_path, backup_csv_path)
print "Created Backup Master CSV at: %s" % backup_csv_path
except:
print "Backup Master CSV: Exiting, as unable to create backup Master: %s" % backup_csv_path
return
if os.path.isfile(csv_path):
print "New Master CSV: Exiting, as new Master file already exists: %s" % csv_path
else:
csv_file = None
try:
csv_file = open(csv_path, 'w')
csv_writer = csv.writer(csv_file, 'masterCSV')
keys = lib_dict.keys()
keys.sort()
i = 0
for type in MASTER_CSV_TYPES:
csv_file.write(MASTER_CSV_COMMENTS[type])
for k in keys:
r = lib_dict[k].get_full_row()
row_type = lib_dict[k].type
if row_type == type:
i += 1
csv_writer.writerow(r)
finally:
if csv_file is not None:
csv_file.close()
else:
print "Unable to close New Master CSV: %s" % csv_path
print "New Master CSV: wrote %s records of %s to: %s" % (i, len(keys), csv_path)
def master_print_file():
master_libs_dict = process_master()
write_new_master_csv_file(master_libs_dict)
def print_rst_level_1(input_file, options):
title = 'Level 1'
file_base = LEVEL_1
header = '"Package","Artifact","License","License URL"'
widths = '20, 20, 20, 40'
data_list = process_level_1(input_file, options)
_print_dependencies(title, file_base, header, widths, data_list)
def print_rst_enterprise(input_file, options):
title = 'Distributed'
file_base = ENTERPRISE
header = '"Package","Version","Classifier","License","License URL"'
widths = '20, 10, 10, 20, 35'
data_list = process_enterprise(input_file, options)
_print_dependencies(title, file_base, header, widths, data_list)
def print_rst_local_sandbox(input_file, options):
title = 'Local Sandbox'
file_base = LOCAL_SANDBOX
header = '"Package","Version","Classifier","License","License URL"'
widths = '20, 10, 10, 20, 30'
data_list = process_local_sandbox(input_file, options)
_print_dependencies(title, file_base, header, widths, data_list)
def print_rst_cdap_ui(options):
title = 'UI'
file_base = CDAP_UI
header = '"Dependency","Version","Type","License","License Source URL"'
widths = '20, 10, 10, 20, 40'
data_list = process_cdap_ui(options)
print
_print_dependencies(title, file_base, header, widths, data_list)
def _print_dependencies(title, file_base, header, widths, data_list):
# Example: 'Level 1', LEVEL_1, ...
RST_HEADER=""".. meta::
:author: Cask Data, Inc.
:copyright: Copyright © %(year)d Cask Data, Inc.
:version: %(version)s
=================================================
Cask Data Application Platform |version|
=================================================
Cask Data Application Platform %(title)s Dependencies
--------------------------------------------------------------------------------
.. rst2pdf: PageBreak
.. rst2pdf: .. contents::
.. rst2pdf: build ../../../reference/licenses-pdf/
.. rst2pdf: config ../../../_common/_templates/pdf-config
.. rst2pdf: stylesheets ../../../_common/_templates/pdf-stylesheet
.. csv-table:: **Cask Data Application Platform %(title)s Dependencies**
:header: %(header)s
:widths: %(widths)s
"""
cdap_version = get_cdap_version()
if cdap_version:
RST_HEADER = RST_HEADER % {'version': cdap_version, 'title': title, 'header': header, 'widths': widths, 'year': COPYRIGHT_YEAR}
rst_path = os.path.join(SCRIPT_DIR_PATH, file_base + '.rst')
try:
with open(rst_path,'w') as f:
f.write(RST_HEADER)
for data in data_list:
f.write(data.rst_csv())
except:
raise
print "Wrote rst file:\n%s" % rst_path
else:
print "Unable to get CDAP version using grep"
def print_debug(message=''):
if DEBUG:
print message
def print_quiet(message=''):
if not QUIET:
print message
def list_special(input_file, options):
global QUIET
QUIET = True
LICENSE_LIST = ['Apache License, Version 2.0', 'MIT License', 'MIT+GPLv2', 'Public Domain',]
data_dict = dict()
data_list = process_enterprise(input_file, options) + process_local_sandbox(input_file, options) + process_cdap_ui(options)
for data in data_list:
library = data.get_library()
if library.startswith('com.sun.'):
library = library[len('com.sun.'):]
elif not library.startswith('javax'):
library = data.get_short_library()
if data.license not in LICENSE_LIST and library not in data_dict:
data_dict[library] = data
print "Listing special dependencies that require handling:\n"
libraries = data_dict.keys()
libraries.sort()
incomplete = 0
for library in libraries:
data = data_dict[library]
if data.license_source_url:
license_url = data.license_source_url
flag = ''
else:
license_url = "[%s %s]" % (data.id, data.license_url)
flag = '* '
incomplete += 1
print "%slibrary: %s '%s' %s" % (flag, library, data.license, license_url)
print "\nLibraries: %s (incomplete: %s)\n" % (len(libraries), incomplete)
class License:
SPACE_3 = ' '*3
def __init__(self, package, version, classifier, license, license_url, license_source_url=None):
self.id = package
self.package = package # aka 'jar' aka 'package' aka 'dependency'
self.version = version
self.classifier = classifier
self.license = license
self.license_url = license_url
self.license_source_url = license_source_url
def __str__(self):
return "%s : %s %s %s %s %s " % (self.package, self.version, self.classifier, self.license, self.license_url, self.license_source_url)
def _get_row(self):
return (self.package, self.version, self.classifier, self.license, self.license_url)
def get_library(self):
# The 'library' is the package less the version and classifier
        if self.classifier == '\-':
            library = self.package[:-len("-%s.jar" % self.version)]
        elif self.classifier:
            library = self.package[:-len("-%s.%s.jar" % (self.version, self.classifier))]
        else:
            # defensive fallback for an empty classifier: return the package unchanged
            library = self.package
        return library
def get_short_library(self):
library = self.get_library()
if library.count('.') < 2:
return library
else:
# Slice off the right-most two parts of the string
return library[library.rfind('.', 0, library.rfind('.'))+1:]
def rst_csv(self):
        # Escape embedded double quotes by doubling them, per reST's csv-table format
row = map(lambda x: x.replace('\"', '\"\"'), self._get_row())
return self.SPACE_3 + '"' + '","'.join(row) + '"\n'
class Level1License(License):
def __init__(self, package, artifact, license, license_url, license_source_url=None):
self.id = package
self.package = package # aka 'jar' aka 'package' aka 'dependency'
self.artifact = artifact
self.license = license
self.license_url = license_url
self.license_source_url = license_source_url
def __str__(self):
return "%s : %s %s %s %s " % (self.package, self.artifact, self.license, self.license_url, self.license_source_url)
def _get_row(self):
return (self.package, self.artifact, self.license, self.license_url)
def get_library(self):
return "%s.%s" % (self.package, self.artifact)
class UILicense(License):
def __init__(self, dependency, version, type, license, license_url, license_source_url=None):
self.id = dependency
self.dependency = dependency # aka 'jar' aka 'package' aka 'dependency'
self.version = version
self.type = type
self.license = license
self.license_url = license_url
self.license_source_url = license_source_url
def __str__(self):
return "%s : %s %s %s %s %s " % (self.dependency, self.version, self.type, self.license, self.license_url, self.license_source_url)
def _get_row(self):
linked_license = "`%s <%s>`__" % (self.license, self.license_url)
return (self.dependency, self.version, self.type, linked_license, self.license_source_url)
def get_library(self):
return self.dependency
class Library:
MAX_SIZES={}
PRINT_ORDER = ['id','jar','base','version','classifier','license','license_url', 'license_source_url']
SPACE = ' '*3
def __init__(self, jar, license, license_url, license_source_url=None):
self.jar = jar # aka 'package' aka 'dependency'
self.dependency = jar
self.id = ''
self.base = ''
self.version = ''
self.type = 'jar'
self.classifier = ''
self.license = license
self.license_url = license_url
self.license_source_url = license_source_url
self._initialize()
def __str__(self):
return "%s : %s" % (self.id, self.jar)
def _convert_jar(self):
# Looking for a string of the format 'base-version[-classifier].jar'
# If that fails, tries without the .jar
# If still no match, uses jar as base instead.
# Converts the jar into its component parts: base, version, classifier
import re
s_jar = r'(?P<base>.*?)-(?P<version>\d*[0-9.]*\d+)([.-]*(?P<classifier>.*?))\.jar$'
s_no_jar = r'(?P<base>.*?)-(?P<version>\d*[0-9.]*\d+)([.-]*(?P<classifier>.*?))'
try:
m = re.match( s_jar, self.jar)
if not m:
m = re.match( s_no_jar, self.jar)
if m:
if m.group('classifier'):
c = m.group('classifier')
else:
c = '<none>'
print_debug("%s: %s %s %s" % (self.jar, m.group('base'), m.group('version'), c ))
self.base = m.group('base')
self.version = m.group('version')
self.classifier = m.group('classifier')
else:
self.base = self.jar
if self.classifier:
self.id = "%s-%s" % (self.base, self.classifier)
else:
self.id = self.base
except:
raise
def _set_max_sizes(self):
# Used for pretty-printing
for element in self.__dict__.keys():
if element[0] != '_':
length = len(self.__dict__[element]) if self.__dict__[element] else 0
if self.MAX_SIZES.has_key(element):
length = max(self.MAX_SIZES[element], length)
self.MAX_SIZES[element] = length
def _initialize(self):
self._convert_jar()
self._set_max_sizes()
def pretty_print(self, i=0, digits=3):
SPACER = 1
line = ''
for element in self.PRINT_ORDER:
if element[0] != '_':
length = self.MAX_SIZES[element]
line += self.__dict__[element].ljust(self.MAX_SIZES[element]+ SPACER)
if i != 0:
format = "%%%dd:%%s" % digits
line = format % (i, line)
print line
def get_row(self):
# license_source_url = self.license_source_url if self.license_source_url else self.license_url
return (self.jar, self.version, self.classifier, self.license, self.license_url, self.license_source_url)
def get_full_row(self):
license_source_url = self.license_source_url if self.license_source_url else ''
return (self.jar, self.version, self.classifier, self.license, self.license_url, license_source_url)
def print_duplicate(self, lib_dict):
print "Duplicate key: %s" % self.id
print "%sCurrent library: %s" % (self.SPACE, lib_dict[self.id])
print "%sNew library: %s" % (self.SPACE, self)
class UI_Library(Library):
PRINT_ORDER = ['dependency','version','type','license','license_url','homepage','license_page']
def __init__(self, id, version, type, license, license_url, homepage, license_page):
self.id = id
self.dependency = id
self.version = version
self.type = type
self.license = license
self.license_url = license_url
self.homepage = homepage
self.license_page = license_page
self._initialize()
def __str__(self):
return "%s : %s (%s)" % (self.id, self.version, self.type)
def _initialize(self):
self._set_max_sizes()
def get_row(self):
license_source_url = self.license_page if self.license_page else self.homepage
return (self.id, self.version, self.type, self.license, self.license_url, license_source_url)
def get_full_row(self):
return (self.id, self.version, self.type, self.license, self.license_url, self.homepage, self.license_page)
#
# Main function
#
def main():
""" Main program entry point.
"""
global COPYRIGHT_YEAR
options, input_file = parse_options()
try:
options.logger = log
if options.use_next_year:
COPYRIGHT_YEAR +=1
print "\nWARNING: COPYRIGHT_YEAR of %s does not match current year of %s\n" % (COPYRIGHT_YEAR, COPYRIGHT_YEAR -1)
if options.cdap_ui:
process_cdap_ui(options)
if options.enterprise:
process_enterprise(input_file, options)
if options.level_1:
process_level_1(input_file, options)
if options.local_sandbox:
process_local_sandbox(input_file, options)
if options.rst_enterprise:
print_rst_enterprise(input_file, options)
if options.rst_level_1:
print_rst_level_1(input_file, options)
if options.rst_local_sandbox:
print_rst_local_sandbox(input_file, options)
if options.rst_cdap_ui:
print_rst_cdap_ui(options)
if options.master_print_terminal:
master_print_terminal()
if options.master_print_file:
master_print_file()
if options.list_special:
list_special(input_file, options)
except Exception, e:
try:
exc_info = sys.exc_info()
finally:
# Display the *original* exception
traceback.print_exception(*exc_info)
del exc_info
sys.stderr.write('Error: %s\n' % e)
sys.exit(1)
if __name__ == '__main__':
main()
| 37.148393
| 192
| 0.598733
|
285e7f8bf664656954d0414edd649e251d16c12d
| 1,044
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_points01.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2019-07-25T06:08:09.000Z
|
2019-11-01T02:33:56.000Z
|
xlsxwriter/test/comparison/test_chart_points01.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | 13
|
2019-07-14T00:29:05.000Z
|
2019-11-26T06:16:46.000Z
|
xlsxwriter/test/comparison/test_chart_points01.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_points01.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with point formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [2, 5, 4, 1, 7, 4]
worksheet.write_column('A1', data)
chart.add_series({
'values': '=Sheet1!$A$1:$A$6',
'points': [{'fill': {'color': 'red'}}],
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 23.727273
| 79
| 0.570881
|
4dcec586a69fabaecc24d30137cedf15871e3838
| 115,439
|
py
|
Python
|
src/cluecode/copyrights.py
|
sthagen/nexB-scancode-toolkit
|
12cc1286df78af898fae76fa339da2bb50ad51b9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
src/cluecode/copyrights.py
|
sthagen/nexB-scancode-toolkit
|
12cc1286df78af898fae76fa339da2bb50ad51b9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
src/cluecode/copyrights.py
|
sthagen/nexB-scancode-toolkit
|
12cc1286df78af898fae76fa339da2bb50ad51b9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
import re
import sys
from collections import deque
from time import time
import attr
from pygmars import lex
from pygmars import parse
from pygmars import Token
from pygmars import tree
from commoncode.text import toascii
from commoncode.text import unixlinesep
from cluecode import copyrights_hint
# Tracing flags
TRACE = False or os.environ.get('SCANCODE_DEBUG_COPYRIGHT', False)
# set to 1 to enable pygmars deep tracing
TRACE_DEEP = 0
if os.environ.get('SCANCODE_DEBUG_COPYRIGHT_DEEP'):
TRACE_DEEP = 1
TRACE = False
TRACE_TOK = False or os.environ.get('SCANCODE_DEBUG_COPYRIGHT_TOKEN', False)
VALIDATE = False or os.environ.get('SCANCODE_DEBUG_COPYRIGHT_VALIDATE', False)
# Tracing flags
def logger_debug(*args):
pass
if TRACE or TRACE_DEEP or TRACE_TOK:
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Detect and collect copyright statements.
The process consists of:
- prepare and cleanup text
- identify regions of text that may contain copyright (using hints).
These are called "candidates".
- tag the text (i.e. lex it) with parts-of-speech (POS) tags to identify
  various copyright statement parts such as dates, companies, names
("named entities"), etc. This is done using pygmars which contains a lexer
derived from NLTK POS tagger.
- feed the tagged text to a parsing grammar describing actual copyright
statements (also using pygmars) and obtain a parse tree.
- Walk the parse tree and yield Detection objects (e.g., copyright statements,
  holders or authors) with start and end lines taken from the parse tree, after
  some extra post-detection cleanups.
"""
def detect_copyrights(
location,
include_copyrights=True,
include_holders=True,
include_authors=True,
include_copyright_years=True,
include_copyright_allrights=False,
demarkup=True,
deadline=sys.maxsize,
):
"""
Yield Detection objects detected in the file at ``location``.
The flags ``include_copyrights``, ``include_holders`` and
``include_authors`` drive which actual detections are done and returned.
For copyrights only:
- If ``include_copyright_years`` is True, include years and year ranges.
- If ``include_copyright_allrights`` is True, include trailing
"all rights reserved"-style mentions
Strip markup from text if ``demarkup`` is True.
Run for up to ``deadline`` seconds and return results found so far.
"""
from textcode.analysis import numbered_text_lines
numbered_lines = numbered_text_lines(location, demarkup=demarkup)
numbered_lines = list(numbered_lines)
if TRACE:
logger_debug('detect_copyrights: numbered_lines')
for nl in numbered_lines:
logger_debug(' numbered_line:', repr(nl))
include_copyright_years = include_copyrights and include_copyright_years
include_copyright_allrights = include_copyrights and include_copyright_allrights
yield from detect_copyrights_from_lines(
numbered_lines=numbered_lines,
include_copyrights=include_copyrights,
include_holders=include_holders,
include_authors=include_authors,
include_copyright_years=include_copyright_years,
include_copyright_allrights=include_copyright_allrights,
deadline=deadline,
)
DETECTOR = None
def detect_copyrights_from_lines(
numbered_lines,
include_copyrights=True,
include_holders=True,
include_authors=True,
include_copyright_years=True,
include_copyright_allrights=False,
deadline=sys.maxsize,
):
"""
Yield Detection objects detected in a ``numbered_lines`` sequence of
tuples of (line number, text).
The flags ``include_copyrights``, ``include_holders`` and
``include_authors`` drive which actual detections are done and returned.
For copyrights only:
- If ``include_copyright_years`` is True, include years and year ranges.
- If ``include_copyright_allrights`` is True, include trailing
"all rights reserved"-style mentions
Run for up to ``deadline`` seconds and return results found so far.
"""
if not numbered_lines:
return
include_copyright_years = include_copyrights and include_copyright_years
include_copyright_allrights = include_copyrights and include_copyright_allrights
global DETECTOR
if not DETECTOR:
DETECTOR = detector = CopyrightDetector()
else:
detector = DETECTOR
candidate_lines_groups = candidate_lines(numbered_lines)
if TRACE:
candidate_lines_groups = list(candidate_lines_groups)
logger_debug(
f'detect_copyrights_from_lines: ALL groups of candidate '
f'lines collected: {len(candidate_lines_groups)}',
)
for candidates in candidate_lines_groups:
if TRACE:
from pprint import pformat
can = pformat(candidates, width=160)
logger_debug(
f' detect_copyrights_from_lines: processing candidates group:\n'
f' {can}'
)
detections = detector.detect(
numbered_lines=candidates,
include_copyrights=include_copyrights,
include_holders=include_holders,
include_authors=include_authors,
include_copyright_years=include_copyright_years,
include_copyright_allrights=include_copyright_allrights
)
if TRACE:
detections = list(detections)
logger_debug(f' detect_copyrights_from_lines: {detections}')
for detection in detections:
yield detection
# TODO: return a warning if we missed the deadline
if time() > deadline:
break
################################################################################
# DETECTION PROPER
################################################################################
class CopyrightDetector(object):
"""
Detect copyrights and authors.
"""
def __init__(self):
self.lexer = lex.Lexer(patterns)
self.parser = parse.Parser(grammar, trace=TRACE_DEEP, validate=VALIDATE)
def detect(self,
numbered_lines,
include_copyrights=True,
include_holders=True,
include_authors=True,
include_copyright_years=True,
include_copyright_allrights=False,
):
"""
Yield Detection objects detected in a ``numbered_lines`` sequence of
tuples of (line number, text).
The flags ``include_copyrights``, ``include_holders`` and
``include_authors`` drive which actual detections are done and returned.
For copyrights only:
- If ``include_copyright_years`` is True, include years and year ranges.
- If ``include_copyright_allrights`` is True, include trailing
"all rights reserved"-style mentions
"""
include_copyright_years = include_copyrights and include_copyright_years
include_copyright_allrights = include_copyrights and include_copyright_allrights
numbered_lines = list(numbered_lines)
if not numbered_lines:
return
if TRACE:
logger_debug(f'CopyrightDetector: numbered_lines: {numbered_lines}')
tokens = list(get_tokens(numbered_lines))
if TRACE:
logger_debug(f'CopyrightDetector: initial tokens: {tokens}')
if not tokens:
return
# first, POS tag each token using token regexes
lexed_text = list(self.lexer.lex_tokens(tokens))
if TRACE:
logger_debug(f'CopyrightDetector: lexed tokens: {lexed_text}')
        # then build a parse tree based on the tagged tokens
parse_tree = self.parser.parse(lexed_text)
if TRACE:
logger_debug(f'CopyrightDetector: parse_tree: {parse_tree}')
non_copyright_labels = frozenset()
if not include_copyright_years:
non_copyright_labels = frozenset([
'YR-RANGE', 'YR', 'YR-AND', 'YR-PLUS', 'BARE-YR',
])
non_holder_labels = frozenset([
'COPY',
'YR-RANGE', 'YR-AND', 'YR', 'YR-PLUS', 'BARE-YR',
'EMAIL', 'URL',
'HOLDER', 'AUTHOR',
])
non_holder_labels_mini = frozenset([
'COPY',
'YR-RANGE', 'YR-AND', 'YR', 'YR-PLUS', 'BARE-YR',
'HOLDER', 'AUTHOR',
])
non_authors_labels = frozenset([
'COPY',
'YR-RANGE', 'YR-AND', 'YR', 'YR-PLUS', 'BARE-YR',
'HOLDER', 'AUTHOR',
])
# then walk the parse parse_tree, collecting copyrights, years and authors
for tree_node in parse_tree:
if not isinstance(tree_node, tree.Tree):
continue
if TRACE:
logger_debug(f'CopyrightDetector: parse_tree node: {tree_node}')
tree_node_label = tree_node.label
if (include_copyrights or include_holders) and 'COPYRIGHT' in tree_node_label:
copyrght = build_detection_from_node(
node=tree_node,
cls=CopyrightDetection,
ignores=non_copyright_labels,
include_copyright_allrights=include_copyright_allrights,
refiner=refine_copyright,
junk=COPYRIGHTS_JUNK,
)
if TRACE:
logger_debug(f'CopyrightDetector: detection: {copyrght}')
if copyrght:
if include_copyrights:
yield copyrght
if include_holders:
# By default we strip email and urls from holders ....
holder = build_detection_from_node(
node=tree_node,
cls=HolderDetection,
ignores=non_holder_labels,
refiner=refine_holder,
)
if not holder:
# ... but if we have no holder, we try again and
# this time we keep email and URLs for holders using
# "non_holder_labels_mini" as an "ignores" label set
holder = build_detection_from_node(
node=tree_node,
cls=HolderDetection,
ignores=non_holder_labels_mini,
refiner=refine_holder,
)
if holder:
if TRACE:
logger_debug(f'CopyrightDetector: holders: {holder}')
yield holder
elif include_authors and tree_node_label == 'AUTHOR':
author = build_detection_from_node(
node=tree_node,
cls=AuthorDetection,
ignores=non_authors_labels,
refiner=refine_author,
junk=AUTHORS_JUNK,
)
if author:
if TRACE:
logger_debug(f'CopyrightDetector: detected authors: {author}')
yield author
def get_tokens(numbered_lines, splitter=re.compile('[\\t =;]+').split):
"""
Return an iterable of pygmars.Token built from a ``numbered_lines`` iterable
of tuples of (line number, text).
We perform a simple tokenization on spaces, tabs and the '=' and ';' punctuation characters.
"""
for start_line, line in numbered_lines:
if TRACE_TOK:
logger_debug(' get_tokens: bare line: ' + repr(line))
line = prepare_text_line(line)
if TRACE_TOK:
logger_debug(' get_tokens: preped line: ' + repr(line))
pos = 0
for tok in splitter(line):
# FIXME: strip trailing single quotes and ignore empties
tok = tok.strip("' ")
# FIXME: strip trailing colons: why?
tok = tok.rstrip(':').strip()
# FIXME: strip leading @: : why?
tok = tok.lstrip('@').strip()
# FIXME: why?
if tok and tok != ':':
yield Token(value=tok, start_line=start_line, pos=pos)
pos += 1
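# Indicative sketch of the tokenization above (actual values also depend on
# prepare_text_line(), which is defined elsewhere in this module):
#
#   list(get_tokens([(1, 'Copyright 2008 nexB Inc.')]))
#   yields Token objects roughly equivalent to:
#   Token('Copyright', start_line=1, pos=0), Token('2008', start_line=1, pos=1),
#   Token('nexB', start_line=1, pos=2), Token('Inc.', start_line=1, pos=3)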
class Detection:
def to_dict(self):
"""
Return a dict of primitive Python types.
"""
return attr.asdict(self)
@classmethod
def split(cls, detections, to_dict=False):
"""
Return a list of CopyrightDetection, a list of HolderDetection and a
list of AuthorDetection given a ``detections`` list of Detection.
If ``to_dict`` is True, return lists of mappings instead of objects.
"""
copyrights = []
holders = []
authors = []
for detection in detections:
det = detection.to_dict() if to_dict else detection
if isinstance(detection, CopyrightDetection):
copyrights.append(det)
elif isinstance(detection, HolderDetection):
holders.append(det)
elif isinstance(detection, AuthorDetection):
authors.append(det)
return copyrights, holders, authors
@classmethod
def split_values(cls, detections):
"""
Return a list of copyright strings, a list of holder strings and a
list of author strings given a ``detections`` list of Detection.
"""
copyrights, holders, authors = cls.split(detections)
copyrights = [det.copyright for det in copyrights]
holders = [det.holder for det in holders]
authors = [det.author for det in authors]
return copyrights, holders, authors
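# Illustrative sketch of how Detection.split() and split_values() are meant to
# be used downstream (the values shown are made-up placeholders, not real
# detections; the attr-based Detection subclasses are defined just below):
#
#   detections = [
#       CopyrightDetection('Copyright (c) 2008 nexB Inc.', start_line=1, end_line=1),
#       HolderDetection('nexB Inc.', start_line=1, end_line=1),
#   ]
#   copyrights, holders, authors = Detection.split_values(detections)
#   # copyrights == ['Copyright (c) 2008 nexB Inc.'], holders == ['nexB Inc.'], authors == []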
@attr.s(slots=True)
class CopyrightDetection(Detection):
copyright = attr.ib()
start_line = attr.ib()
end_line = attr.ib()
@attr.s(slots=True)
class HolderDetection(Detection):
holder = attr.ib()
start_line = attr.ib()
end_line = attr.ib()
@attr.s(slots=True)
class AuthorDetection(Detection):
author = attr.ib()
start_line = attr.ib()
end_line = attr.ib()
def build_detection_from_node(
node,
cls,
ignores=frozenset(),
include_copyright_allrights=False,
refiner=None,
junk=frozenset(),
):
"""
Return a ``cls`` Detection object with a space-normalized string value built
from a pygmars.tree.Tree ``node``, or None.
Filter ``node`` Tokens with a type found in the ``ignores`` set of ignorable
token types.
For copyright detection, include trailing "All rights reserved" if
``include_copyright_allrights`` is True.
Apply the ``refiner`` callable function to the detection string.
Return None if the value exists in the ``junk`` strings set.
"""
include_copyright_allrights = (
cls == CopyrightDetection
and include_copyright_allrights
)
if ignores:
leaves = [
token for token in node.leaves()
if token.label not in ignores
]
else:
leaves = node.leaves()
# if TRACE_DEEP: logger_debug(' starting leaves:', leaves)
if include_copyright_allrights:
filtered = leaves
else:
filtered = []
for token in leaves:
# FIXME: this should operate on the tree and not on the leaves
# ALLRIGHTRESERVED: <NNP|NN|CAPS> <RIGHT> <NNP|NN|CAPS>? <RESERVED>
# This pops ALL RIGHT RESERVED by finding it backwards from RESERVED
if token.label == 'RESERVED':
if (
len(filtered) >= 2
and filtered[-1].label == 'RIGHT'
and filtered[-2].label in ('NN', 'CAPS', 'NNP')
):
filtered = filtered[:-2]
elif (
len(filtered) >= 3
and filtered[-1].label in ('NN', 'CAPS', 'NNP')
and filtered[-2].label == 'RIGHT'
and filtered[-3].label in ('NN', 'CAPS', 'NNP')
):
filtered = filtered[:-3]
else:
filtered.append(token)
node_string = ' '.join(t.value for t in filtered)
node_string = ' '.join(node_string.split())
if refiner:
node_string = refiner(node_string)
if node_string and node_string.lower() not in junk:
start_line = filtered[0].start_line
end_line = filtered[-1].start_line
return cls(node_string, start_line=start_line, end_line=end_line)
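# For instance (indicative, using a made-up company name): when a COPYRIGHT
# parse_tree node covers the text "Copyright (c) 2002 MyCo Inc. All rights
# reserved.", the trailing NN/RIGHT/RESERVED tokens are stripped and the built
# string is "Copyright (c) 2002 MyCo Inc." unless include_copyright_allrights
# is True, in which case the "All rights reserved." part is kept.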
################################################################################
# LEXING AND PARSING
################################################################################
_YEAR = (r'('
'19[6-9][0-9]' # 1960 to 1999
'|'
'20[0-2][0-9]' # 2000 to 2029
')')
_YEAR_SHORT = (r'('
'[6-9][0-9]' # 60 to 99
'|'
'[0-2][0-9]' # 00 to 29
')')
_YEAR_YEAR = (r'('
# fixme v ....the underscore below is suspicious
'(19[6-9][0-9][\\.,\\-]_)+[6-9][0-9]' # 1960-99
'|'
'(19[6-9][0-9][\\.,\\-])+[0-9]' # 1998-9
'|'
'(20[0-2][0-9][\\.,\\-])+[0-2][0-9]' # 2001-16 or 2012-04
'|'
'(20[0-2][0-9][\\.,\\-])+[0-9]' # 2001-4 not 2012
'|'
'(20[0-2][0-9][\\.,\\-])+20[0-2][0-9]' # 2001-2012
')')
_PUNCT = (r'('
'['
'\\W' # not a word (word includes underscore)
'\\D' # not a digit
'\\_' # underscore
'i' # oddity
'\\?'
']'
'|'
'\\&nbsp' # html entities (&nbsp) are sometimes double escaped
')*') # repeated 0 or more times
_YEAR_PUNCT = _YEAR + _PUNCT
_YEAR_YEAR_PUNCT = _YEAR_YEAR + _PUNCT
_YEAR_SHORT_PUNCT = _YEAR_SHORT + _PUNCT
_YEAR_OR_YEAR_YEAR_WITH_PUNCT = (r'(' +
_YEAR_PUNCT +
'|' +
_YEAR_YEAR_PUNCT +
')')
_YEAR_THEN_YEAR_SHORT = (r'(' +
_YEAR_OR_YEAR_YEAR_WITH_PUNCT +
'(' +
_YEAR_SHORT_PUNCT +
')*' +
')')
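# Rough, illustrative examples of what the composed year expressions above are
# meant to accept (not exhaustive): _YEAR_PUNCT covers '1999' or '1999,',
# _YEAR_YEAR_PUNCT covers '2001-16', '2001-2012' or '1998-9', and
# _YEAR_THEN_YEAR_SHORT additionally allows short continuations such as '1994/95'.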
# TODO: this needs to be simplified:
patterns = [
############################################################################
# COPYRIGHT
############################################################################
# some exceptions
# NOT a copyright Copyright.txt : treat as NN
(r'^Copyright\.txt$', 'NN'),
# when lowercase with trailing period. this is not a Copyright statement
(r'^copyright\.\)?$', 'NN'),
# NOT a copyright symbol (ie. "copyrighted."): treat as NN
(r'^Copyrighted[\.,]$', 'NN'),
(r'^Copyrights[\.,]$', 'NN'),
(r'^copyrighted[\.,]$', 'NN'),
(r'^copyrights[\.,]$', 'NN'),
(r'^COPYRIGHTS[\.,]$', 'NN'),
(r'^COPYRIGHTED[\.,]$', 'NN'),
# copyright word or symbol
(r'^[\(\.@_\-\#\):]*[Cc]opyrights?:?$', 'COPY'),
(r'^[\(\.@_]*COPYRIGHT[sS]?:?$', 'COPY'),
(r'^[\(\.@]*[Cc]opyrighted?:?$', 'COPY'),
(r'^[\(\.@]*COPYRIGHTED?:?$', 'COPY'),
(r'^[\(\.@]*CopyRights?:?$', 'COPY'),
# with a trailing comma
(r'^Copyright,$', 'COPY'),
(r'^\(C\)\,?$', 'COPY'),
(r'^\(c\)\,?$', 'COPY'),
(r'^COPR\.?$', 'COPY'),
(r'^copr\.?$', 'COPY'),
(r'^Copr\.?$', 'COPY'),
# copyright in markup, until we strip markup: apache'>Copyright
(r'[A-Za-z0-9]+[\'">]+[Cc]opyright', 'COPY'),
# A copyright line in some manifest, meta or structured files such as Windows PE
(r'^AssemblyCopyright.?$', 'COPY'),
(r'^AppCopyright?$', 'COPY'),
# SPDX-FileCopyrightText as defined by the FSFE Reuse project
(r'^[Ss][Pp][Dd][Xx]-[Ff]ile[Cc]opyright[Tt]ext', 'COPY'),
# SPDX-FileContributor as defined in SPDX and seen used in KDE
(r'^[Ss][Pp][Dd][Xx]-[Ff]ile[Cc]ontributor', 'SPDX-CONTRIB'),
############################################################################
# ALL Rights Reserved.
############################################################################
# All|Some|No Rights Reserved. should be a terminator/delimiter.
(r'^All$', 'NN'),
(r'^all$', 'NN'),
(r'^ALL$', 'NN'),
(r'^NO$', 'NN'),
(r'^No$', 'NN'),
(r'^no$', 'NN'),
(r'^Some$', 'NN'),
(r'^[Rr]ights?$', 'RIGHT'),
(r'^RIGHTS?$', 'RIGHT'),
(r'^[Rr]eserved[\.,]*$', 'RESERVED'),
(r'^RESERVED[\.,]*$', 'RESERVED'),
(r'^[Rr]eversed[\.,]*$', 'RESERVED'),
(r'^REVERSED[\.,]*$', 'RESERVED'),
# in German
(r'^[Aa]lle$', 'NN'),
(r'^[Rr]echte$', 'RIGHT'),
(r'^[Vv]orbehalten[\.,]*$', 'RESERVED'),
# in French
(r'^[Tt]ous$', 'NN'),
(r'^[Dd]roits?$', 'RIGHT'),
(r'^[Rr]éservés[\.,]*$', 'RESERVED'),
(r'^[Rr]eserves[\.,]*$', 'RESERVED'),
############################################################################
# JUNK are things to ignore
############################################################################
# a path with a trailing year-like segment is NOT a year, as in
# Landroid/icu/impl/IDNA2003 : treat as JUNK
(r'^[^\\/]+[\\/][^\\/]+[\\/].*$', 'JUNK'),
# A combo of many (3+) groups of letters and punctuation without spaces is likely junk
# "AEO>>,o>>'!xeoI?o?O1/4thuA/"
# (r'((\w+\W+){3,})+', 'JUNK'),
# CamELCaseeXXX is typically JUNK such as code variable names
# AzaAzaaaAz BBSDSB002923,
(r'^([A-Z][a-z]+){3,20}[A-Z]+[0-9]*,?$', 'JUNK'),
# multiple parens (at least two (x) groups) are a sign of junk
# such as in (1)(ii)(OCT
(r'^.*\(.*\).*\(.*\).*$', 'JUNK'),
# parens such as (1) or (a) are a sign of junk but of course NOT (c)
(r'^\(([abdefghi\d]|ii|iii)\)$', 'JUNK'),
# found in crypto certificates and LDAP
(r'^O=$', 'JUNK'),
(r'^OU=?$', 'JUNK'),
(r'^XML$', 'JUNK'),
(r'^Parser$', 'JUNK'),
(r'^Dual$', 'JUNK'),
(r'^Crypto$', 'JUNK'),
(r'^PART$', 'JUNK'),
(r'^[Oo]riginally?$', 'JUNK'),
(r'^[Rr]epresentations?\.?$', 'JUNK'),
(r'^works,$', 'JUNK'),
(r'^Refer$', 'JUNK'),
(r'^Apt$', 'JUNK'),
(r'^Agreement$', 'JUNK'),
(r'^Usage$', 'JUNK'),
(r'^Please$', 'JUNK'),
(r'^\(?Based$', 'JUNK'),
(r'^Upstream$', 'JUNK'),
(r'^Files?$', 'JUNK'),
(r'^Filename:?$', 'JUNK'),
(r'^Description:?$', 'JUNK'),
(r'^[Pp]rocedures?$', 'JUNK'),
(r'^You$', 'JUNK'),
(r'^Everyone$', 'JUNK'),
(r'^Unless$', 'JUNK'),
(r'^rant$', 'JUNK'),
(r'^Subject$', 'JUNK'),
(r'^Acknowledgements?$', 'JUNK'),
(r'^Special$', 'JUNK'),
(r'^Derivative$', 'JUNK'),
(r'^[Ll]icensable$', 'JUNK'),
(r'^[Ss]ince$', 'JUNK'),
(r'^[Ll]icen[cs]e[\.d]?$', 'JUNK'),
(r'^[Ll]icen[cs]ors?$', 'JUNK'),
(r'^under$', 'JUNK'),
(r'^TCK$', 'JUNK'),
(r'^Use$', 'JUNK'),
(r'^[Rr]estrictions?$', 'JUNK'),
(r'^[Ii]ntrodu`?ction$', 'JUNK'),
(r'^[Ii]ncludes?$', 'JUNK'),
(r'^[Vv]oluntary$', 'JUNK'),
(r'^[Cc]ontributions?$', 'JUNK'),
(r'^[Mm]odifications?$', 'JUNK'),
(r'^Company:$', 'JUNK'),
(r'^For$', 'JUNK'),
(r'^File$', 'JUNK'),
(r'^Last$', 'JUNK'),
(r'^[Rr]eleased?$', 'JUNK'),
(r'^[Cc]opyrighting$', 'JUNK'),
(r'^[Aa]uthori.*$', 'JUNK'),
(r'^such$', 'JUNK'),
(r'^[Aa]ssignments?[.,]?$', 'JUNK'),
(r'^[Bb]uild$', 'JUNK'),
(r'^[Ss]tring$', 'JUNK'),
(r'^Implementation-Vendor$', 'JUNK'),
(r'^dnl$', 'JUNK'),
(r'^rem$', 'JUNK'),
(r'^REM$', 'JUNK'),
(r'^Supports$', 'JUNK'),
(r'^Separator$', 'JUNK'),
(r'^\.byte$', 'JUNK'),
(r'^Idata$', 'JUNK'),
(r'^[Cc]ontributed?$', 'JUNK'),
(r'^[Ff]unctions?$', 'JUNK'),
(r'^[Nn]otices?$', 'JUNK'),
(r'^[Mm]ust$', 'JUNK'),
(r'^ISUPPER?$', 'JUNK'),
(r'^ISLOWER$', 'JUNK'),
(r'^AppPublisher$', 'JUNK'),
(r'^DISCLAIMS?$', 'JUNK'),
(r'^SPECIFICALLY$', 'JUNK'),
(r'^IDENTIFICATION$', 'JUNK'),
(r'^WARRANTIE?S?$', 'JUNK'),
(r'^WARRANTS?$', 'JUNK'),
(r'^WARRANTYS?$', 'JUNK'),
(r'^hispagestyle$', 'JUNK'),
(r'^Generic$', 'JUNK'),
(r'^Change$', 'JUNK'),
(r'^Add$', 'JUNK'),
(r'^Generic$', 'JUNK'),
(r'^Average$', 'JUNK'),
(r'^Taken$', 'JUNK'),
(r'^LAWS\.?$', 'JUNK'),
(r'^design$', 'JUNK'),
(r'^Driver$', 'JUNK'),
(r'^[Cc]ontribution\.?', 'JUNK'),
(r'DeclareUnicodeCharacter$', 'JUNK'),
(r'^Language-Team$', 'JUNK'),
(r'^Last-Translator$', 'JUNK'),
(r'^OMAP730$', 'JUNK'),
(r'^Law\.$', 'JUNK'),
(r'^dylid$', 'JUNK'),
(r'^BeOS$', 'JUNK'),
(r'^Generates?$', 'JUNK'),
(r'^Thanks?$', 'JUNK'),
(r'^therein$', 'JUNK'),
# various programming constructs
(r'^var$', 'JUNK'),
(r'^[Tt]his$', 'JUNK'),
(r'^return$', 'JUNK'),
(r'^function$', 'JUNK'),
(r'^thats?$', 'JUNK'),
(r'^xmlns$', 'JUNK'),
(r'^file$', 'JUNK'),
(r'^[Aa]sync$', 'JUNK'),
(r'^Keyspan$', 'JUNK'),
# neither and nor conjunctions and some common licensing words are NOT part
# of a copyright statement
(r'^neither$', 'JUNK'),
(r'^nor$', 'JUNK'),
(r'^providing$', 'JUNK'),
(r'^Execute$', 'JUNK'),
(r'^NOTICE[.,]*$', 'JUNK'),
(r'^[Nn]otice[.,]*$', 'JUNK'),
(r'^passes$', 'JUNK'),
(r'^Should$', 'JUNK'),
(r'^[Ll]icensing\@?$', 'JUNK'),
(r'^Disclaimer$', 'JUNK'),
(r'^LAWS\,?$', 'JUNK'),
(r'^[Ll]aws?,?$', 'JUNK'),
(r'^Some$', 'JUNK'),
(r'^Derived$', 'JUNK'),
(r'^Limitations?$', 'JUNK'),
(r'^Nothing$', 'JUNK'),
(r'^Policy$', 'JUNK'),
(r'^available$', 'JUNK'),
(r'^Recipient\.?$', 'JUNK'),
(r'^LICEN[CS]EES?\.?$', 'JUNK'),
(r'^[Ll]icen[cs]ees?,?$', 'JUNK'),
(r'^Application$', 'JUNK'),
(r'^Receiving$', 'JUNK'),
(r'^Party$', 'JUNK'),
(r'^interfaces$', 'JUNK'),
(r'^owner$', 'JUNK'),
(r'^Sui$', 'JUNK'),
(r'^Generis$', 'JUNK'),
(r'^Conditioned$', 'JUNK'),
(r'^Disclaimer$', 'JUNK'),
(r'^Warranty$', 'JUNK'),
(r'^Represents$', 'JUNK'),
(r'^Sufficient$', 'JUNK'),
(r'^Each$', 'JUNK'),
(r'^Partially$', 'JUNK'),
(r'^Limitation$', 'JUNK'),
(r'^Liability$', 'JUNK'),
(r'^Named$', 'JUNK'),
(r'^Use.$', 'JUNK'),
(r'^EXCEPT$', 'JUNK'),
(r'^OWNER\.?$', 'JUNK'),
(r'^Comments\.?$', 'JUNK'),
(r'^you$', 'JUNK'),
(r'^means$', 'JUNK'),
(r'^information$', 'JUNK'),
(r'^[Aa]lternatively.?$', 'JUNK'),
(r'^[Aa]lternately.?$', 'JUNK'),
(r'^INFRINGEMENT.?$', 'JUNK'),
(r'^Install$', 'JUNK'),
(r'^Updates$', 'JUNK'),
(r'^Record-keeping$', 'JUNK'),
(r'^Privacy$', 'JUNK'),
(r'^within$', 'JUNK'),
# various trailing words that are junk
(r'^Copyleft$', 'JUNK'),
(r'^LegalCopyright$', 'JUNK'),
(r'^Distributed$', 'JUNK'),
(r'^Report$', 'JUNK'),
(r'^Available$', 'JUNK'),
(r'^true$', 'JUNK'),
(r'^false$', 'JUNK'),
(r'^node$', 'JUNK'),
(r'^jshint$', 'JUNK'),
(r'^node\':true$', 'JUNK'),
(r'^node:true$', 'JUNK'),
(r'^this$', 'JUNK'),
(r'^Act,?$', 'JUNK'),
(r'^[Ff]unctionality$', 'JUNK'),
(r'^bgcolor$', 'JUNK'),
(r'^F+$', 'JUNK'),
(r'^Rewrote$', 'JUNK'),
(r'^Much$', 'JUNK'),
(r'^remains?,?$', 'JUNK'),
(r'^earlier$', 'JUNK'),
(r'^is$', 'JUNK'),
(r'^[lL]aws?$', 'JUNK'),
(r'^Insert$', 'JUNK'),
(r'^url$', 'JUNK'),
(r'^[Ss]ee$', 'JUNK'),
(r'^[Pp]ackage\.?$', 'JUNK'),
(r'^Covered$', 'JUNK'),
(r'^date$', 'JUNK'),
(r'^practices$', 'JUNK'),
(r'^[Aa]ny$', 'JUNK'),
(r'^ANY$', 'JUNK'),
(r'^fprintf.*$', 'JUNK'),
(r'^CURDIR$', 'JUNK'),
(r'^Environment/Libraries$', 'JUNK'),
(r'^Environment/Base$', 'JUNK'),
(r'^Violations\.?$', 'JUNK'),
(r'^Owner$', 'JUNK'),
(r'^behalf$', 'JUNK'),
(r'^know-how$', 'JUNK'),
(r'^interfaces?,?$', 'JUNK'),
(r'^than$', 'JUNK'),
(r'^whom$', 'JUNK'),
(r'^are$', 'JUNK'),
(r'^However,?$', 'JUNK'),
(r'^[Cc]ollectively$', 'JUNK'),
(r'^following$', 'JUNK'),
(r'^file\.$', 'JUNK'),
# version variables listed after Copyright variable in FFmpeg
(r'^ExifVersion$', 'JUNK'),
(r'^FlashpixVersion$', 'JUNK'),
(r'^.+ArmsAndLegs$', 'JUNK'),
# junk when HOLDER(S): typically used in disclaimers instead
(r'^HOLDER\(S\)$', 'JUNK'),
# some HTML tags
(r'^width$', 'JUNK'),
# this otherwise triggers on "copyright ownership. The ASF" in Apache license headers
(r'^[Oo]wnership\.?$', 'JUNK'),
# exceptions to composed proper names, mostly debian copyright/control tag-related
# FIXME: may be lowercase instead?
(r'^Title:?$', 'JUNK'),
(r'^Debianized-By:?$', 'JUNK'),
(r'^Upstream-Maintainer:?$', 'JUNK'),
(r'^Content', 'JUNK'),
(r'^Upstream-Author:?$', 'JUNK'),
(r'^Packaged-By:?$', 'JUNK'),
# Windows XP
(r'^Windows$', 'JUNK'),
(r'^XP$', 'JUNK'),
(r'^SP1$', 'JUNK'),
(r'^SP2$', 'JUNK'),
(r'^SP3$', 'JUNK'),
(r'^SP4$', 'JUNK'),
(r'^assembly$', 'JUNK'),
# various junk bits
(r'^example\.com$', 'JUNK'),
(r'^null$', 'JUNK'),
(r'^:Licen[cs]e$', 'JUNK'),
(r'^Agent\.?$', 'JUNK'),
(r'^behalf$', 'JUNK'),
(r'^[aA]nyone$', 'JUNK'),
# when uppercase this is likely part of some SQL statement
(r'^FROM$', 'JUNK'),
(r'^CREATE$', 'JUNK'),
(r'^CURDIR$', 'JUNK'),
# found in sqlite
(r'^\+0$', 'JUNK'),
(r'^ToUpper$', 'JUNK'),
# Java
(r'^.*Servlet,?$', 'JUNK'),
(r'^class$', 'JUNK'),
# C/C++
(r'^template$', 'JUNK'),
(r'^struct$', 'JUNK'),
(r'^typedef$', 'JUNK'),
(r'^type$', 'JUNK'),
(r'^next$', 'JUNK'),
(r'^typename$', 'JUNK'),
(r'^namespace$', 'JUNK'),
(r'^type_of$', 'JUNK'),
(r'^begin$', 'JUNK'),
(r'^end$', 'JUNK'),
# Some mixed case junk
(r'^LastModified$', 'JUNK'),
# Some font names
(r'^Lucida$', 'JUNK'),
# various trailing words that are junk
(r'^CVS$', 'JUNK'),
(r'^EN-IE$', 'JUNK'),
(r'^Info$', 'JUNK'),
(r'^GA$', 'JUNK'),
(r'^unzip$', 'JUNK'),
(r'^EULA', 'JUNK'),
(r'^Terms?[.,]?$', 'JUNK'),
(r'^Non-Assertion$', 'JUNK'),
# this is not Copr.
(r'^Coproduct,?[,\.]?$$', 'JUNK'),
# FIXME: may be these should be NNs?
(r'^CONTRIBUTORS?[,\.]?$', 'JUNK'),
(r'^OTHERS?[,\.]?$', 'JUNK'),
(r'^Contributors?\:[,\.]?$', 'JUNK'),
(r'^Version$', 'JUNK'),
# JUNK from binary
(r'^x1b|1H$', 'JUNK'),
# JUNK as camel case with a single hump such as in "processingInfo"
(r'^[a-z]{3,10}[A-Z][a-z]{3,10}$', 'JUNK'),
(r'^\$?Guid$', 'JUNK'),
(r'^Small$', 'NN'),
############################################################################
# Nouns and proper Nouns
############################################################################
# Various rare bits treated as NAME directly
(r'^FSFE?[\.,]?$', 'NAME'),
(r'^This_file_is_part_of_KDE$', 'NAME'),
# K.K. (a company suffix), needs special handling
(r'^K.K.,?$', 'NAME'),
# MIT is problematic
# With a comma, always CAPS (MIT alone is too error prone to be always tagged as CAPS)
(r'^MIT,$', 'CAPS'),
(r'^MIT\.?$', 'MIT'),
# MIT is common enough, but not with a trailing period.
(r'^MIT$', 'NN'),
# ISC is always a company
(r'^ISC$', 'COMP'),
# NOT A CAPS
# [YEAR] W3C® (MIT, ERCIM, Keio, Beihang)."
(r'^YEAR', 'NN'),
# Various NN, exceptions to NNP or CAPS: note that some are open ended and
# do not end with a $
(r'^Activation\.?$', 'NN'),
(r'^Act[\.,]?$', 'NN'),
(r'^Added$', 'NN'),
(r'^Are$', 'NN'),
(r'^Additional$', 'NN'),
(r'^AGPL.?$', 'NN'),
(r'^Agreements?\.?$', 'NN'),
(r'^AIRTM$', 'NN'),
(r'^Android$', 'NN'),
(r'^Any$', 'NN'),
(r'^Appropriate', 'NN'),
(r'^APPROPRIATE', 'NN'),
(r'^Asset$', 'NN'),
(r'^Assignment', 'NN'),
(r'^Atomic$', 'NN'),
(r'^Attribution$', 'NN'),
(r'^[Aa]uthored$', 'NN'),
(r'^Baslerstr\.?$', 'NN'),
(r'^BSD$', 'NN'),
(r'^BUT$', 'NN'),
(r'^But$', 'NN'),
(r'^Cases$', 'NN'),
(r'^Change\.?[lL]og$', 'NN'),
(r'^CHANGElogger$', 'NN'),
(r'^CHANGELOG$', 'NN'),
(r'^CHANGES$', 'NN'),
(r'^Code$', 'NN'),
(r'^Commercial', 'NN'),
(r'^Commons$', 'NN'),
# TODO: Compilation could be JUNK?
(r'^Compilation', 'NN'),
(r'^Contact', 'NN'),
(r'^Contracts?$', 'NN'),
(r'^Convention$', 'NN'),
(r'^Copying', 'NN'),
(r'^COPYING', 'NN'),
(r'^Customer', 'NN'),
(r'^Custom$', 'NN'),
(r'^Data$', 'NN'),
(r'^Date$', 'NN'),
(r'^DATED', 'NN'),
(r'^Delay', 'NN'),
(r'^Derivative', 'NN'),
(r'^DISCLAIMED', 'NN'),
(r'^Docs?$', 'NN'),
(r'^DOCUMENTATION', 'NN'),
(r'^DOM$', 'NN'),
(r'^Do$', 'NN'),
(r'^DoubleClick$', 'NN'),
(r'^Each$', 'NN'),
(r'^Education$', 'NN'),
(r'^E-?[Mm]ail\:?$', 'NN'),
(r'^END$', 'NN'),
(r'^Entity$', 'NN'),
(r'^Example', 'NN'),
(r'^Except', 'NN'),
(r'^Experimental$', 'NN'),
(r'^F2Wku$', 'NN'),
(r'^False$', 'NN'),
(r'^FALSE$', 'NN'),
(r'^FAQ', 'NN'),
(r'^Foreign', 'NN'),
(r'^From$', 'NN'),
(r'^Further', 'NN'),
(r'^Gaim$', 'NN'),
(r'^Generated', 'NN'),
(r'^Glib$', 'NN'),
(r'^GPLd', 'NN'),
(r'^GPL\'d', 'NN'),
(r'^Gnome$', 'NN'),
(r'^GnuPG$', 'NN'),
(r'^Government.', 'NNP'),
(r'^Government', 'NN'),
(r'^Grants?\.?,?$', 'NN'),
(r'^Header', 'NN'),
(r'^HylaFAX$', 'NN'),
(r'^IA64$', 'NN'),
(r'^IDEA$', 'NN'),
(r'^Id$', 'NN'),
(r'^IDENTIFICATION?\.?$', 'NN'),
(r'^IEEE$', 'NN'),
(r'^If$', 'NN'),
(r'^[Ii]ntltool$', 'NN'),
(r'^Immediately$', 'NN'),
(r'^Implementation', 'NN'),
(r'^Improvement', 'NN'),
(r'^INCLUDING', 'NN'),
(r'^Indemnification', 'NN'),
(r'^Indemnified', 'NN'),
(r'^Information', 'NN'),
(r'^In$', 'NN'),
(r'^Intellij$', 'NN'),
(r'^ISC-LICENSE$', 'NN'),
(r'^IS$', 'NN'),
(r'^It$', 'NN'),
(r'^Java$', 'NN'),
(r'^JavaScript$', 'NN'),
(r'^JMagnetic$', 'NN'),
(r'^Joint$', 'NN'),
(r'^Jsunittest$', 'NN'),
(r'^List$', 'NN'),
(r'^Set$', 'NN'),
(r'^Last$', 'NN'),
(r'^LAW', 'NN'),
(r'^Legal$', 'NN'),
(r'^LegalTrademarks$', 'NN'),
(r'^Library$', 'NN'),
(r'^Libraries$', 'NN'),
(r'^Licen[cs]e', 'NN'),
(r'^License-Alias\:?$', 'NN'),
(r'^Linux$', 'NN'),
(r'^Locker$', 'NN'),
(r'^Log$', 'NN'),
(r'^Logos?$', 'NN'),
(r'^Luxi$', 'NN'),
(r'^Mac$', 'NN'),
(r'^Manager$', 'NN'),
(r'^Material$', 'NN'),
(r'^Mode$', 'NN'),
(r'^Modified$', 'NN'),
(r'^Mouse$', 'NN'),
(r'^Module$', 'NN'),
(r'^Natural$', 'NN'),
(r'^New$', 'NN'),
(r'^NEWS$', 'NN'),
(r'^Neither$', 'NN'),
(r'^Norwegian$', 'NN'),
(r'^Notes?$', 'NN'),
(r'^NOTICE', 'NN'),
(r'^NOT$', 'NN'),
(r'^NULL$', 'NN'),
(r'^Objects$', 'NN'),
(r'^Open$', 'NN'),
(r'^Operating$', 'NN'),
(r'^OriginalFilename$', 'NN'),
(r'^Original$', 'NN'),
(r'^OR$', 'NN'),
(r'^OWNER', 'NN'),
(r'^Package$', 'NN'),
(r'^PACKAGE$', 'NN'),
(r'^Packaging$', 'NN'),
(r'^Patent', 'NN'),
(r'^Pentium$', 'NN'),
(r'^[Pp]ermission', 'NN'),
(r'^PERMISSIONS?', 'NN'),
(r'^PGP$', 'NN'),
(r'^Phrase', 'NN'),
(r'^Plugin', 'NN'),
(r'^Policy', 'NN'),
(r'^POSIX$', 'NN'),
(r'^Possible', 'NN'),
(r'^Powered$', 'NN'),
(r'^Predefined$', 'NN'),
(r'^Products?\.?$', 'NN'),
(r'^PROFESSIONAL?\.?$', 'NN'),
(r'^Programming$', 'NN'),
(r'^PROOF', 'NN'),
(r'^PROVIDED$', 'NN'),
(r'^Public\.?$', 'NN'),
(r'^Qualified$', 'NN'),
(r'^RCSfile$', 'NN'),
(r'^README$', 'NN'),
(r'^Read$', 'NN'),
(r'^RECURSIVE$', 'NN'),
(r'^Redistribution', 'NN'),
(r'^References', 'NN'),
(r'^Related$', 'NN'),
(r'^Release', 'NN'),
(r'^Revision', 'NN'),
(r'^RIGHT', 'NN'),
(r'^[Rr]espective', 'NN'),
(r'^SAX$', 'NN'),
(r'^Section', 'NN'),
(r'^Send$', 'NN'),
(r'^Separa', 'NN'),
(r'^Service$', 'NN'),
(r'^Several$', 'NN'),
(r'^SIGN$', 'NN'),
(r'^Site\.?$', 'NN'),
(r'^Statement', 'NN'),
(r'^software$', 'NN'),
(r'^SOFTWARE$', 'NN'),
(r'^So$', 'NN'),
(r'^Sort$', 'NN'),
(r'^Source$', 'NN'),
(r'^Standard$', 'NN'),
(r'^Std$', 'NN'),
(r'^Supplicant', 'NN'),
(r'^Support', 'NN'),
(r'^TagSoup$', 'NN'),
(r'^Target$', 'NN'),
(r'^Technical$', 'NN'),
(r'^Termination$', 'NN'),
(r'^The$', 'NN'),
(r'^THE', 'NN'),
(r'^These$', 'NN'),
(r'^[tT]here$', 'NN'),
(r'^This$', 'NN'),
(r'^THIS$', 'NN'),
(r'^Those$', 'NN'),
(r'^Timer', 'NN'),
(r'^TODO$', 'NN'),
(r'^Tool.?$', 'NN'),
(r'^Trademarks?$', 'NN'),
(r'^True$', 'NN'),
(r'^TRUE$', 'NN'),
(r'^[Tt]ext$', 'NN'),
(r'^Unicode$', 'NN'),
(r'^Updated', 'NN'),
(r'^URL$', 'NN'),
(r'^Users?$', 'NN'),
(r'^VALUE$', 'NN'),
(r'^Various', 'NN'),
(r'^Vendor', 'NN'),
(r'^VIEW$', 'NN'),
(r'^Visit', 'NN'),
(r'^Website', 'NN'),
(r'^Wheel$', 'NN'),
(r'^Win32$', 'NN'),
(r'^Work', 'NN'),
(r'^WPA$', 'NN'),
(r'^Xalan$', 'NN'),
(r'^YOUR', 'NN'),
(r'^Your', 'NN'),
(r'^DateTime', 'NN'),
(r'^Create$', 'NN'),
(r'^Engine\.$', 'NN'),
(r'^While$', 'NN'),
(r'^Review', 'NN'),
(r'^Help', 'NN'),
(r'^Web', 'NN'),
# alone this is not enough for an NNP
(r'^Free$', 'NN'),
# Hours/Date/Day/Month text references
(r'^am$', 'NN'),
(r'^pm$', 'NN'),
(r'^AM$', 'NN'),
(r'^PM$', 'NN'),
(r'^January$', 'NN'),
(r'^February$', 'NN'),
(r'^March$', 'NN'),
(r'^April$', 'NN'),
(r'^May$', 'NN'),
(r'^June$', 'NN'),
(r'^July$', 'NN'),
(r'^August$', 'NN'),
(r'^September$', 'NN'),
(r'^October$', 'NN'),
(r'^November$', 'NN'),
(r'^December$', 'NN'),
(r'^Name[\.,]?$', 'NN'),
(r'^Co-Author[\.,]?$', 'NN'),
(r'^Author\'s$', 'NN'),
(r'^Co-Author\'s$', 'NN'),
# the Universal Copyright Convention (1971 Paris text).
(r'^Convention[\.,]?$', 'NN'),
(r'^Paris[\.,]?$', 'NN'),
# we do not include Jan and Jun that are common enough first names
(r'^(Feb|Mar|Apr|May|Jul|Aug|Sep|Oct|Nov|Dec)$', 'NN'),
(r'^(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)$', 'NN'),
(r'^(Mon|Tue|Wed|Thu|Fri|Sat|Sun)$', 'NN'),
############################################################################
# Proper Nouns
############################################################################
# Title case word with a trailing parens is an NNP
(r'^[A-Z][a-z]{3,}\)$', 'NNP'),
# names with a slash that are NNP
# Research/Unidata, LCS/Telegraphics.
(r'^('
r'[A-Z]'
r'([a-z]|[A-Z])+'
r'/'
r'[A-Z][a-z]+[\.,]?'
r')$', 'NNP'),
# communications
(r'communications', 'NNP'),
# Places: TODO: these are NOT NNPs but we treat them as such for now
(r'^\(?'
r'(?:Cambridge|Stockholm|Davis|Sweden[\)\.]?'
r'|Massachusetts'
r'|Oregon'
r'|California'
r'|Norway'
r'|UK'
r'|Berlin'
r'|CONCORD'
r'|Manchester'
r'|MASSACHUSETTS'
r'|Finland'
r'|Espoo'
r'|Munich'
r'|Germany'
r'|Italy'
r'|Spain'
r'|Europe'
r'|Lafayette'
r'|Indiana'
r')[\),\.]?$', 'NNP'),
# Misc corner case combos (mixed, NN or CAPS) that are NNP
(r'^Software,\',$', 'NNP'),
(r'\(Royal$', 'NNP'),
(r'PARADIGM$', 'NNP'),
(r'vFeed$', 'NNP'),
(r'nexB$', 'NNP'),
(r'UserTesting$', 'NNP'),
(r'D\.T\.Shield\.?$', 'NNP'),
(r'Antill\',$', 'NNP'),
# Corner cases of lowercased NNPs
(r'^suzuki$', 'NNP'),
(r'toshiya\.?$', 'NNP'),
(r'leethomason$', 'NNP'),
(r'finney$', 'NNP'),
(r'sean$', 'NNP'),
(r'chris$', 'NNP'),
(r'ulrich$', 'NNP'),
(r'wadim$', 'NNP'),
(r'dziedzic$', 'NNP'),
(r'okunishinishi$', 'NNP'),
(r'yiminghe$', 'NNP'),
(r'daniel$', 'NNP'),
(r'wirtz$', 'NNP'),
(r'vonautomatisch$', 'NNP'),
(r'werkstaetten\.?$', 'NNP'),
(r'werken$', 'NNP'),
(r'various\.?$', 'NNP'),
# treat Attributable as proper noun as it is seen in Author tags such as in:
# @author not attributable
(r'^[Aa]ttributable$', 'NNP'),
# rarer caps
# EPFL-LRC/ICA
(r'^[A-Z]{3,6}-[A-Z]{3,6}/[A-Z]{3,6}', 'NNP'),
############################################################################
# Named entities: companies, groups, universities, etc
############################################################################
# AT&T (the company), needs special handling
(r'^AT\&T[\.,]?$', 'COMP'),
# company name with an attached ltd suffix such as Tech.,ltd
(r'^[A-Z][a-z]+[\.,]+(LTD|LTd|LtD|Ltd|ltd|lTD|lTd|ltD).?,?$', 'COMP'),
# company suffix
(r'^[Ii]nc[.]?[,\.]?\)?$', 'COMP'),
(r'^Incorporated[,\.]?\)?$', 'COMP'),
# ,Inc. suffix without spaces is directly a company name
(r'^.+,Inc\.$', 'COMPANY'),
(r'^[Cc]ompany[,\.]?\)?$', 'COMP'),
(r'^Limited[,\.]??$', 'COMP'),
(r'^LIMITED[,\.]??$', 'COMP'),
# Caps company suffixes
(r'^INC[\.,\)]*$', 'COMP'),
(r'^INCORPORATED[\.,\)]*$', 'COMP'),
(r'^CORP[\.,\)]*$', 'COMP'),
(r'^CORPORATION[\.,\)]*$', 'COMP'),
(r'^FOUNDATION[\.,\)]*$', 'COMP'),
(r'^GROUP[\.,\)]*$', 'COMP'),
(r'^COMPANY[\.,\)]*$', 'COMP'),
(r'^\(tm\)[\.,]?$', 'COMP'),
(r'^[Ff]orum[\.,\)]*', 'COMP'),
# company suffix
(r'^[Cc]orp[\.,\)]*$', 'COMP'),
(r'^[Cc]orporation[\.,\)]*$', 'COMP'),
(r'^[Cc][oO][\.,\)]*$', 'COMP'),
(r'^[Cc]orporations?[\.,\)]*$', 'COMP'),
(r'^[Cc]onsortium[\.,\)]*$', 'COMP'),
(r'^[Ff]oundation[\.,\)]*$', 'COMP'),
(r'^[Aa]lliance[\.,\)]*$', 'COMP'),
(r'^Working$', 'COMP'),
(r'^[Gg]roup[\.,\)]*$', 'COMP'),
(r'^[Tt]echnolog(y|ies)[\.,\)]*$', 'COMP'),
(r'^[Cc]ommunit(y|ies)[\.,\)]*$', 'COMP'),
(r'^[Mm]icrosystems[\.,\)]*$', 'COMP'),
(r'^[Pp]rojects?[\.,\)]*,?$', 'COMP'),
(r'^[Tt]eams?[\.,\)]*$', 'COMP'),
(r'^[Tt]ech[\.,\)]*$', 'COMP'),
(r"^Limited'?[\.,\)]*$", 'COMP'),
# company suffix : LLC, LTD, LLP followed by one extra char
(r'^[Ll][Tt][Dd]\.?,?$', 'COMP'),
(r'^[Ll]\.?[Ll]\.?[CcPp]\.?,?$', 'COMP'),
(r'^L\.P\.?$', 'COMP'),
(r'^[Ss]ubsidiary$', 'COMP'),
(r'^[Ss]ubsidiaries\.?$', 'COMP'),
(r'^[Ss]ubsidiary\(\-ies\)\.?$', 'COMP'),
# company suffix : SA, SAS, AG, AB, AS, CO, labs followed by a dot
(r'^(S\.?A\.?S?|Sas|sas|A\/S|AG,?|AB|Labs?|[Cc][Oo]|Research|Center|INRIA|Societe)\.?$', 'COMP'),
# French SARL
(r'^(SARL|S\.A\.R\.L\.)[\.,\)]*$', 'COMP'),
# company suffix : AS: this is frequent beyond Norway.
(r'^AS.$', 'COMP'),
(r'^AS', 'CAPS'),
# (german) company suffix
(r'^[Gg][Mm][Bb][Hh].?$', 'COMP'),
# (e.V. german) company suffix
(r'^[eV]\.[vV]\.?$', 'COMP'),
# (italian) company suffix
(r'^[sS]\.[pP]\.[aA]\.?$', 'COMP'),
# swedish company suffix : ASA followed by a dot
(r'^ASA.?$', 'COMP'),
# czech company suffix: JetBrains s.r.o.
(r'^s\.r\.o\.?$', 'COMP'),
# (Laboratory) company suffix
(r'^(Labs?|Laboratory|Laboratories|Laboratoire)\.?,?$', 'COMP'),
# (dutch and belgian) company suffix
(r'^[Bb]\.?[Vv]\.?|BVBA$', 'COMP'),
# university
(r'^\(?[Uu]niv(?:[.]|ersit(?:y|e|at?|ad?))[\.,\)]*$', 'UNI'),
(r'^UNIVERSITY$', 'UNI'),
(r'^College$', 'UNI'),
# Academia/ie
(r'^[Aa]cademi[ae]s?$', 'UNI'),
# Academy
(r'^[Aa]cademy[\.,\)]*$', 'UNI'),
# institutes
(r'INSTITUTE', 'COMP'),
(r'^\(?[Ii]nstitut(s|o|os|e|es|et|a|at|as|u|i)?\)?$', 'COMP'),
# Facility
(r'Facility', 'COMP'),
(r'Tecnologia', 'COMP'),
# (danish) company suffix
(r'^ApS|A\/S|IVS\.?,?$', 'COMP'),
# (finnish) company suffix
(r'^Abp\.?,?$', 'COMP'),
# "holders" is considered Special
(r'^([Hh]olders?|HOLDERS?).?$', 'HOLDER'),
# affiliates or "and its affiliate(s)."
(r'^[Aa]ffiliate(s|\(s\))?\.?$', 'NNP'),
# OU as in Org unit, found in some certificates
(r'^OU$', 'OU'),
############################################################################
# AUTHORS
############################################################################
# "authors" or "contributors" is interesting, and so a tag of its own
(r'^[Aa]uthor$', 'AUTH'),
(r'^[Aa]uthors?\.$', 'AUTHDOT'),
(r'^Authors$', 'AUTHS'),
(r'^authors|author\'$', 'AUTHS'),
(r'^[Aa]uthor\(s\)\.?$', 'AUTHS'),
(r'^@author$', 'AUTH'),
(r'^[Cc]ontribut(ors|ing)\.?$', 'CONTRIBUTORS'),
(r'^contributors,$', 'CONTRIBUTORS'),
(r'^Contributor[,.]?$', 'NN'),
(r'^Licensor[,.]?$', 'NN'),
# same for developed, etc...
(r'^[Cc]oded$', 'AUTH2'),
(r'^[Rr]ecoded$', 'AUTH2'),
(r'^[Mm]odified$', 'AUTH2'),
(r'^[Cc]reated$', 'AUTH2'),
(r'^[Ww]ritten$', 'AUTH2'),
(r'^[Mm]aintained$', 'AUTH2'),
(r'^[Dd]eveloped$', 'AUTH2'),
# committers is interesting, and so a tag of its own
(r'[Cc]ommitters\.??', 'COMMIT'),
# same for maintainers, developers, admins.
(r'^[Aa]dmins?$', 'MAINT'),
(r'^[Dd]evelopers?\.?$', 'MAINT'),
(r'^[Mm]aintainers?\.?$', 'MAINT'),
(r'^co-maintainers?$', 'MAINT'),
############################################################################
# Conjunctions and related
############################################################################
(r'^OF$', 'OF'),
(r'^of$', 'OF'),
(r'^Of$', 'OF'),
# DE/de/di: OF:
# FIXME this conflicts with VAN??
(r'^De$', 'OF'),
(r'^DE$', 'OF'),
(r'^Di$', 'OF'),
(r'^di$', 'OF'),
# in
(r'^in$', 'IN'),
(r'^en$', 'IN'),
# by
(r'^by$', 'BY'),
(r'^BY$', 'BY'),
(r'^By$', 'BY'),
# conjunction: and
(r'^and$', 'CC'),
(r'^And$', 'CC'),
(r'^AND$', 'CC'),
(r'^and/or$', 'CC'),
(r'^&$', 'CC'),
(r'^at$', 'CC'),
(r'^et$', 'CC'),
(r'^Et$', 'CC'),
(r'^ET$', 'CC'),
(r'^Und$', 'CC'),
(r'^und$', 'CC'),
# solo comma as a conjunction
(r'^,$', 'CC'),
# ie. in things like "Copyright (c) 2012 John Li and others"
# or et.al.
(r'^[Oo]ther?s[\.,]?$', 'OTH'),
(r'^et\. ?al[\.,]?$', 'OTH'),
# in year ranges: dash, slash, or 'to': "1990-1995", "1990/1995" or "1990 to 1995"
(r'^-$', 'DASH'),
(r'^/$', 'DASH'),
(r'^to$', 'TO'),
# Portions copyright .... are worth keeping
(r'[Pp]ortions?|[Pp]arts?', 'PORTIONS'),
# in dutch/german names, like Marco van Basten, or Klemens von Metternich
# and Spanish/French Da Silva and De Gaulle
(r'^(([Vv][ao]n)|[Dd][aeu])$', 'VAN'),
(r'^van$', 'VAN'),
(r'^Van$', 'VAN'),
(r'^von$', 'VAN'),
(r'^Von$', 'VAN'),
(r'^Da$', 'VAN'),
(r'^da$', 'VAN'),
(r'^De$', 'VAN'),
(r'^de$', 'VAN'),
(r'^Du$', 'VAN'),
(r'^du$', 'VAN'),
############################################################################
# Years and Year ranges
############################################################################
# rare cases of a trailing + sign on years
(r'^20[0-1][0-9]\+$', 'YR-PLUS'),
# year or year ranges
# plain year with various leading and trailing punct
# dual or multi years 1994/1995. or 1994-1995
# 1987,88,89,90,91,92,93,94,95,96,98,99,2000,2001,2002,2003,2004,2006
# multi years
# dual years with second part abbreviated
# 1994/95. or 2002-04 or 1991-9
(r'^' + _PUNCT + _YEAR_OR_YEAR_YEAR_WITH_PUNCT + '+' +
'(' +
_YEAR_OR_YEAR_YEAR_WITH_PUNCT +
'|' +
_YEAR_THEN_YEAR_SHORT +
')*' + '$', 'YR'),
(r'^' + _PUNCT + _YEAR_OR_YEAR_YEAR_WITH_PUNCT + '+' +
'(' +
_YEAR_OR_YEAR_YEAR_WITH_PUNCT +
'|' +
_YEAR_THEN_YEAR_SHORT +
'|' +
_YEAR_SHORT_PUNCT +
')*' + '$', 'YR'),
(r'^(' + _YEAR_YEAR + ')+$', 'YR'),
# 88, 93, 94, 95, 96: this is a pattern mostly used in FSF copyrights
(r'^[8-9][0-9],$', 'YR'),
# 80 to 99: this is a pattern mostly used in FSF copyrights
(r'^[8-9][0-9]$', 'BARE-YR'),
# weird year
(r'today.year', 'YR'),
(r'^\$?LastChangedDate\$?$', 'YR'),
# Copyright templates in W3C documents
(r'^\$?date-of-software$', 'YR'),
(r'^\$?date-of-document$', 'YR'),
# cardinal numbers
(r'^-?[0-9]+(.[0-9]+)?.?$', 'CD'),
############################################################################
# All caps and proper nouns
############################################################################
# composed proper nouns, ie. Jean-Claude or ST-Microelectronics
# FIXME: what about a variant with spaces around the dash?
(r'^[A-Z][a-zA-Z]*\s?[\-]\s?[A-Z]?[a-zA-Z]+.?$', 'NNP'),
# Country abbreviations
(r'^U\.S\.A\.?$', 'NNP'),
# Dotted ALL CAPS initials
(r'^([A-Z]\.){1,3}$', 'NNP'),
# misc corner cases such as LaTeX3 Project and others
(r'^LaTeX3$', 'NNP'),
(r'^Meridian\'93$', 'NNP'),
(r'^Xiph.Org$', 'NNP'),
(r'^iClick,?$', 'NNP'),
# proper nouns with digits
(r'^([A-Z][a-z0-9]+){1,2}\.?$', 'NNP'),
# saxon genitive, ie. Philippe's
(r"^[A-Z][a-z]+[']s$", 'NNP'),
# Uppercase dotted name, ie. P. or DMTF.
(r'^([A-Z]+\.)+$', 'PN'),
# proper noun with some separator and trailing comma
(r'^[A-Z]+[.][A-Z][a-z]+[,]?$', 'NNP'),
# proper noun with apostrophe ': D'Orleans, D'Arcy, T'so, Ts'o
(r"^[A-Z][a-z]?['][A-Z]?[a-z]+[,.]?$", 'NNP'),
# proper noun with apostrophe ': d'Itri
(r"^[a-z]['][A-Z]?[a-z]+[,\.]?$", 'NNP'),
# all CAPS word, at least 1 char long such as MIT, including an optional trailing comma or dot
(r'^[A-Z0-9]+[,]?$', 'CAPS'),
# all caps word 3 chars and more, enclosed in parens
(r'^\([A-Z0-9]{2,}\)$', 'CAPS'),
# all CAPS word, all letters including an optional trailing single quote
(r"^[A-Z]{2,}\'?$", 'CAPS'),
# proper noun: first CAP, including optional trailing comma
# note: this also captures a bare comma as an NNP ... this is a bug
(r'^([A-Z][a-zA-Z0-9]+){,2}\.?,?$', 'NNP'),
############################################################################
# URLS and emails
############################################################################
# email start-at-end: <sebastian.classen at freenet.ag>: <EMAIL_START> <AT> <EMAIL_END>
(r'^<([a-zA-Z]+[a-zA-Z\.]){2,5}$', 'EMAIL_START'),
(r'^[a-zA-Z\.]{2,5}>$', 'EMAIL_END'),
# a .sh shell scripts is NOT an email.
(r'^.*\.sh\.?$', 'JUNK'),
# email, possibly in parens or brackets, with some trailing punct.
(r'^[\<\(]?[a-zA-Z0-9]+[a-zA-Z0-9\+_\-\.\%]*(@|at)[a-zA-Z0-9][a-zA-Z0-9\+_\-\.\%]+\.[a-zA-Z]{2,5}?[\>\)\.\,]*$', 'EMAIL'),
# URLS such as <(http://fedorahosted.org/lohit)> or ()
(r'[<\(]https?:.*[>\)]', 'URL'),
# URLS such as ibm.com without a scheme
(r'\s?[a-z0-9A-Z\-\.\_]+\.([Cc][Oo][Mm]|[Nn][Ee][Tt]|[Oo][Rr][Gg]|us|mil|io|edu|co\.[a-z][a-z]|eu|ch|fr|de|be|se|nl|au|biz)\s?\.?$', 'URL2'),
# TODO: add more extensions: there are so many TLDs these days!
# URL wrapped in () or <>
(r'[\(<]+\s?[a-z0-9A-Z\-\.\_]+\.(com|net|org|us|mil|io|edu|co\.[a-z][a-z]|eu|ch|fr|jp|de|be|se|nl|au|biz)\s?[\.\)>]+$', 'URL'),
(r'<?a?.(href)?.\(?[a-z0-9A-Z\-\.\_]+\.(com|net|org|us|mil|io|edu|co\.[a-z][a-z]|eu|ch|fr|jp|de|be|se|nl|au|biz)[\.\)>]?$', 'URL'),
# derived from regex in cluecode.finder
(r'<?a?.(href)?.('
r'(?:http|ftp|sftp)s?://[^\s<>\[\]"]+'
r'|(?:www|ftp)\.[^\s<>\[\]"]+'
r')\.?>?', 'URL'),
(r'^\(?<?https?://[a-zA-Z0-9_\-]+(\.([a-zA-Z0-9_\-])+)+.?\)?>?$', 'URL'),
# URLS with trailing/ such as http://fedorahosted.org/lohit/
# URLS with leading( such as (http://qbnz.com/highlighter/
(r'\(?https?:.*/', 'URL'),
############################################################################
# Misc
############################################################################
# .\" is not a noun
(r'^\.\\\?"?$', 'JUNK'),
# Mixed cap nouns (rare) LeGrande
(r'^[A-Z][a-z]+[A-Z][a-z]+[\.\,]?$', 'MIXEDCAP'),
# Code variable names including snake case
(r'^.*(_.*)+$', 'JUNK'),
# !$?
(r'^\!\$\?$', 'JUNK'),
# things composed only of non-word letters (e.g. junk punctuations)
# but keeping _ ? and () as parts of words
(r'^[^\w\?()]{2,10}$', 'JUNK'),
############################################################################
# catch all other as Nouns
############################################################################
# nouns (default)
(r'.+', 'NN'),
]
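# Illustrative sketch of how the patterns above are used (the Lexer applies
# them in order and the first matching regex assigns the token label; the
# labels shown are indicative only):
#
#   lexer = lex.Lexer(patterns)
#   tokens = get_tokens([(1, 'Copyright 2008 nexB Inc.')])
#   [(t.value, t.label) for t in lexer.lex_tokens(tokens)]
#   -> roughly [('Copyright', 'COPY'), ('2008', 'YR'), ('nexB', 'NNP'), ('Inc.', 'COMP')]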
# Comments in the Grammar are lines that start with #
# End of line comments are rule descriptions.
# One rule per line.
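# For example (taken from the grammar below), a rule such as:
#   COPYRIGHT: {<COPY> <NAME> <COPY> <YR-RANGE>} #1510
# groups a sequence of already-labeled tokens/trees (COPY, NAME, COPY,
# YR-RANGE) into a new COPYRIGHT tree node; the trailing '#1510' text is only
# a rule description used when tracing.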
grammar = """
#######################################
# YEARS
#######################################
YR-RANGE: {<YR>+ <CC>+ <YR>} #20
YR-RANGE: {<YR> <DASH|TO>* <YR|BARE-YR>+} #30
YR-RANGE: {<CD|BARE-YR>? <YR> <BARE-YR>?} #40
YR-RANGE: {<YR>+ <BARE-YR>? } #50
YR-AND: {<CC>? <YR>+ <CC>+ <YR>} #60
YR-RANGE: {<YR-AND>+} #70
YR-RANGE: {<YR-RANGE>+ <DASH|TO> <YR-RANGE>+} #71
YR-RANGE: {<YR-RANGE>+ <DASH>?} #72
CD: {<BARE-YR>} #bareyear
#######################################
# All/No/Some Rights Reserved
#######################################
# All/No/Some Rights Reserved OR All Rights Are Reserved
ALLRIGHTRESERVED: {<NNP|NN|CAPS> <RIGHT> <NNP|NN|CAPS>? <RESERVED>} #allrightsreserved
#######################################
# COMPOSITE emails
#######################################
EMAIL: {<EMAIL_START> <CC> <NN>* <EMAIL_END>} # composite_email
#######################################
# NAMES and COMPANIES
#######################################
# two CC such as ", and" are treated as a single CC
CC: {<CC><CC>} #73
NAME: {<NAME><NNP>} #75
NAME: {<NN|NNP> <CC> <URL|URL2>} #80
# the Tor Project, Inc.
COMP: {<COMP> <COMP>+} #81
# Laboratory for Computer Science Research Computing Facility
COMPANY: {<COMP> <NN> <NNP> <NNP> <COMP> <NNP> <COMP>} #83
COMPANY: {<COMP> <NN> <NNP> <NNP> <COMP>} #82
# E. I. du Pont de Nemours and Company
COMPANY: {<NNP> <NNP> <VAN> <NNP> <OF> <NNP> <CC> <COMP>} #1010
# Robert A. van Engelen OR NetGroup, Politecnico di Torino (Italy)
NAME: {<NNP>+ <VAN|OF> <NNP>+} #88
NAME: {<NNP> <VAN|OF> <NN*> <NNP>} #90
NAME: {<NNP> <PN> <VAN> <NNP>} #100
# by the netfilter coreteam <coreteam@netfilter.org>
NAME: {<BY> <NN>+ <EMAIL>} #110
# Kaleb S. KEITHLEY
NAME: {<NNP> <PN> <CAPS>} #120
# Trolltech AS, Norway.
NAME: {<NNP> <CAPS> <NNP>} #121
# BY GEORGE J. CARRETTE
NAME: {<BY> <CAPS> <PN> <CAPS>} #85
DASHCAPS: {<DASH> <CAPS>}
# INRIA - CIRAD - INRA
COMPANY: {<COMP> <DASHCAPS>+} #1280
# Project Admins leethomason
COMPANY: {<COMP> <MAINT> <NNP>+} #1281
# the Regents of the University of California
COMPANY: {<BY>? <NN> <NNP> <OF> <NN> <UNI> <OF> <COMPANY|NAME|NAME-EMAIL><COMP>?} #130
# Free Software Foundation, Inc.
COMPANY: {<NN|NNP> <NNP> <COMP> <COMP>} #135
# Mediatrix Telecom, inc. <ericb@mediatrix.com>
COMPANY: {<NNP>+ <COMP> <EMAIL>} #136
# Corporation/COMP for/NN National/NNP Research/COMP Initiatives/NNP
COMPANY: {<COMP> <NN> <NNP> <COMP> <NNP>} #140
# Sun Microsystems, Inc. Mountain View
COMPANY: {<COMP> <COMP> <NNP><NNP>} #144
# AT&T Laboratories, Cambridge
COMPANY: {<COMP> <COMP> <NNP>} #145
# rare "Software in the public interest, Inc."
COMPANY: {<COMP> <CD> <COMP>} #170
COMPANY: {<NNP> <IN><NN> <NNP> <NNP>+<COMP>?} #180
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
COMPANY: {<NNP> <NNP> <CC> <NNP> <COMP> <NNP> <CAPS>}
COMPANY: {<NNP> <CC> <NNP> <COMP> <NNP>*} #200
# Android Open Source Project, 3Dfx Interactive, Inc.
COMPANY: {<NN>? <NN> <NNP> <COMP>} #205
NAME: {<NNP> <NNP> <COMP> <CONTRIBUTORS> <URL|URL2>} #206
# Thai Open Source Software Center Ltd
# NNP NN NNP NNP COMP COMP
COMPANY: {<NNP> <NN> <NNP> <NNP> <COMP>+} #207
# Massachusetts Institute of Technology
COMPANY: {<NNP> <COMP|COMPANY> <OF> <NNP>+} #208
COMPANY: {<NNP|CAPS>+ <COMP|COMPANY>+} #210
# University of Southern California, Information Sciences Institute (ISI)
COMPANY: {<UNI> <OF> <COMPANY> <CAPS>?} #211
COMPANY: {<UNI|NNP> <VAN|OF> <NNP>+ <UNI>?} #220
COMPANY: {<NNP>+ <UNI>} #230
COMPANY: {<UNI> <OF> <NN|NNP>} #240
COMPANY: {<COMPANY> <CC> <COMPANY>} #250
# University of Southern California, Information Sciences Institute (ISI)
COMPANY: {<COMPANY> <COMPANY> <CAPS>} #251
# University of Technology
COMPANY: {<UNI> <OF> <COMP|COMPANY>} #252
# GNOME i18n Project for Vietnamese
COMPANY: {<CAPS> <NN> <COMP> <NN> <NNP>} #253
COMPANY: {<CAPS> <NN> <COMP>} #255
# Project contributors
COMPANY: {<COMP> <CONTRIBUTORS>} #256
COMPANY: {<COMP>+} #260
# Nokia Corporation and/or its subsidiary(-ies)
COMPANY: {<COMPANY> <CC> <NN> <COMPANY>} #265
COMPANY: {<COMPANY> <CC> <NNP>+} #270
# AIRVENT SAM s.p.a - RIMINI(ITALY)
COMPANY: {<COMPANY> <DASH> <NNP|NN> <EMAIL>?} #290
# Typical names
#John Robert LoVerso
NAME: {<NNP> <NNP> <MIXEDCAP>} #340
# Kaleb S. KEITHLEY
NAME: {<NNP> <NNP> <CAPS>} #345
# Copyright (c) 2006, Industrial Light & Magic
NAME: {<NNP> <NNP> <CC> <NNP>+} #346
# NAME-YEAR starts or ends with a YEAR range
NAME-YEAR: {<YR-RANGE> <NNP> <NNP>+} #350
# Academy of Motion Picture Arts
NAME: {<NNP|PN>+ <NNP>+} #351
# Joe DASILVA
NAME: {<NNP> <CAPS>} #352
# <s> Gangadharan N </s>
NAME: {<NNP> <PN>+} #353
NAME: {<NNP> <NN|NNP> <EMAIL>} #390
NAME: {<NNP> <PN|VAN>? <PN|VAN>? <NNP>} #400
NAME: {<NNP> <NN> <NNP>} #410
NAME: {<NNP> <COMMIT>} #420
# the LGPL VGABios developers Team
NAME: {<NN>? <NNP> <MAINT> <COMP>} #440
# Debian Qt/KDE Maintainers
NAME: {<NNP> <NN>? <MAINT>} #460
NAME: {<NN>? <NNP> <CC> <NAME>} #480
NAME: {<NN>? <NNP> <OF> <NN>? <NNP> <NNP>?} #490
# Academy of Motion Picture Arts and Sciences
NAME: {<NNP|PN>+ <CC>+ <NNP>+} #350again
NAME: {<NAME> <CC> <NAME>} #500
COMPANY: {<NNP> <IN> <NN>? <COMPANY>} #510
# and Josh MacDonald.
NAME: {<CC> <NNP> <MIXEDCAP>} #480
NAME: {<NAME> <UNI>} #483
# Kungliga Tekniska Hogskolan (Royal Institute of Technology, Stockholm, Sweden)
COMPANY: {<COMPANY> <OF> <COMPANY> <NAME> } #529
# Instituto Nokia de Tecnologia
COMPANY: {<COMPANY> <NNP> <OF> <COMPANY>} # 5391
# Laboratoire MASI - Institut Blaise Pascal
COMPANY: {<COMPANY> <CAPS> <DASH> <COMPANY> <NAME>} #5292
# Nara Institute of Science and Technology.
COMPANY: {<COMPANY> <OF> <NNP> <CC> <COMPANY> } #5293
# Instituto Nokia de Tecnologia - INdT
COMPANY: {<COMPANY> <NNP> <VAN> <COMPANY>} #52934
# NAME-EMAIL is a NAME with a trailing email
NAME-EMAIL: {<NAME> <EMAIL>} #530
# Project Mayo.
NAME-YEAR: {<YR-RANGE> <NAME-EMAIL|COMPANY>+ <NNP>?} #535
NAME-YEAR: {<YR-RANGE> <NAME-EMAIL|COMPANY>+ <CC> <YR-RANGE>} #540
NAME: {<NAME|NAME-EMAIL>+ <OF> <NNP> <OF> <NN>? <COMPANY>} #550
NAME: {<NAME|NAME-EMAIL>+ <CC|OF>? <NAME|NAME-EMAIL|COMPANY>} #560
NAME: {<NNP><NNP>} #5611
# strip Software from Copyright (c) Ian Darwin 1995. Software
NAME-YEAR: {<NAME>+ <YR-RANGE>} #5611
NAME-YEAR: {<YR-RANGE> <NNP>+ <CAPS>?} #5612
#Academy of Motion Picture Arts and Sciences
NAME: {<NAME> <CC> <NNP>} # 561
# Adam Weinberger and the GNOME Foundation
NAME: {<CC> <NN> <COMPANY>} # 565
# (c) 1991-1992, Thomas G. Lane , Part of the Independent JPEG Group's
NAME: {<PORTIONS> <OF> <NN> <NAME>+} #566
NAME-YEAR: {<YR-RANGE> <NAME>+ <CONTRIBUTORS>?} #570
#also accept trailing email and URLs
NAME-YEAR: {<NAME-YEAR> <EMAIL>?<URL>?} #5701
NAME-YEAR: {<NAME-YEAR>+} #5702
NAME: {<NNP> <OF> <NNP>} #580
NAME: {<NAME> <NNP>} #590
NAME: {<NN|NNP|CAPS>+ <CC> <OTH>} #600
NAME: {<NNP> <CAPS>} #610
NAME: {<CAPS> <DASH>? <NNP|NAME>} #620
NAME: {<NNP> <CD> <NNP>} #630
NAME: {<COMP> <NAME>+} #640
# Copyright 2018-2019 @paritytech/substrate-light-ui authors & contributors
# and other contributors
NAME: {<AUTHS>? <CC> <NN>? <CONTRIBUTORS>} #644
NAME: {<NNP|CAPS>+ <AUTHS|AUTHDOT|CONTRIBUTORS>} #660
NAME: {<VAN|OF> <NAME>} #680
NAME: {<NAME-YEAR> <COMP|COMPANY>} #690
# more names
NAME: {<NNP> <NAME>} #710
NAME: {<CC>? <IN> <NAME|NNP>} #720
NAME: {<NAME><UNI>} #730
NAME: {<NAME> <IN> <NNP> <CC|IN>+ <NNP>} #740
# by BitRouter <www.BitRouter.com>
NAME: {<BY> <NNP> <URL>} #741
# Philippe http//nexb.com joe@nexb.com
NAME: {<NNP> <URL> <EMAIL>} #742
# Companies
COMPANY: {<NAME|NAME-EMAIL|NAME-YEAR|NNP>+ <OF> <NN>? <COMPANY|COMP> <NNP>?} #770
COMPANY: {<NNP> <COMP|COMPANY> <COMP|COMPANY>} #780
COMPANY: {<NN>? <COMPANY|NAME|NAME-EMAIL> <CC> <COMPANY|NAME|NAME-EMAIL>} #790
COMPANY: {<COMP|COMPANY|NNP> <NN> <COMPANY|COMPANY> <NNP>+} #800
# by the Institute of Electrical and Electronics Engineers, Inc.
COMPANY: {<BY> <NN> <COMPANY> <OF> <NNP> <CC> <COMPANY>}
COMPANY: {<COMPANY> <CC> <AUTH|CONTRIBUTORS|AUTHS>} #810
COMPANY: {<NN> <COMP|COMPANY>+} #820
# this is catching a wide net by treating any bare URL as a company
COMPANY: {<URL|URL2>} #830
COMPANY: {<COMPANY> <COMP|COMPANY>} #840
# the Software and Component Technologies group of Trimble Navigation, Ltd.
COMPANY: {<COMPANY> <OF> <COMP|COMPANY>} #840.1
# University Corporation for Advanced Internet Development, Inc.
COMPANY: {<UNI> <COMPANY>} #845
# The Regents of the University of California
NAME: {<NN> <NNP> <OF> <NN> <COMPANY>} #870
# Trailing Authors
COMPANY: {<NAME|NAME-EMAIL|NNP>+ <CONTRIBUTORS>} #900
# Jeffrey C. Foo
COMPANY: {<PN> <COMP|COMPANY>} #910
# "And" some name
ANDCO: {<CC> <NNP> <NNP>+} #930
ANDCO: {<CC> <OTH>} #940
ANDCO: {<CC> <NN> <NAME>+} #950
# Copyright 2005-2007 <s>Christopher Montgomery</s>, <s>Jean-Marc Valin</s>,
# <s>Timothy Terriberry</s>, <s>CSIRO</s>, and other contributors
ANDCO: {<CC> <CAPS|COMPANY|NAME|NAME-EMAIL|NAME-YEAR>+} #960
COMPANY: {<COMPANY|NAME|NAME-EMAIL|NAME-YEAR> <ANDCO>+} #970
# de Nemours and Company
NAME: {<VAN>? <NNP> <ANDCO>+} #980
NAME: {<BY> <NN> <AUTH|CONTRIBUTORS|AUTHS>} #1000
# NetGroup, Politecnico di Torino (Italy)
# Chinese Service Center for Scholarly Exchange
COMPANY: {<NNP> <COMPANY> <NN|NNP> <NAME>?} #1030
# Arizona Board of Regents (University of Arizona)
NAME: {<COMPANY> <OF> <NN|NNP>} #1060
# The Regents of the University of California
NAME: {<NAME> <COMPANY>} #1090
# John Doe and Myriam Doe
NAME: {<NAME|NNP> <CC> <NNP|NAME>} #1120
# International Business Machines Corporation and others
COMPANY: {<COMPANY> <CC> <OTH>} #1150
COMPANY: {<NAME-YEAR> <CC> <OTH>} #1160
# Nara Institute of Science and Technology.
COMPANY: {<NNP> <COMPANY> <CC> <COMP>} #1190
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
COMPANY: {<NNP> <COMPANY> <NAME>} #1220
# (The) Android Open Source Project
COMPANY: {<NN><NN><NN>? <COMPANY>} #1250
# Bio++ Development Team
COMPANY: {<NN> <NNP> <COMPANY>} #1251
# Institut en recherche ....
COMPANY: {<NNP> <IN> <NN>+ <COMPANY>} #1310
# OU OISTE Foundation
COMPANY: {<OU> <COMPANY>} #1340
# MIT, W3C, NETLABS Temple University
COMPANY: {<CAPS>+ <COMPANY>} #1370
# XZY emails
COMPANY: {<COMPANY> <EMAIL>+} #1400
# by the a href http wtforms.simplecodes.com WTForms Team
COMPANY: {<BY> <NN>+ <COMP|COMPANY>} #1420
# the Regents of the University of California, Sun Microsystems, Inc., Scriptics Corporation
COMPANY: {<NN> <NNP> <OF> <NN> <UNI> <OF> <COMPANY>+}
# Copyright (c) 1998-2000 University College London
COMPANY: {<UNI> <UNI> <NNP>}
# "And" some name
ANDCO: {<CC>+ <NN> <NNP>+<UNI|COMP>?} #1430
ANDCO: {<CC>+ <NNP> <NNP>+<UNI|COMP>?} #1440
ANDCO: {<CC>+ <COMPANY|NAME|NAME-EMAIL|NAME-YEAR>+<UNI|COMP>?} #1450
COMPANY: {<COMPANY|NAME|NAME-EMAIL|NAME-YEAR> <ANDCO>+} #1460
COMPANY: {<COMPANY><COMPANY>+} #1480
# Copyright (c) 2002 World Wide Web Consortium, (Massachusetts Institute of
# Technology, Institut National de Recherche en Informatique et en Automatique, Keio University).
COMPANY: {<CC> <IN> <COMPANY>} #1490
# Oracle and/or its affiliates.
NAME: {<NNP> <ANDCO>} #1410
# the University of California, Berkeley and its contributors.
COMPANY: {<COMPANY> <CC> <NN> <CONTRIBUTORS>} #1411
# UC Berkeley and its contributors
NAME: {<NAME> <CC> <NN> <CONTRIBUTORS>} #1412
# copyrighted by Douglas C. Schmidt and his research group at Washington University,
# University of California, Irvine, and Vanderbilt University, Copyright (c) 1993-2008,
COMPANY: {<NAME> <CC> <NN> <COMPANY>+} #1413
# The University of Utah and the Regents of the University of California
COMPANY: {<NN> <COMPANY> <CC> <NN> <COMPANY>} #1414
# by the Massachusetts Institute of Technology
COMPANY: {<BY> <COMPANY> <OF> <COMPANY>} #1415
# Computer Systems and Communication Lab, Institute of Information Science, Academia Sinica.
COMPANY: {<NNP> <COMPANY> <OF> <COMPANY> <NNP>} #1416
# Copyright 2007-2010 the original author or authors.
# Copyright (c) 2007-2010 the original author or authors.
NAME: {<NN> <NN> <AUTH|CONTRIBUTORS|AUTHS> <NN> <AUTH|CONTRIBUTORS|AUTHS|AUTH|AUTHDOT>} #1960
# Copyright (C) <s>Suresh P <suresh@ippimail.com></s> #19601
NAME: {<NNP> <PN> <EMAIL>}
# Copyright or Copr. Mines Paristech, France - Mark NOBLE, Alexandrine GESRET
NAME: {<NAME> <DASH> <NAME> <CAPS>} #19601
#######################################
# VARIOUS FORMS OF COPYRIGHT
#######################################
COPYRIGHT: {<COPY> <NAME> <COPY> <YR-RANGE>} #1510
COPYRIGHT: {<COPY>+ <BY>? <COMPANY|NAME*|YR-RANGE>* <BY>? <EMAIL>+} #1530
COPYRIGHT: {<COPY>+ <NAME|NAME-EMAIL|NAME-YEAR> <CAPS> <YR-RANGE>} #1550
#Copyright . 2008 Mycom Pany, inc.
COPYRIGHT: {<COPY>+ <NN> <NAME-YEAR>} #1560
# Copyright (c) 1995-2008 Software in the Public Interest
COPYRIGHT: {<COPY>+ <NAME-YEAR> <IN> <NN><NN> <NNP>} #1562
# GeSHi (C) 2004 - 2007 Nigel McNie, MyCo Inc.
COPYRIGHT: {<NNP> <COPY>+ <NAME-YEAR> <COMPANY>+} #1565
# Copyright (c) 2013-2015 Streams Standard Reference Implementation Authors
COPYRIGHT: {<COPY>+ <NAME-YEAR> <NN|NNP>+ <AUTHS>} #1566
# Copyright (c) Ian F. Darwin 1986, 1987, 1989, 1990, 1991, 1992, 1994, 1995.
COPYRIGHT: {<COPY>+ <NAME|NAME-EMAIL|NAME-YEAR>+ <YR-RANGE>*} #157999
COPYRIGHT: {<COPY>+ <CAPS|NNP>+ <CC> <NN> <COPY> <YR-RANGE>?} #1590
COPYRIGHT: {<COPY>+ <BY>? <COMPANY|NAME*|NAME-EMAIL*>+ <YR-RANGE>*} #1610
COPYRIGHT: {<NNP>? <COPY>+ (<YR-RANGE>+ <BY>? <NN>? <COMPANY|NAME|NAME-EMAIL>+ <EMAIL>?)+} #1630
COPYRIGHT: {<COPY>+ <NN> <NAME> <YR-RANGE>} #1650
COPYRIGHT: {<COPY>+ <BY> <NAME|NAME-EMAIL|NAME-YEAR>+} #1670
COPYRIGHT: {<COPY> <COPY> <COMP>+} #1690
COPYRIGHT: {<COPY> <COPY> <NN>+ <COMPANY|NAME|NAME-EMAIL>+} #1710
COPYRIGHT: {<COPY>+ <NN> <NN>? <COMP> <YR-RANGE>?} #1730
COPYRIGHT: {<COPY>+ <NN> <NN>? <COMP> <YR-RANGE>?} #1750
COPYRIGHT: {<COPY> <NN> <NN>? <COMPANY> <YR-RANGE>?} #1760
COPYRIGHT: {<COPY>+ <YR-RANGE|NNP> <CAPS|BY>? <NNP|YR-RANGE|NAME>+} #1780
COPYRIGHT: {<COPY> <COPY> <NNP>+} #1800
# Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru>
COPYRIGHT: {<COPY> <COPY> <YR-PLUS> <NAME|NAME-EMAIL|NAME-YEAR>+} #1801
# Copyright (c) 2016 Project Admins foobar
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE>+ <COMP> <NNP> <NN>} #1830
# Copyright (c) 1995, 1996 The President and Fellows of Harvard University
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <NNP> <ANDCO>} #1860
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <NN> <AUTH|CONTRIBUTORS|AUTHS>} #1880
# Copyright 1999, 2000 - D.T.Shield.
# Copyright (c) 1999, 2000 - D.T.Shield.
COPYRIGHT2: {<COPY>+ <YR-RANGE> <DASH> <NN>} #1920
#(c) 2017 The Chromium Authors
COPYRIGHT2: {<COPY>+ <YR-RANGE> <NN> <NNP> <NN>} #1990
# Copyright (C) Research In Motion Limited 2010. All rights reserved.
COPYRIGHT2: {<COPYRIGHT> <COMPANY> <YR-RANGE>} #2020
# Copyright (c) 1999 Computer Systems and Communication Lab,
# Institute of Information Science, Academia Sinica.
COPYRIGHT2: {<COPYRIGHT> <COMPANY> <COMPANY>} #2060
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>} #2080
COPYRIGHT2: {<COPY> <YR-RANGE> <BY> <NN> <NN> <NAME>} #2090
COPYRIGHT2: {<COPY> <COPY><NN>? <COPY> <YR-RANGE> <BY> <NN>} #2110
# Copyright (c) 1992-2002 by P.J. Plauger.
COPYRIGHT2: {<COPY> <NN>? <COPY> <YR-RANGE> <BY> <NN> <NNP>?} #2115
COPYRIGHT2: {<COPY>+ <NN> <YR-RANGE> <BY> <NAME>} #2140
COPYRIGHT2: {<COPY>+ <YR-RANGE> <DASH> <BY>? <NAME-EMAIL|NAME>} #2160
COPYRIGHT2: {<COPY>+ <YR-RANGE> <NNP> <NAME>} #2180
# Copyright (c) 2012-2016, Project contributors
COPYRIGHT2: {<COPY>+ <YR-RANGE> <COMP> <AUTHS|CONTRIBUTORS>} #2210
COPYRIGHT2: {<COPY>+ <YR-RANGE> <COMP>} #2230
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE>+ <CAPS>? <MIXEDCAP>} #2240
COPYRIGHT2: {<NAME> <COPY> <YR-RANGE>} #2260
# Copyright 2008 TJ <linux@tjworld.net>
COPYRIGHT2: {<COPY> <YR-RANGE> <CAPS> <EMAIL>} #2270
# (c) Copyright 1985-1999 SOME TECHNOLOGY SYSTEMS
COPYRIGHT2: {<COPY> <COPY> <YR-RANGE> <CAPS> <CAPS> <CAPS>? <CAPS>?} #2271
# NAME-COPY is a name with a trailing copyright
# Daisy (c) 1998
NAME-COPY: {<NNP> <COPY>} #2272
COPYRIGHT2: {<NAME-COPY> <YR-RANGE>} #2273
# Scilab (c)INRIA-ENPC.
COPYRIGHT: {<NAME-COPY> <NNP>} #2274
# Copyright 1994-2007 (c) RealNetworks, Inc.
COPYRIGHT: {<COPY>+ <YR-RANGE> <COPYRIGHT>} #2274
# Copyright (c) 2017 Contributors et.al.
COPYRIGHT: {<COPY> <COPY> <YR-RANGE> <CONTRIBUTORS> <OTH> } #2276
#Copyright (c) 2020 Contributors as noted in the AUTHORS file
COPYRIGHT: {<COPY> <COPY> <YR-RANGE> <CONTRIBUTORS> <NN>* <IN>? <NN>* <CAPS|AUTHS|ATH> <JUNK> }
# copyrighted by Object Computing, Inc., St. Louis Missouri, Copyright (C) 2002, all rights reserved.
COPYRIGHT: {<COPYRIGHT> <COPY>+ <YR-RANGE> <ALLRIGHTRESERVED>} #2278
# copyrighted by Object Computing, Inc., St. Louis Missouri, Copyright (C) 2002, all rights reserved.
COPYRIGHT: {<COPYRIGHT> <COPY>+ <YR-RANGE> <ALLRIGHTRESERVED>} #2279
# Copyright (c) 2004, The Codehaus
COPYRIGHT: {<COPY> <COPY> <YR-RANGE> <NN> <NNP>} #22790
# Copyright (c) 2015, Contributors
COPYRIGHT: {<COPY>+ <YR-RANGE> <CONTRIBUTORS> <ALLRIGHTRESERVED>?} #22791
# Copyright 1996, 1997 Linux International.
COPYRIGHT: {<COPY>+ <YR-RANGE> <NN> <NNP>} #22792
# Copyright (c) 2017 odahcam
# or Copyright (c) 2019-2021, Open source contributors.
COPYRIGHT: {<COPY>+ <YR-RANGE> <NN>+ <CONTRIBUTORS>? <ALLRIGHTRESERVED>?} #22793
# Licensed material of Foobar Company, All Rights Reserved, (C) 2005
COPYRIGHT: {<COMPANY> <ALLRIGHTRESERVED> <COPYRIGHT>} #22794
COPYRIGHT2: {<COPY>+ <NN|CAPS>? <YR-RANGE>+ <PN>*} #2280
COPYRIGHT2: {<COPY>+ <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <COMPANY>?} #2300
# Copyright (c) 2014, 2015, the respective contributors All rights reserved.
COPYRIGHT: {<COPYRIGHT|COPYRIGHT2> <NN|NNP|CONTRIBUTORS>+ <ALLRIGHTRESERVED>} #2862
COPYRIGHT2: {<COPY>+ <NN|CAPS>? <YR-RANGE>+ <NN|CAPS>* <DASH> <COMPANY>} #2320
COPYRIGHT2: {<NNP|NAME|COMPANY> <COPYRIGHT2>} #2340
COPYRIGHT: {<COPYRIGHT> <NN> <COMPANY>} #2360
COPYRIGHT: {<COPY>+ <BY>? <NN> <COMPANY>} #2380
COPYRIGHT: {<COMPANY> <NN> <NAME> <COPYRIGHT2>} #2400
COPYRIGHT: {<COPYRIGHT2> <COMP> <COMPANY>} #2410
COPYRIGHT: {<COPYRIGHT2> <NNP> <CC> <COMPANY>} #2430
COPYRIGHT: {<COPYRIGHT2> <NAME|NAME-EMAIL|NAME-YEAR>+} #2860
# Rare form Copyright (c) 2008 All rights reserved by Amalasoft Corporation.
COPYRIGHT: {<COPYRIGHT2> <ALLRIGHTRESERVED> <BY> <COMPANY>} #2861
# Copyright (c) 1996 Adrian Rodriguez (adrian@franklins-tower.rutgers.edu) Laboratory for Computer Science Research Computing Facility
COPYRIGHT: {<COPYRIGHT> <NAME>} #2400
# copyrights in the style of Scilab/INRIA
COPYRIGHT: {<NNP> <NN> <COPY> <NNP>} #2460
COPYRIGHT: {<NNP> <COPY> <NNP>} #2470
# Copyright or Copr. 2006 INRIA - CIRAD - INRA
COPYRIGHT: {<COPY> <NN> <COPY> <YR-RANGE>+ <COMPANY>+} #2500
COPYRIGHT: {<COPYRIGHT|COPYRIGHT2> <COMPANY>+ <NAME>*} #2580
# iClick, Inc., software copyright (c) 1999
COPYRIGHT: {<ANDCO> <NN>? <COPYRIGHT2>} #2590
# portions copyright
COPYRIGHT: {<PORTIONS> <COPYRIGHT|COPYRIGHT2>} #2610
#copyright notice (3dfx Interactive, Inc. 1999), (notice is JUNK)
COPYRIGHT: {<COPY> <JUNK> <COMPANY> <YR-RANGE>} #2620
# Copyright (C) <2013>, GENIVI Alliance, Inc.
COPYRIGHT: {<COPYRIGHT2> <ANDCO>} #2625
# copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc.
COPYRIGHT: {<COPY> <PN> <YR-RANGE> <BY> <COMPANY> } #2630
# Copyright 1996-2004, John LoVerso.
COPYRIGHT: {<COPYRIGHT> <MIXEDCAP> } #2632
# Copyright (C) 1992, 1993, 1994, 1995 Remy Card (card@masi.ibp.fr) Laboratoire MASI - Institut Blaise Pascal
COPYRIGHT: {<COPYRIGHT> <DASH> <NAME>} #2634
# Copyright 2002, 2003 University of Southern California, Information Sciences Institute
COPYRIGHT: {<COPYRIGHT> <NN> <NAME>} #2635
# Copyright 2008 TJ <linux@tjworld.net>
COPYRIGHT: {<COPYRIGHT2> <EMAIL>} #2636
# Copyright RUSS DILL Russ <Russ.Dill@asu.edu>
COPYRIGHT: {<COPYRIGHT> <CAPS> <NAME-EMAIL>} #2637
# maintainer Norbert Tretkowski <nobse@debian.org> 2005-04-16
AUTHOR: {<BY|MAINT> <NAME-EMAIL> <YR-RANGE>?} #26382
# Russ Dill <Russ.Dill@asu.edu> 2001-2003
COPYRIGHT: {<NAME-EMAIL> <YR-RANGE>} #2638
# (C) 2001-2009, <s>Takuo KITAME, Bart Martens, and Canonical, LTD</s>
COPYRIGHT: {<COPYRIGHT> <NNP> <COMPANY>} #26381
#Copyright (c) 1995, 1996 - Blue Sky Software Corp.
COPYRIGHT: {<COPYRIGHT2> <DASH> <COMPANY>} #2639
#copyright 2000-2003 Ximian, Inc. , 2003 Gergo Erdi
COPYRIGHT: {<COPYRIGHT> <NNP> <NAME-YEAR>} #1565
#2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
COPYRIGHT: {<YR-PLUS> <COPYRIGHT>} #1566
# Copyright (c) 1992 David Giller, rafetmad@oxy.edu 1994, 1995 Eberhard Moenkeberg, emoenke@gwdg.de 1996 David van Leeuwen, david@tm.tno.nl
COPYRIGHT: {<COPYRIGHT> <EMAIL>} #2000
COPYRIGHT: {<COPYRIGHT> <NAME|NAME-YEAR>+} #2001
# copyright by M.I.T. or by MIT
COPYRIGHT: {<COPY> <BY> <NNP|CAPS>} #2002
# Copyright property of CompuServe Incorporated.
COPYRIGHT: {<COPY> <NN> <OF> <COMPANY>} #2003
# Copyright (c) 2005 DMTF.
COPYRIGHT: {<COPY> <YR-RANGE> <PN>} #2004
# Copyright (c) YEAR This_file_is_part_of_KDE
COPYRIGHT: {<COPY> <COPY> <CAPS>} #2005
# copyrighted by the Free Software Foundation
COPYRIGHT: {<COPY> <BY> <NN>? <NNP>? <COMPANY>} #2006
# copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc
COPYRIGHT: {<COPY> <PN>? <YR-RANGE> <BY> <NN> <NAME>} #2007
# Copyright (C) 2005 SUSE Linux Products GmbH.
COPYRIGHT: {<COPYRIGHT2> <CAPS> <NN> <COMPANY>} #2008
# COPYRIGHT (c) 2006 - 2009 DIONYSOS
COPYRIGHT: {<COPYRIGHT2> <CAPS>} #2009
# Copyright (C) 2000 See Beyond Communications Corporation
COPYRIGHT2: {<COPYRIGHT2> <JUNK> <COMPANY>} # 2010
# copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc.
COPYRIGHT: {<COPY> <PN> <YR-RANGE> <COMPANY>}
COPYRIGHT2: {<NAME-COPY> <COPYRIGHT2>} #2274
# (C) COPYRIGHT 2004 UNIVERSITY OF CHICAGO
COPYRIGHT: {<COPYRIGHT2> <UNI> <OF> <CAPS>} #2276
# NAME-CAPS is made of all caps words
#Copyright or Copr. CNRS
NAME-CAPS: {<CAPS>+} #2530
#Copyright or Copr. CNRS
COPYRIGHT: {<COPY> <NN> <COPY> <COPYRIGHT|NAME-CAPS>} #2560
COPYRIGHT: {<COPYRIGHT2> <BY> <NAME-CAPS>} #2561
# Copyright (c) 2004, The Codehaus
COPYRIGHT: {<COPYRIGHT2> <NN> <NNP>} #2562
# Copyright (c) 2007-2014 IOLA and Ole Laursen.
COPYRIGHT: {<COPYRIGHT> <ANDCO>} #2563
# Vladimir Oleynik <dzo@simtreas.ru> (c) 2002
COPYRIGHT: {<NAME-EMAIL> <COPYRIGHT2>} #2840
#copyright of CERN. or copyright CERN.
COPYRIGHT: {<COPY> <OF>? <PN>} #26371
COPYRIGHT: {<NAME-EMAIL> <COPYRIGHT2>} #2840
COPYRIGHT: {<COPYRIGHT2> <COPY> <NN> <NNP> <ALLRIGHTRESERVED>} #3000
# Copyright (c) World Wide Web Consortium , Massachusetts Institute of Technology ,
# Institut National de Recherche en Informatique et en Automatique , Keio University
COPYRIGHT: {<COPYRIGHT> <OF> <COMPANY> <NAME> <NAME> <COMPANY> } #3000
# Copyright (c) 1988, 1993 The Regents of the University ofCalifornia. All rights reserved.
COPYRIGHT: {<COPYRIGHT> <OF> <NN> <UNI> <NN|OF>? <NNP>? <ALLRIGHTRESERVED> } #3010
# (C) Unpublished Work of Sample Group, Inc. All Rights Reserved.
COPYRIGHT: {<COPY>+ <NNP> <NN> <OF> <COMPANY>} #3020
# Foobar Company, All Rights Reserved, (C) 2005
COPYRIGHT: {<COMPANY> <ALLRIGHTRESERVED> <COPYRIGHT2>} #3030
# Copyright (c) 2000 United States Government as represented by the Secretary of the Navy. All rights reserved.
COPYRIGHT: {<COPYRIGHT> <NN> <NN> <NN|NNP> <BY> <NN> <NAME> <ALLRIGHTRESERVED>} #3035
# Copyright (c) 2007-2008, Y Giridhar Appaji Nag <giridhar@appaji.net>
COPYRIGHT: {<COPYRIGHT> <COMPANY|NAME|NAME-EMAIL|NAME-YEAR>+} #3040
# copyright C 1988 by the Institute of Electrical and Electronics Engineers, Inc.
COPYRIGHT: {<COPYRIGHT2> <BY> <COMPANY>} #3050
# Copyright (c) 2007 Hiran Venugopalan , Hussain K H , Suresh P , Swathanthra Malayalam Computing
COPYRIGHT: {<COPYRIGHT> <NAME-CAPS> <ANDCO>} #3060
# Copyright (c) 1995-2018 The PNG Reference Library Authors
COPYRIGHT: {<COPYRIGHT2> <NN> <NAME-CAPS> <NN> <NAME>} #3065
# Copyright (c) 2011 The WebRTC project authors
COPYRIGHT: {<COPY>+ <NAME-YEAR> <AUTHS>} #1567
# Copyright (c), ALL Consulting, 2008
COPYRIGHT: {<COPY>+ <NN> <NN>? <NNP> <YR-RANGE>} # 15675
# Multilines
# Copyright (c) Sebastian Classen sebastian.classen [at] freenet.ag, 2007
# Jan Engelhardt jengelh [at] medozas de, 2007 - 2010
COPYRIGHT: {<COPYRIGHT> <CC> <YR-RANGE>} # 15676
# Copyright (C), 2001-2011, Omega Tech. Co., Ltd.
# Or complex with markup as in Copyright (C) &#36;today.year Google Inc.
COPYRIGHT: {<COPY> <COPY> <ANDCO>} #2841
# Copyright (c) 1995-2018 The PNG Reference Library Authors. (with and without trailing dot)
COPYRIGHT: {<COPYRIGHT> <NN> <AUTHDOT>} #35011
############ All right reserved in the middle ##############################
# http//www.enox.biz/ Copyright (C) All rights Reserved by Enoxbiz
COPYRIGHT: {<COMPANY> <COPY> <COPY> <ALLRIGHTRESERVED> <BY> <NAME>} #15800
# South Baylo University Copyright (c) All Right Reserved. 2018
COPYRIGHT: {<COMPANY> <COPY> <COPY> <ALLRIGHTRESERVED> <YR-RANGE>?} #157201
# Crown Copyright C All rights reserved. or Crown Copyright (C) All rights reserved.
COPYRIGHT: {<NAME-COPY> <NAME-CAPS|COPY> <ALLRIGHTRESERVED>} #15730
# Copyright (c) All Rights Reserved by the District Export Council of Georgia
COPYRIGHT: {<COPY>+ <ALLRIGHTRESERVED> <BY>? <NN> <NAME> } #15674
# Copyright (c) All right reserved SSC. Ltd.
# Copyright (C) All Rights Reserved by Leh. www.leh.jp
# Copyright (c) 2014-2019 New Avenue Foundation.
COPYRIGHT: {<COPY>+ <ALLRIGHTRESERVED> <NAME|NAME-YEAR|COMPANY> } # 15680
# Copyright (c) - All Rights Reserved - PROAIM Medical.
COPYRIGHT: {<COPY>+ <DASH>? <ALLRIGHTRESERVED> <DASHCAPS> <NNP> } # 15690
# Copyright(c) All rights reserved by Minds, Japan Council for Quality Health Care.
# Copyright(c) All Rights Reserved by Chinese Service Center for Scholarly Exchange
COPYRIGHT: {<COPY>+ <ALLRIGHTRESERVED> <BY> <NAME|COMPANY> <NN> <NAME>} #15700
# Copyright(c) All rights reserved by IBM Corp.
COPYRIGHT: {<COPY>+ <ALLRIGHTRESERVED> <BY> <NAME|NAME-YEAR|COMPANY> } # 15710
############################################################################
# Copyright . 2008 Mycom Pany, inc. OR Copyright . 2008 company name, inc.
COPYRIGHT: {<COPY> <NNP> <NAME-YEAR> <COMPANY>?} #15720
# Copyright (c) 2008-1010 Intel Corporation
COPYRIGHT: {<COPY> <COPY> <CD> <COMPANY>} #rare-cd-not-year
# Copyright (C) 2005-2006 dann frazier <dannf@dannf.org>
COPYRIGHT: {<COPYRIGHT2> <NN> <NN> <EMAIL>} #999991
# URL-like at the start
COPYRIGHT: {<COMPANY> <YR-RANGE> <COPY>+ <ALLRIGHTRESERVED>} #999992
# Copyright (c) 2008 Intel Corporation / Qualcomm Inc.
COPYRIGHT: {<COPYRIGHT> <DASH> <COMPANY>} #copydash-co
#Copyright Holders Kevin Vandersloot <kfv101@psu.edu> Erik Johnsson <zaphod@linux.nu>
COPYRIGHT: {<COPY> <HOLDER> <NAME>} #83000
#holder is Tim Hudson (tjh@mincom.oz.au).
COPYRIGHT: {<HOLDER> <JUNK> <NAME-EMAIL>} #83001
# Copyright lowRISC contributors.
COPYRIGHT: {<COPY> <NN> <CONTRIBUTORS>}
#######################################
# Authors
#######################################
# SPDX-FileContributor special case
AUTHOR: {<SPDX-CONTRIB> <CCOMPANY|NAME|NAME-EMAIL|NAME-YEAR|EMAIL> <COMPANY|NAME|NAME-EMAIL|NAME-YEAR|EMAIL|NN>? } #264000
# developed by Project Mayo.
AUTHOR: {<AUTH2>+ <BY> <COMPANY> <NNP>} #2645-1
# Created by XYZ
AUTH: {<AUTH2>+ <BY>} #2645-2
# by Yukihiro Matsumoto matz@netlab.co.jp.
# AUTH: {<BY> <NAME>} #2645-3
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NN>? <COMPANY|NAME|YR-RANGE>* <BY>? <EMAIL>+} #2650
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NN>? <COMPANY|NAME|NAME-EMAIL|NAME-YEAR>+ <YR-RANGE>*} #2660
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <YR-RANGE>+ <BY>? <COMPANY|NAME|NAME-EMAIL>+} #2670
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <YR-RANGE|NNP> <NNP|YR-RANGE>+} #2680
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NN|CAPS>? <YR-RANGE>+} #2690
AUTHOR: {<COMPANY|NAME|NAME-EMAIL>+ <AUTH|CONTRIBUTORS|AUTHS>+ <YR-RANGE>+} #2700
#AUTHOR: {<YR-RANGE> <NAME|NAME-EMAIL>+} #2710
AUTHOR: {<BY> <CC>? <NAME-EMAIL>+} #2720
AUTHOR: {<AUTH|CONTRIBUTORS|AUTHS>+ <NAME-EMAIL>+} #2720
AUTHOR: {<AUTHOR> <CC> <NN>? <AUTH|AUTHS>} #2730
AUTHOR: {<BY> <EMAIL>} #2740
ANDAUTH: {<CC> <AUTH|NAME|CONTRIBUTORS>+} #2750
AUTHOR: {<AUTHOR> <ANDAUTH>+} #2760
# developed by Mitsubishi and NTT.
AUTHOR: {<AUTH|AUTHS|AUTH2> <BY>? <NNP> <CC> <PN>} #2761
# developed by the National Center for Supercomputing Applications at the University of Illinois at Urbana-Champaign
AUTHOR: {<AUTHOR> <NN> <NAME> <NAME>} #2762
# created by Axel Metzger and Till Jaeger, Institut fur Rechtsfragen der Freien und Open Source Software
AUTHOR: {<AUTH2> <CC> <AUTHOR> <NN> <NAME> <NN> <NN> <NNP>} #2645-4
# developed by the XML DB Initiative http//www.xmldb.org
AUTHOR: {<AUTH2> <COMPANY>} #2645-7
# Author not attributable
AUTHOR: {<AUTH> <NN> <NNP>} #not attributable
# author (Panagiotis Tsirigotis)
AUTHOR: {<AUTH> <NNP><NNP>+} #author Foo Bar
#######################################
# Mixed AUTHOR and COPYRIGHT
#######################################
# Compounded statements usings authors
# Copyright by Daniel K. Gebhart
# Also found in some rare cases with a long list of authors.
COPYRIGHT: {<COPY> <BY>? <AUTHOR>+ <YR-RANGE>*} #2800-1
COPYRIGHT: {<AUTHOR> <COPYRIGHT2>} #2820
COPYRIGHT: {<AUTHOR> <YR-RANGE>} #2830
# copyrighted by MIT
COPYRIGHT: {<COPY> <BY> <MIT>} #2840
# Copyright (c) 1995-2018 The PNG Reference Library Authors
COPYRIGHT: {<COPYRIGHT2> <NN> <NAME-CAPS> <NN> <NN> <AUTHS>} #3000
# COPYRIGHT Written by John Cunningham Bowler, 2015.
COPYRIGHT: {<COPY> <AUTHOR>} #4000
# Created by Samvel Khalatyan, May 28, 2013 Copyright 2013, All rights reserved
COPYRIGHT: {<AUTHOR> <NN> <YR-RANGE> <COPYRIGHT2> <ALLRIGHTRESERVED>} #4200
#######################################
# Last resort catch all ending with ALLRIGHTRESERVED
#######################################
COPYRIGHT: {<COMPANY><COPY>+<ALLRIGHTRESERVED>} #99900
COPYRIGHT: {<COPYRIGHT|COPYRIGHT2|COPY|NAME-COPY> <COPY|NNP|AUTHDOT|CAPS|CD|YR-RANGE|NAME|NAME-EMAIL|NAME-YEAR|NAME-COPY|NAME-CAPS|AUTHORANDCO|COMPANY|YEAR|PN|COMP|UNI|CC|OF|IN|BY|OTH|VAN|URL|EMAIL|URL2|MIXEDCAP|NN>+ <ALLRIGHTRESERVED>} #99999
COPYRIGHT: {<COPY|NAME-COPY><COPY|NAME-COPY>} #999990
COPYRIGHT: {<COPYRIGHT|COPYRIGHT2> <ALLRIGHTRESERVED>} #99900111
"""
################################################################################
# MAIN CLEANUP ENTRY POINTS
################################################################################
def refine_copyright(c):
"""
Refine a detected copyright string.
FIXME: the grammar should not allow this to happen.
"""
if not c:
return
c = ' '.join(c.split())
c = strip_some_punct(c)
# this catches trailing slashes in URL for consistency
c = c.strip('/ ')
c = strip_all_unbalanced_parens(c)
c = remove_same_extra_words(c)
c = ' '.join(c.split())
c = remove_dupe_copyright_words(c)
c = strip_prefixes(c, prefixes=set(['by', 'c']))
c = c.strip()
c = c.strip('+')
c = strip_balanced_edge_parens(c)
c = strip_suffixes(c, suffixes=COPYRIGHTS_SUFFIXES)
c = strip_trailing_period(c)
c = c.strip("'")
return c.strip()
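# Illustrative example of refine_copyright (added for clarity; not part of the
# original module). The input string is an assumed sample; the expected output
# follows from the cleanup steps above (duplicate "Copyright" removal, then
# trailing-period stripping) and may differ slightly for other inputs.
#
#   >>> refine_copyright('Copyright Copyright (c) 2001, 2002 by John Doe.')
#   'Copyright (c) 2001, 2002 by John Doe'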
def refine_holder(h):
"""
Refine a detected holder.
FIXME: the grammar should not allow this to happen.
"""
if not h:
return
    # handle the case where "all right reserved" is in the middle and the
# company name contains the word all.
if 'reserved' in h.lower():
prefixes = HOLDERS_PREFIXES_WITH_ALL
else:
prefixes = HOLDERS_PREFIXES
h = refine_names(h, prefixes=prefixes)
h = strip_suffixes(h, HOLDERS_SUFFIXES)
h = h.strip()
h = h.strip('+')
h = h.replace('( ', ' ').replace(' )', ' ')
h = h.strip()
h = strip_trailing_period(h)
h = h.strip()
if h and h.lower() not in HOLDERS_JUNK:
return h
def refine_author(a):
"""
Refine a detected author.
FIXME: the grammar should not allow this to happen.
"""
if not a:
return
# FIXME: we could consider to split comma separated lists such as
# gthomas, sorin@netappi.com, andrew.lunn@ascom.che.g.
a = refine_names(a, prefixes=AUTHORS_PREFIXES)
a = a.strip()
a = strip_trailing_period(a)
a = a.strip()
a = strip_balanced_edge_parens(a)
a = a.strip()
a = refine_names(a, prefixes=AUTHORS_PREFIXES)
a = a.strip()
if a and a.lower() not in AUTHORS_JUNK:
return a
def refine_names(s, prefixes):
"""
    Refine a detected name (author, holder).
FIXME: the grammar should not allow this to happen.
"""
s = strip_some_punct(s)
s = strip_leading_numbers(s)
s = strip_all_unbalanced_parens(s)
s = strip_some_punct(s)
s = s.strip()
s = strip_balanced_edge_parens(s)
s = s.strip()
s = strip_prefixes(s, prefixes)
s = s.strip()
return s
################################################################################
# COPYRIGHTS CLEANUPS
################################################################################
PREFIXES = frozenset([
'?',
'????',
'(insert',
'then',
'current',
'year)',
'maintained',
'by',
'developed',
'created',
'written',
'recoded',
'coded',
'modified',
    'maintained',
    'created',
'$year',
'year',
'uref',
'owner',
'from',
'and',
'of',
'to',
'for',
'or',
'<p>',
])
COPYRIGHTS_SUFFIXES = frozenset([
'copyright',
'.',
',',
'year',
'parts',
'any',
'0',
'1',
'author',
'all',
'some',
'and'
])
# Set of statements that get detected and are junk/false positive
# note: this must be lowercase and be kept to a minimum.
# A junk copyright cannot be resolved otherwise by parsing with a grammar.
# It would be best not to have to resort to this, but this is practical.
COPYRIGHTS_JUNK = frozenset([
# TODO: consider removing to report these (and this is a sign that curation is needed)
'copyright (c)',
'(c) by',
"copyright holder's name",
'(c) (c)',
'c',
'(c)',
'full copyright statement',
'copyrighted by their authors',
'copyrighted by their authors.',
'copyright holder or other authorized',
'copyright holder who authorizes',
'copyright holder has authorized',
'copyright holder nor the author',
'copyright holder(s) or the author(s)',
'copyright holders and contributors',
'copyright owner or entity authorized',
'copyright owner or contributors',
'copyright and license, contributing',
'copyright for a new language file should be exclusivly the authors',
'copyright (c) year',
'copyright (c) year your name',
'copyright holder or said author',
'copyright holder, or any author',
'copyright holder and contributor',
'copyright-holder and its contributors',
'copyright holders and contributors.',
'copyright holder and contributors.',
'copyright holders and contributors',
'copyright holder and contributors',
'copyrighted material, only this license, or another one contracted with the authors',
'copyright notices, authorship',
'copyright holder means the original author(s)',
"copyright notice. timevar.def's author",
'copyright copyright and',
"copyright holder or simply that it is author-maintained'.",
"copyright holder or simply that is author-maintained'.",
'(c) if you bring a patent claim against any contributor',
'copyright-check writable-files m4-check author_mark_check',
"copyright of uc berkeley's berkeley software distribution",
'(c) any recipient',
'(c) each recipient',
'copyright in section',
'u.s. copyright act',
# from a WROX license text
'copyright john wiley & sons, inc. year',
'copyright holders and contributing',
'(c) individual use.',
'copyright, license, and disclaimer',
'(c) forums',
# from the rare LATEX licenses
'copyright 2005 m. y. name',
'copyright 2003 m. y. name',
'copyright 2001 m. y. name',
'copyright. united states',
'(c) source code',
'copyright, designs and patents',
'(c) software activation.',
'(c) cockroach enterprise edition',
'attn copyright agent',
'code copyright grant',
# seen in a weird Adobe license
'copyright redistributions',
'copyright neither',
'copyright including, but not limited',
'copyright not limited',
# found in an RPM spec file COPYRIGHT: LGPL\nGROUP: ....
'copyright lgpl group',
'copyright gpl group',
# from strace-4.6/debian/changelog:
# * Add location of upstream sources to the copyright
# * Merged ARM architecture support from Jim Studt <jim@federated.com>
'copyright merged arm',
# common in sqlite
'(c) as',
# 'copyright as',
# from libmng - libmng.spec
# Copyright: AS IS
# Group: System Environment/Libraries
'copyright as is group system',
'copyright united states',
'copyright as is group',
'copyrighted by its',
'copyright',
'copyright by',
'copyrighted',
'copyrighted by',
'copyright (c) <holders>',
'copyright (c) , and others',
'copyright from license',
'and/or the universal copyright convention 1971',
'universal copyright convention',
'copyright 2005 m. y. name',
'copyright 2005 m. y.',
'copyright 2003 m. y. name',
'copyright 2003 m. y.',
'copyright 2001 m. y. name',
'copyright 2001 m. y.',
'copyright help center',
])
################################################################################
# AUTHORS CLEANUPS
################################################################################
AUTHORS_PREFIXES = frozenset(set.union(
set(PREFIXES),
set([
'contributor',
'contributors',
'contributor(s)',
'authors',
'author',
'author:',
'author(s)',
'authored',
'created',
'author.',
'author\'',
'authors,',
'authorship',
'or',
'spdx-filecontributor',
])
))
# Set of authors that get detected and are junk/false positive
# note: this must be lowercase and be kept to a minimum.
# A junk copyright cannot be resolved otherwise by parsing with a grammar.
# It would be best not to have to resort to this, but this is practical.
AUTHORS_JUNK = frozenset([
# in GNU licenses
'james hacker.',
'james random hacker.',
'contributor. c. a',
'grant the u.s. government and others',
'james random hacker',
'james hacker',
'company',
'contributing project',
'its author',
'gnomovision',
'would',
'may',
'attributions',
'the',
])
################################################################################
# HOLDERS CLEANUPS
################################################################################
HOLDERS_PREFIXES = frozenset(set.union(
set(PREFIXES),
set([
'-',
'a',
'<a',
'href',
'ou',
'portions',
'portion',
'notice',
'holders',
'holder',
'property',
'parts',
'part',
'at',
'cppyright',
'assemblycopyright',
'c',
'works',
'present',
'at',
'right',
'rights',
'reserved',
'held',
'by',
'is',
'(x)',
])
))
HOLDERS_PREFIXES_WITH_ALL = HOLDERS_PREFIXES.union(set(['all']))
HOLDERS_SUFFIXES = frozenset([
'http',
'and',
'email',
'licensing@',
'(minizip)',
'website',
'(c)',
'<http',
'/>',
'.',
',',
'year',
# this may truncate rare companies named "all something"
'some',
'all',
'right',
'rights',
'reserved',
'reserved.',
'href',
'c',
'a',
])
# these final holders are ignored.
HOLDERS_JUNK = frozenset([
'a href',
'property',
'licensing@',
'c',
'works',
'http',
'the',
'are',
'?',
'cppyright',
'parts',
'disclaimed',
'or',
'<holders>',
'author',
])
################################################################################
# TEXT POST PROCESSING and CLEANUP
################################################################################
def remove_dupe_copyright_words(c):
c = c.replace('SPDX-FileCopyrightText', 'Copyright')
# from .net assemblies
c = c.replace('AssemblyCopyright', 'Copyright')
c = c.replace('AppCopyright', 'Copyright')
c = c.replace('JCOPYRIGHT', 'Copyright')
# FIXME: this should be in the grammar, but is hard to get there right
# these are often artifacts of markup
c = c.replace('COPYRIGHT Copyright', 'Copyright')
c = c.replace('Copyright Copyright', 'Copyright')
c = c.replace('Copyright copyright', 'Copyright')
c = c.replace('copyright copyright', 'Copyright')
c = c.replace('copyright Copyright', 'Copyright')
c = c.replace('copyright\'Copyright', 'Copyright')
c = c.replace('copyright"Copyright', 'Copyright')
c = c.replace('copyright\' Copyright', 'Copyright')
c = c.replace('copyright" Copyright', 'Copyright')
return c
def remove_same_extra_words(c):
c = c.replace('<p>', ' ')
c = c.replace('<a href', ' ')
c = c.replace('date-of-software', ' ')
c = c.replace('date-of-document', ' ')
c = c.replace(' $ ', ' ')
c = c.replace(' ? ', ' ')
c = c.replace('</a>', ' ')
c = c.replace('( )', ' ')
c = c.replace('()', ' ')
return c
def strip_prefixes(s, prefixes=()):
"""
    Return the `s` string with any of the strings in the `prefixes` set
    stripped. Normalize and strip spacing.
"""
s = s.split()
# strip prefixes.
# NOTE: prefixes are hard to catch otherwise, unless we split the
# author vs copyright grammar in two
while s and s[0].lower() in prefixes:
s = s[1:]
s = ' '.join(s)
return s
def strip_suffixes(s, suffixes=()):
"""
    Return the `s` string with any of the strings in the `suffixes` set
    stripped. Normalize and strip spacing.
"""
s = s.split()
while s and s[-1].lower() in suffixes:
s = s[:-1]
s = ' '.join(s)
return s
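# Illustrative examples of strip_prefixes/strip_suffixes (added for clarity;
# not part of the original module). The sample strings and the prefix/suffix
# sets are assumptions chosen only to show the word-by-word stripping.
#
#   >>> strip_prefixes('by the Apache Software Foundation', prefixes={'by', 'the'})
#   'Apache Software Foundation'
#   >>> strip_suffixes('Acme Inc. all rights reserved', suffixes={'all', 'rights', 'reserved'})
#   'Acme Inc.'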
def strip_trailing_period(s):
"""
Return the `s` string with trailing periods removed when needed.
"""
if not s:
return s
s = s.strip()
if not s.endswith('.'):
return s
if len(s) < 3 :
return s
is_single_word = len(s.split()) == 1
if s[-2].isupper() and not is_single_word:
# U.S.A., e.V., M.I.T. and similar
return s
if s[-3] == '.':
# S.A., e.v., b.v. and other
return s
if s.lower().endswith(('inc.', 'corp.', 'ltd.', 'llc.', 'co.', 'llp.')):
return s
return s.rstrip('.')
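# Illustrative examples of strip_trailing_period (added for clarity; not part
# of the original module); the inputs are assumed samples. The period is kept
# for abbreviations and common company suffixes, and dropped otherwise.
#
#   >>> strip_trailing_period('Acme Corp.')
#   'Acme Corp.'
#   >>> strip_trailing_period('M.I.T.')
#   'M.I.T.'
#   >>> strip_trailing_period('John Doe.')
#   'John Doe'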
def refine_date(c):
"""
Refine a detected date or date range.
FIXME: the grammar should not allow this to happen.
"""
return strip_some_punct(c)
def strip_leading_numbers(s):
"""
Return a string removing leading words made only of digits.
"""
s = s.split()
while s and s[0].isdigit():
s = s[1:]
return ' '.join(s)
def strip_some_punct(s):
"""
Return a string stripped from some leading and trailing punctuations.
"""
if s:
s = s.strip(''','"}{-_:;&@!''')
s = s.lstrip('.>)]\\/')
s = s.rstrip('<([\\/')
return s
def fix_trailing_space_dot(s):
"""
    Return the string with a trailing " ." (space before the final period) collapsed to ".".
"""
if s and s.endswith(' .'):
s = s[:-2] + '.'
return s
def strip_unbalanced_parens(s, parens='()'):
"""
Return a string where unbalanced parenthesis are replaced with a space.
    `parens` is a pair of characters to balance, such as (), <>, [], {}.
For instance:
>>> strip_unbalanced_parens('This is a super string', '()')
'This is a super string'
>>> strip_unbalanced_parens('This is a super(c) string', '()')
'This is a super(c) string'
>>> strip_unbalanced_parens('This ((is a super(c) string))', '()')
'This ((is a super(c) string))'
>>> strip_unbalanced_parens('This )(is a super(c) string)(', '()')
'This (is a super(c) string) '
>>> strip_unbalanced_parens('This )(is a super(c) string)(', '()')
'This (is a super(c) string) '
>>> strip_unbalanced_parens('This )(is a super(c) string)(', '()')
'This (is a super(c) string) '
>>> strip_unbalanced_parens('This )((is a super(c) string)((', '()')
'This (is a super(c) string) '
>>> strip_unbalanced_parens('This ) is', '()')
'This is'
>>> strip_unbalanced_parens('This ( is', '()')
'This is'
>>> strip_unbalanced_parens('This )) is', '()')
'This is'
>>> strip_unbalanced_parens('This (( is', '()')
'This is'
>>> strip_unbalanced_parens('(', '()')
' '
>>> strip_unbalanced_parens(')', '()')
' '
"""
start, end = parens
if not start in s and not end in s:
return s
unbalanced = []
unbalanced_append = unbalanced.append
stack = []
stack_append = stack.append
stack_pop = stack.pop
for i, c in enumerate(s):
if c == start:
stack_append((i, c,))
elif c == end:
try:
stack_pop()
except IndexError:
unbalanced_append((i, c,))
unbalanced.extend(stack)
pos_to_del = set([i for i, c in unbalanced])
cleaned = [c if i not in pos_to_del else ' ' for i, c in enumerate(s)]
return type(s)('').join(cleaned)
def strip_all_unbalanced_parens(s):
"""
Return a string where unbalanced parenthesis are replaced with a space.
Strips (), <>, [] and {}.
"""
c = strip_unbalanced_parens(s, '()')
c = strip_unbalanced_parens(c, '<>')
c = strip_unbalanced_parens(c, '[]')
c = strip_unbalanced_parens(c, '{}')
return c
def strip_balanced_edge_parens(s):
"""
Return a string where a pair of balanced leading and trailing parenthesis is
stripped.
For instance:
>>> strip_balanced_edge_parens('(This is a super string)')
'This is a super string'
>>> strip_balanced_edge_parens('(This is a super string')
'(This is a super string'
>>> strip_balanced_edge_parens('This is a super string)')
'This is a super string)'
>>> strip_balanced_edge_parens('(This is a super (string')
'(This is a super (string'
>>> strip_balanced_edge_parens('(This is a super (string)')
'(This is a super (string)'
"""
if s.startswith('(') and s.endswith(')'):
c = s[1:-1]
if '(' not in c and ')' not in c:
return c
return s
################################################################################
# CANDIDATE LINES SELECTION
################################################################################
remove_non_chars = re.compile(r'[^a-z0-9]').sub
def prep_line(line):
"""
    Return a tuple of (prepared line, line with only alphanumeric chars) built
    from a line of text, for use in candidate line selection and other checks.
"""
line = prepare_text_line(line.lower(), dedeb=False)
chars_only = remove_non_chars('', line)
return line, chars_only.strip()
is_only_digit_and_punct = re.compile('^[^A-Za-z]+$').match
def is_candidate(prepared_line):
"""
Return True if a prepared line is a candidate line for copyright detection
"""
if not prepared_line:
return False
if is_only_digit_and_punct(prepared_line):
if TRACE:
logger_debug(
f'is_candidate: is_only_digit_and_punct:\n{prepared_line!r}')
return False
if copyrights_hint.years(prepared_line):
return True
else:
pass
for marker in copyrights_hint.statement_markers:
if marker in prepared_line:
return True
def is_inside_statement(
chars_only_line,
markers=('copyright', 'copyrights', 'copyrightby',) + copyrights_hint.all_years,
):
"""
Return True if a line ends with some strings that indicate we are still
inside a statement.
"""
return chars_only_line and chars_only_line.endswith(markers)
def is_end_of_statement(chars_only_line):
"""
Return True if a line ends with some strings that indicate we are at the end
of a statement.
"""
return (
chars_only_line
and chars_only_line.endswith(('rightreserved', 'rightsreserved'))
)
has_trailing_year = re.compile(r'(?:19\d\d|20[0-4]\d)+$').findall
def candidate_lines(numbered_lines):
"""
Yield groups of candidate line lists where each list element is a tuple of
(line number, line text) given an iterable of ``numbered_lines`` as tuples
    of (line number, line text).
A candidate line is a line of text that may contain copyright statements.
A few lines before and after a candidate line are also included.
"""
candidates = deque()
candidates_append = candidates.append
candidates_clear = candidates.clear
# used as a state and line counter
in_copyright = 0
# the previous line (chars only)
previous_chars = None
for numbered_line in numbered_lines:
if TRACE:
logger_debug(
f'# candidate_lines: evaluating line: {numbered_line!r}')
_line_number, line = numbered_line
# FIXME: we should get the prepared text from here and return
# effectively pre-preped lines... but the prep taking place here is
# different?
prepped, chars_only = prep_line(line)
if is_end_of_statement(chars_only):
candidates_append(numbered_line)
if TRACE:
cands = list(candidates)
logger_debug(
' candidate_lines: is EOS: yielding candidates\n'
f' {cands}r\n\n'
)
yield list(candidates)
candidates_clear()
in_copyright = 0
previous_chars = None
elif is_candidate(prepped):
# the state is now "in copyright"
in_copyright = 2
candidates_append(numbered_line)
previous_chars = chars_only
if TRACE: logger_debug(' candidate_lines: line is candidate')
elif 's>' in line:
# this is for debian-style <s></s> copyright name tags
# the state is now "in copyright"
in_copyright = 2
candidates_append(numbered_line)
previous_chars = chars_only
if TRACE:
logger_debug(' candidate_lines: line is <s></s>candidate')
elif in_copyright > 0:
# these are a sign that the copyrights continue after
# a possibly empty line
# see https://github.com/nexB/scancode-toolkit/issues/1565
            # if these are not present, we treat empty lines... as empty!
if (
(not chars_only)
and (
not previous_chars.endswith((
'copyright',
'copyrights',
'and',
'by',
))
)
and not has_trailing_year(previous_chars)
):
# completely empty or only made of punctuations
if TRACE:
cands = list(candidates)
logger_debug(
' candidate_lines: empty: yielding candidates\n'
f' {cands}r\n\n'
)
yield list(candidates)
candidates_clear()
in_copyright = 0
previous_chars = None
else:
candidates_append(numbered_line)
# and decrement our state
in_copyright -= 1
if TRACE:
logger_debug(' candidate_lines: line is in copyright')
elif candidates:
if TRACE:
cands = list(candidates)
logger_debug(
' candidate_lines: not in COP: yielding candidates\n'
f' {cands}r\n\n'
)
yield list(candidates)
candidates_clear()
in_copyright = 0
previous_chars = None
# finally
if candidates:
if TRACE:
cands = list(candidates)
logger_debug(
'candidate_lines: finally yielding candidates\n'
f' {cands}r\n\n'
)
yield list(candidates)
################################################################################
# TEXT PRE PROCESSING
################################################################################
# this catches tags but does not remove the text inside tags
remove_tags = re.compile(
r'<'
r'[(-\-)\?\!\%\/]?'
r'[a-gi-vx-zA-GI-VX-Z][a-zA-Z#\"\=\s\.\;\:\%\&?!,\+\*\-_\/]*'
r'[a-zA-Z0-9#\"\=\s\.\;\:\%\&?!,\+\*\-_\/]+'
r'\/?>',
re.MULTILINE | re.UNICODE
).sub
def strip_markup(text, dedeb=True):
"""
Strip markup tags from ``text``.
If ``dedeb`` is True, remove "Debian" <s> </s> markup tags seen in
older copyright files.
"""
text = remove_tags(' ', text)
# Debian copyright file markup
if dedeb:
return text.replace('</s>', '').replace('<s>', '').replace('<s/>', '')
else:
return text
# this catches the common C-style percent string formatting codes
remove_printf_format_codes = re.compile(r' [\#\%][a-zA-Z] ').sub
remove_punctuation = re.compile(r'[\*#"%\[\]\{\}`]+').sub
remove_ascii_decorations = re.compile(r'[-_=!\\*]{2,}|/{3,}').sub
fold_consecutive_quotes = re.compile(r"\'{2,}").sub
# less common rem comment line prefix in dos
# less common dnl comment line prefix in autotools am/in
remove_comment_markers = re.compile(r'^(rem|\@rem|dnl)\s+').sub
# common comment line prefix in man pages
remove_man_comment_markers = re.compile(r'.\\"').sub
def prepare_text_line(line, dedeb=True, to_ascii=True):
"""
Prepare a text ``line`` for copyright detection.
If ``dedeb`` is True, remove "Debian" <s> </s> markup tags seen in
older copyright files.
If ``to_ascii`` convert the text to ASCII characters.
"""
# remove some junk in man pages: \(co
line = (line
.replace('\\\\ co', ' ')
.replace('\\ co', ' ')
.replace('(co ', ' ')
)
line = remove_printf_format_codes(' ', line)
# less common comment line prefixes
line = remove_comment_markers(' ', line)
line = remove_man_comment_markers(' ', line)
line = (line
# C and C++ style comment markers
.replace('/*', ' ').replace('*/', ' ')
.strip().strip('/*#')
        # uncommon pipe chars in some ascii art
.replace('|', ' ')
# normalize copyright signs and spacing around them
.replace('"Copyright', '" Copyright')
.replace('( C)', ' (c) ')
.replace('(C)', ' (c) ')
.replace('(c)', ' (c) ')
# the case of \251 is tested by 'weirdencoding.h'
.replace('©', ' (c) ')
.replace('\251', ' (c) ')
.replace('©', ' (c) ')
.replace('©', ' (c) ')
.replace('©', ' (c) ')
.replace('©', ' (c) ')
.replace('©', ' (c) ')
.replace('u00A9', ' (c) ')
.replace('u00a9', ' (c) ')
.replace('\xa9', ' (c) ')
.replace('\\XA9', ' (c) ')
# \xc2 is a Â
.replace('\xc2', '')
.replace('\\xc2', '')
# not really a dash: an emdash
.replace('–', '-')
# TODO: add more HTML entities replacements
# see http://www.htmlhelp.com/reference/html40/entities/special.html
# convert html entities CR LF to space
.replace(' ', ' ')
.replace(' ', ' ')
.replace(' ', ' ')
# spaces
.replace(' ', ' ')
.replace(' ', ' ')
.replace(' ', ' ')
# common named HTML entities
.replace('"', '"')
.replace('"', '"')
.replace('&', '&')
.replace('&', '&')
.replace('>', '>')
.replace('>', '>')
.replace('<', '<')
.replace('<', '<')
# normalize (possibly repeated) quotes to unique single quote '
# backticks ` and "
.replace('`', u"'")
.replace('"', u"'")
)
# keep only one quote
line = fold_consecutive_quotes(u"'", line)
# treat some escaped literal CR, LF, tabs, \00 as new lines
# such as in code literals: a="\\n some text"
line = (line
.replace('\\t', ' ')
.replace('\\n', ' ')
.replace('\\r', ' ')
.replace('\\0', ' ')
# TODO: why backslashes?
.replace('\\', ' ')
# replace ('
.replace('("', ' ')
# some trailing garbage ')
.replace(u"')", ' ')
.replace(u"],", ' ')
)
# note that we do not replace the debian tag by a space: we remove it
line = strip_markup(line, dedeb=dedeb)
line = remove_punctuation(' ', line)
# normalize spaces around commas
line = line.replace(' , ', ', ')
# remove ASCII "line decorations"
# such as in --- or === or !!! or *****
line = remove_ascii_decorations(' ', line)
# in apache'>Copyright replace ">" by "> "
line = line.replace('>', '> ').replace('<', ' <')
# normalize to ascii text
if to_ascii:
line = toascii(line, translit=True)
# normalize to use only LF as line endings so we can split correctly
# and keep line endings
line = unixlinesep(line)
# strip verbatim back slash and comment signs again at both ends of a line
# FIXME: this is done at the start of this function already
line = line.strip('\\/*#%;')
# normalize spaces
line = ' '.join(line.split())
return line
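# Illustrative example of prepare_text_line (added for clarity; not part of the
# original module). The input is an assumed C-style comment line; the output
# shown is approximate and follows from the comment-marker stripping, the
# "(C)" -> " (c) " normalization, and the final whitespace folding above.
#
#   >>> prepare_text_line('/* Copyright (C) 1999-2000 Acme, Inc. */')
#   'Copyright (c) 1999-2000 Acme, Inc.'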
| 31.566585
| 254
| 0.528556
|
7c3f24b4dfa408a7a7b684c86531834c764d342f
| 20,720
|
py
|
Python
|
Packs/AzureActiveDirectory/Integrations/AzureADIdentityProtection/AzureADIdentityProtection.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | null | null | null |
Packs/AzureActiveDirectory/Integrations/AzureADIdentityProtection/AzureADIdentityProtection.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | null | null | null |
Packs/AzureActiveDirectory/Integrations/AzureADIdentityProtection/AzureADIdentityProtection.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | null | null | null |
import demistomock as demisto
import dateparser
import urllib3
from MicrosoftApiModule import *
urllib3.disable_warnings()
''' GLOBAL VARS '''
INTEGRATION_NAME = 'Azure Active Directory Identity and Access'
OUTPUTS_PREFIX = "AADIdentityProtection"
BASE_URL = 'https://graph.microsoft.com/beta'
REQUIRED_PERMISSIONS = (
'offline_access', # allows device-flow login
'https://graph.microsoft.com/.default',
'IdentityRiskEvent.Read.All',
'IdentityRiskyUser.ReadWrite.All'
)
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
def __reorder_first_headers(headers: List[str], first_headers: List[str]) -> None:
"""
    Brings the given headers to the front of the list, preserving their order.
    Used for showing the most important content first.
"""
for h in reversed(first_headers):
if h in headers:
headers.insert(0, headers.pop(headers.index(h)))
def __json_list_to_headers(value_list: List[Dict[str, Any]]) -> List[str]:
headers: List[str] = []
seen: Set[str] = set()
for value in value_list:
headers.extend((k for k in value if k not in seen)) # to preserve order
seen.update(value.keys())
return headers
def get_next_link_url(raw_response: dict) -> str:
return raw_response.get('@odata.nextLink', '').replace(' ', '%20')
def parse_list(raw_response: dict, human_readable_title: str, context_path: str) -> CommandResults:
"""
converts a response of Microsoft's graph search into a CommandResult object
"""
values = raw_response.get('value', [])
headers = __json_list_to_headers(values)
__reorder_first_headers(headers,
['Id', 'userId', 'userPrincipalName', 'userDisplayName', 'ipAddress', 'detectedDateTime'])
readable_output = tableToMarkdown(f'{human_readable_title.title()} '
f'({len(values)} {"result" if len(values) == 1 else "results"})',
values,
removeNull=True,
headers=headers,
headerTransform=pascalToSpace)
outputs = {f'{OUTPUTS_PREFIX}.{context_path}(val.id === obj.id)': values}
# removing whitespaces so they aren't mistakenly considered as argument separators in CLI
next_link = get_next_link_url(raw_response)
if next_link:
next_link_key = f'{OUTPUTS_PREFIX}.NextLink(obj.Description === "{context_path}")'
next_link_value = {'Description': context_path, 'URL': next_link}
outputs[next_link_key] = next_link_value
return CommandResults(outputs=outputs,
readable_output=readable_output,
raw_response=raw_response)
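# Hedged illustration of parse_list (added for clarity; not part of the
# original integration). The raw response below is an assumed, minimal Graph
# API payload; per the logic above, the values land under the
# "AADIdentityProtection.<context_path>" output key and a nextLink, when
# present, is stored under "AADIdentityProtection.NextLink".
#
#   raw = {
#       'value': [{'id': '1', 'userId': 'u-1'}],
#       '@odata.nextLink': 'https://graph.microsoft.com/beta/riskDetections?$skiptoken=abc',
#   }
#   results = parse_list(raw, human_readable_title='Risks', context_path='Risks')
#   # results.outputs['AADIdentityProtection.Risks(val.id === obj.id)'] == raw['value']
#   # results.outputs['AADIdentityProtection.NextLink(obj.Description === "Risks")']['URL'] ends with 'skiptoken=abc'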
class AADClient(MicrosoftClient):
def __init__(self, app_id: str, subscription_id: str, verify: bool, proxy: bool, azure_ad_endpoint: str,
tenant_id: str = None, enc_key: str = None, client_credentials: bool = False):
if '@' in app_id: # for use in test-playbook
app_id, refresh_token = app_id.split('@')
integration_context = get_integration_context()
integration_context.update(current_refresh_token=refresh_token)
set_integration_context(integration_context)
self.client_credentials = client_credentials
super().__init__(azure_ad_endpoint=azure_ad_endpoint,
self_deployed=True,
auth_id=app_id, # client id
grant_type=CLIENT_CREDENTIALS if client_credentials else DEVICE_CODE,
base_url=BASE_URL,
token_retrieval_url='https://login.microsoftonline.com/organizations/oauth2/v2.0/token' # type: ignore
if not client_credentials else None,
verify=verify,
proxy=proxy,
scope=' '.join(REQUIRED_PERMISSIONS),
tenant_id=tenant_id, # type: ignore
enc_key=enc_key # client secret
)
self.subscription_id = subscription_id
def http_request(self, **kwargs):
return super().http_request(**kwargs)
def query_list(self,
url_suffix: str,
limit: int,
filter_arguments: Optional[List[str]] = None,
filter_expression: Optional[str] = None,
next_link: Optional[str] = None) -> Dict:
"""
Used for querying when the result is a collection (list) of items, for example RiskyUsers.
        filter_arguments is a list of the form ["foo eq 'bar'"] to be joined with a `' and '` separator.
"""
if next_link:
next_link = next_link.replace('%20', ' ') # OData syntax can't handle '%' character
return self.http_request(method='GET', full_url=next_link)
else:
params: Dict[str, Optional[Any]] = {'$top': limit}
if filter_expression is None and filter_arguments is not None:
filter_expression = ' and '.join(filter_arguments)
params['$filter'] = filter_expression
remove_nulls_from_dictionary(params)
# This could raise:
# {
# "error": {
# "code": "TooManyRequests",
# "message": "Too many requests.",
# "innerError": {
# "date": "2021-08-18T05:56:15",
# "request-id": "some-request-id",
# "client-request-id": "some-client-request-id"
# }
# }
# }
return self.http_request(method='GET', url_suffix=url_suffix, params=params)
def azure_ad_identity_protection_risk_detection_list_raw(self,
limit: int,
filter_expression: Optional[str] = None,
next_link: Optional[str] = None,
user_id: Optional[str] = None,
user_principal_name: Optional[str] = None,
country: Optional[str] = None) -> Dict:
filter_arguments = []
if user_id:
filter_arguments.append(f"userId eq '{user_id}'")
if user_principal_name:
filter_arguments.append(f"userPrincipalName eq '{user_principal_name}'")
if country:
filter_arguments.append(f"location/countryOrRegion eq '{country}'")
return self.query_list(url_suffix='riskDetections',
filter_arguments=filter_arguments,
limit=limit,
filter_expression=filter_expression,
next_link=next_link)
def azure_ad_identity_protection_risk_detection_list(self,
limit: int,
filter_expression: Optional[str] = None,
next_link: Optional[str] = None,
user_id: Optional[str] = None,
user_principal_name: Optional[str] = None,
country: Optional[str] = None) -> CommandResults:
raw_response = self.azure_ad_identity_protection_risk_detection_list_raw(limit=limit,
filter_expression=filter_expression,
next_link=next_link,
user_id=user_id,
user_principal_name=user_principal_name,
country=country)
return parse_list(raw_response, human_readable_title="Risks", context_path="Risks")
def azure_ad_identity_protection_risky_users_list(self,
limit: int,
filter_expression: Optional[str] = None,
next_link: Optional[str] = None,
updated_time: Optional[str] = None,
risk_level: Optional[str] = None,
risk_state: Optional[str] = None,
risk_detail: Optional[str] = None,
user_principal_name: Optional[str] = None) -> CommandResults:
filter_arguments = []
if risk_level:
filter_arguments.append(f"riskLevel eq '{risk_level}'")
if risk_state:
filter_arguments.append(f"riskState eq '{risk_state}'")
if risk_detail:
filter_arguments.append(f"riskDetail eq '{risk_level}'")
if user_principal_name:
filter_arguments.append(f"userPrincipalName eq '{user_principal_name}'")
updated_time = arg_to_datetime(updated_time) # None input to arg_to_datetime stays None
if updated_time:
filter_arguments.append(
f"riskLastUpdatedDateTime gt {updated_time.strftime(DATE_FORMAT)}") # '' wrap only required for strings
raw_response = self.query_list(
url_suffix='RiskyUsers',
filter_arguments=filter_arguments,
limit=limit,
filter_expression=filter_expression,
next_link=next_link,
)
return parse_list(raw_response, human_readable_title='Risky Users', context_path='RiskyUsers')
def azure_ad_identity_protection_risky_users_history_list(self,
limit: int,
user_id: Optional[str] = None,
filter_expression: Optional[str] = None,
next_link: Optional[str] = None) -> CommandResults:
raw_response = self.query_list(limit=limit, filter_expression=filter_expression,
next_link=next_link, url_suffix=f'RiskyUsers/{user_id}/history')
return parse_list(raw_response,
context_path="RiskyUserHistory",
human_readable_title=f'Risky user history for {user_id}')
def azure_ad_identity_protection_risky_users_confirm_compromised(self, user_ids: Union[str, List[str]]):
self.http_request(method='POST',
resp_type='text', # default json causes error, as the response is empty bytecode.
url_suffix='riskyUsers/confirmCompromised',
json_data={'userIds': argToList(user_ids)},
ok_codes=(204,))
return '✅ Confirmed successfully.' # raises exception if not successful
def azure_ad_identity_protection_risky_users_dismiss(self, user_ids: Union[str, List[str]]):
self.http_request(method='POST',
resp_type='text', # default json causes error, as the response is empty bytecode.
url_suffix='riskyUsers/dismiss',
json_data={'userIds': argToList(user_ids)},
ok_codes=(204,))
return '✅ Dismissed successfully.' # raises exception if not successful
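# Hedged illustration of how AADClient.query_list joins filters (added for
# clarity; not part of the original integration). The argument values are
# assumptions; per query_list above, filter_arguments are joined with ' and '
# into a single OData $filter parameter:
#
#   filter_arguments = ["userId eq 'u-1'", "location/countryOrRegion eq 'US'"]
#   # -> GET .../riskDetections with params:
#   #    {'$top': limit, '$filter': "userId eq 'u-1' and location/countryOrRegion eq 'US'"}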
def azure_ad_identity_protection_risk_detection_list_command(client: AADClient, **kwargs):
return client.azure_ad_identity_protection_risk_detection_list(**kwargs)
def azure_ad_identity_protection_risky_users_list_command(client: AADClient, **kwargs):
return client.azure_ad_identity_protection_risky_users_list(**kwargs)
def azure_ad_identity_protection_risky_users_history_list_command(client: AADClient, **kwargs):
return client.azure_ad_identity_protection_risky_users_history_list(**kwargs)
def azure_ad_identity_protection_risky_users_confirm_compromised_command(client: AADClient, **kwargs):
return client.azure_ad_identity_protection_risky_users_confirm_compromised(**kwargs)
def azure_ad_identity_protection_risky_users_dismiss_command(client: AADClient, **kwargs):
return client.azure_ad_identity_protection_risky_users_dismiss(**kwargs)
def detection_to_incident(detection, detection_date):
detection_id: str = detection.get('id', '')
detection_type: str = detection.get('riskEventType', '')
detection_detail: str = detection.get('riskDetail', '')
incident = {
'name': f'Azure AD:'
f' {detection_id} {detection_type} {detection_detail}',
'occurred': f'{detection_date}Z',
'rawJSON': json.dumps(detection)
}
return incident
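# Hedged illustration of detection_to_incident (added for clarity; not part of
# the original integration); the detection payload is an assumed sample.
#
#   detection_to_incident(
#       {'id': 'abc', 'riskEventType': 'unfamiliarFeatures', 'riskDetail': 'none'},
#       '2021-08-18T05:56:15.000')
#   # -> {'name': 'Azure AD: abc unfamiliarFeatures none',
#   #     'occurred': '2021-08-18T05:56:15.000Z',
#   #     'rawJSON': '{"id": "abc", ...}'}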
def detections_to_incidents(detections: List[Dict[str, str]], last_fetch_datetime: str) -> \
Tuple[List[Dict[str, str]], str]:
"""
Given the detections retrieved from Azure Identity Protection, transforms their data to incidents format.
"""
incidents: List[Dict[str, str]] = []
latest_incident_time = last_fetch_datetime
for detection in detections:
detection_datetime = detection.get('detectedDateTime', '')
detection_datetime_in_azure_format = date_str_to_azure_format(detection_datetime)
incident = detection_to_incident(detection, detection_datetime_in_azure_format)
incidents.append(incident)
if datetime.strptime(detection_datetime_in_azure_format, DATE_FORMAT) > \
datetime.strptime(date_str_to_azure_format(latest_incident_time), DATE_FORMAT):
latest_incident_time = detection_datetime
return incidents, latest_incident_time
def get_last_fetch_time(last_run, params):
last_fetch = last_run.get('latest_detection_found')
if not last_fetch:
demisto.debug('[AzureADIdentityProtection] First run')
# handle first time fetch
first_fetch = f"{params.get('first_fetch') or '1 days'} ago"
default_fetch_datetime = dateparser.parse(date_string=first_fetch, date_formats=[DATE_FORMAT])
assert default_fetch_datetime is not None, f'failed parsing {first_fetch}'
last_fetch = str(default_fetch_datetime.isoformat(timespec='milliseconds')) + 'Z'
demisto.debug(f'[AzureADIdentityProtection] last_fetch: {last_fetch}')
return last_fetch
def build_filter(last_fetch, params):
start_time_enforcing_filter = f"detectedDateTime gt {last_fetch}"
user_supplied_filter = params.get('fetch_filter_expression', '')
query_filter = f'({user_supplied_filter}) and {start_time_enforcing_filter}' if user_supplied_filter \
else start_time_enforcing_filter
demisto.debug(f'[AzureADIdentityProtection] query_filter: {query_filter}')
return query_filter
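# Hedged illustration of build_filter (added for clarity; not part of the
# original integration); the parameter values are assumptions.
#
#   build_filter('2021-08-01T00:00:00.000Z', {'fetch_filter_expression': "riskLevel eq 'high'"})
#   # -> "(riskLevel eq 'high') and detectedDateTime gt 2021-08-01T00:00:00.000Z"
#   build_filter('2021-08-01T00:00:00.000Z', {})
#   # -> "detectedDateTime gt 2021-08-01T00:00:00.000Z"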
def date_str_to_azure_format(date_str):
"""
Given a string representing a date in some general format, modifies the date to Azure format.
    That means removing the trailing Z and adding a fractional-seconds part if it is missing.
    Moreover, sometimes the date has too many fractional digits, so they are truncated to six (microsecond precision).
"""
date_str = date_str[:-1] if date_str[-1].lower() == 'z' else date_str
if '.' not in date_str:
date_str = f'{date_str}.000'
else:
date_without_ns, ns = date_str.split('.')
ns = ns[:6]
date_str = f'{date_without_ns}.{ns}'
return date_str
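# Illustrative examples of date_str_to_azure_format (added for clarity; not
# part of the original integration); the dates are assumed samples.
#
#   >>> date_str_to_azure_format('2021-08-18T05:56:15Z')
#   '2021-08-18T05:56:15.000'
#   >>> date_str_to_azure_format('2021-08-18T05:56:15.1234567Z')
#   '2021-08-18T05:56:15.123456'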
def fetch_incidents(client: AADClient, params: Dict[str, str]):
last_run: Dict[str, str] = demisto.getLastRun()
demisto.debug(f'[AzureIdentityProtection] last run: {last_run}')
last_fetch = get_last_fetch_time(last_run, params)
query_filter = build_filter(last_fetch, params)
demisto.debug(f'[AzureIdentityProtection] last fetch is: {last_fetch}, filter is: {query_filter}')
risk_detection_list_raw: Dict = client.azure_ad_identity_protection_risk_detection_list_raw(
limit=int(params.get('max_fetch', '50')),
filter_expression=query_filter,
user_id=params.get('fetch_user_id', ''),
user_principal_name=params.get('fetch_user_principal_name', ''),
)
detections: list = risk_detection_list_raw.get('value', [])
incidents, latest_detection_time = detections_to_incidents(detections, last_fetch_datetime=last_fetch)
demisto.debug(f'[AzureIdentityProtection] Fetched {len(incidents)} incidents')
demisto.debug(f'[AzureIdentityProtection] next run latest_detection_found: {latest_detection_time}')
last_run = {
'latest_detection_found': latest_detection_time,
}
return incidents, last_run
def start_auth(client: AADClient) -> CommandResults:
result = client.start_auth('!azure-ad-auth-complete')
return CommandResults(readable_output=result)
def complete_auth(client: AADClient) -> str:
client.get_access_token() # exception on failure
return '✅ Authorization completed successfully.'
def test_connection(client: AADClient) -> str:
client.get_access_token() # exception on failure
return '✅ Success!'
def reset_auth() -> str:
set_integration_context({})
return 'Authorization was reset successfully. Run **!azure-ad-auth-start** to start the authentication process.'
def main() -> None:
params = demisto.params()
command = demisto.command()
args = demisto.args()
demisto.debug(f'Command being called is {command}')
try:
client = AADClient(
app_id=params.get('app_id', ''),
subscription_id=params.get('subscription_id', ''),
verify=not params.get('insecure', False),
proxy=params.get('proxy', False),
azure_ad_endpoint=params.get('azure_ad_endpoint', 'https://login.microsoftonline.com'),
tenant_id=params.get("tenant_id"),
client_credentials=params.get("client_credentials", False),
enc_key=(params.get('credentials') or {}).get('password')
)
# auth commands
if command == 'test-module':
return_results('The test module is not functional, run the azure-ad-auth-start command instead.')
elif command == 'azure-ad-auth-start':
return_results(start_auth(client))
elif command == 'azure-ad-auth-complete':
return_results(complete_auth(client))
elif command == 'azure-ad-auth-test':
return_results(test_connection(client))
elif command == 'azure-ad-auth-reset':
return_results(reset_auth())
# actual commands
elif command == 'azure-ad-identity-protection-risks-list':
return_results(azure_ad_identity_protection_risk_detection_list_command(client, **args))
elif command == 'azure-ad-identity-protection-risky-user-list':
return_results(azure_ad_identity_protection_risky_users_list_command(client, **args))
elif command == 'azure-ad-identity-protection-risky-user-history-list':
return_results(azure_ad_identity_protection_risky_users_history_list_command(client, **args))
elif command == 'azure-ad-identity-protection-risky-user-confirm-compromised':
return_results(azure_ad_identity_protection_risky_users_confirm_compromised_command(client, **args))
elif command == 'azure-ad-identity-protection-risky-user-dismiss':
return_results(azure_ad_identity_protection_risky_users_dismiss_command(client, **args))
elif command == 'fetch-incidents':
incidents, last_run = fetch_incidents(client, params)
demisto.incidents(incidents)
demisto.setLastRun(last_run)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as e:
return_error("\n".join((f'Failed to execute command "{demisto.command()}".',
f'Error:{str(e)}',
f'Traceback: {traceback.format_exc()}'
)), e)
from MicrosoftApiModule import * # noqa: E402
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 46.666667
| 128
| 0.603813
|
ba034f2f6909d14d021a668992c9fe253fec8534
| 382
|
py
|
Python
|
ckanext/archiver/tests/fixtures.py
|
vrk-kpa/ckanext-archiver
|
13472493d4ec55a6f9299bee6ba7c98b4ddbcfb1
|
[
"MIT"
] | 16
|
2016-01-11T14:42:41.000Z
|
2022-03-18T03:20:15.000Z
|
ckanext/archiver/tests/fixtures.py
|
vrk-kpa/ckanext-archiver
|
13472493d4ec55a6f9299bee6ba7c98b4ddbcfb1
|
[
"MIT"
] | 49
|
2015-03-23T14:47:37.000Z
|
2021-05-27T06:27:55.000Z
|
ckanext/archiver/tests/fixtures.py
|
vrk-kpa/ckanext-archiver
|
13472493d4ec55a6f9299bee6ba7c98b4ddbcfb1
|
[
"MIT"
] | 30
|
2015-01-08T09:04:31.000Z
|
2021-06-22T13:53:21.000Z
|
import pytest
from ckanext.archiver.tests.mock_flask_server import create_app
import threading
@pytest.fixture(scope='session', autouse=True)
def client():
app = create_app()
port = 9091
thread = threading.Thread(target=lambda: app.run(debug=True, port=port, use_reloader=False))
thread.daemon = True
thread.start()
yield "http://127.0.0.1:" + str(port)
| 23.875
| 96
| 0.712042
|
f7b33711f5b1f38f4ebff74da49afc737c4efd18
| 4,930
|
py
|
Python
|
settings/automation_settings.py
|
mjvandermeulen/rpi-automation
|
0b328cab8876929e46235482d217dc4771dfdc6a
|
[
"MIT"
] | null | null | null |
settings/automation_settings.py
|
mjvandermeulen/rpi-automation
|
0b328cab8876929e46235482d217dc4771dfdc6a
|
[
"MIT"
] | null | null | null |
settings/automation_settings.py
|
mjvandermeulen/rpi-automation
|
0b328cab8876929e46235482d217dc4771dfdc6a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import collections
from settings import personal_settings
AUTOMATION_PROJECT_PATH = '/home/pi/Programming/Automation/' # end with '/'
TEMPCONTROLLER_MEASUREMENTS_PATH = AUTOMATION_PROJECT_PATH + \
'measurements/temperature_controller/'
AUTOMATION_EXECUTABLES_PATH = AUTOMATION_PROJECT_PATH + \
'executables/'
### remote frequency outlets ###
################################
rfoutlet_codes = personal_settings.rfoutlet_codes
# pulse length 195 average
rfpulse_length = '195'
# includes homophones (specific form of homonym): '4', 'four', 'for', 'fore'
# NOTE every outlet command is a group, including 1..5
# @todo replace string with r'...'
# @todo: change name to rfoutlet_groups
# @todo: hierarchy noise points to fan and filter? -> you need to recursively
# find the base groups.
#
# NOTE: no spaces in the keys OR key: '"fans (noise)"' --> wrap in double quotes
# so the key actually contains the double quotes: "fans (noise)"
# Fluent Python implemented
OutletGroup = collections.namedtuple('OutletGroup', ['regex', 'outlets'])
outlet_groups = {
'1': OutletGroup(
r'1|one|won', # '1|w?one?' adds incorrect on: problem!
['1'],
),
'2': OutletGroup(
r'2|t[wo]?o',
['2'],
),
'3': OutletGroup(
r'3|three',
['3'],
),
'4': OutletGroup(
r'4|fou?re?', # adds incorrect foure. no problem :)
['4'],
),
'5': OutletGroup(
r'5|five',
['5'],
),
'6': OutletGroup(
r'6|six|st?icks?',
['6'],
),
'7': OutletGroup(
r'7|seven',
['7'],
),
'8': OutletGroup(
r'8|eight|h?ate',
['8'],
),
'9': OutletGroup(
r'9|nine|nein',
['9'],
),
'10': OutletGroup(
r'10|t[aei]e?n',
['10'],
),
# pytest only:
# '1000': OutletGroup(
# r'10|tho?u?sa?nd?',
# ['1000'],
# ),
# 'livingroom': OutletGroup(
# r'l(?:iving)?r(?:oom)?s?',
# ['1', '2', '3'],
# ),
# ''
# 'fan': OutletGroup(
# r'fan\b', # not fans: see regex with fans.
# # so word boundary needed.
# # word boundary overlaps with next wordboundary
# # see debuggex.com, but no problem because
# # r'fan\b\b\b\b\b' works equally well.
# ['4'],
# ),
# 'redlight': OutletGroup(
# r're?d([ -_]?lights?)?',
# ['4'],
# ),
# 'guestlight': OutletGroup(
# r'gu?e?st([ -_]?li?ghts?)?',
# ['5'],
# ),
# '"fans (noise)"': OutletGroup(
# r'noise?s?|fans',
# ['7', '3'],
# ),
'fireplacelights': OutletGroup(
r'fi?re?p?l?a?c?e?(li?ghts?)?',
['6']
),
'bigtree': OutletGroup(
r'bi?gt?r?e?e?',
['7'],
),
'smalltree': OutletGroup(
r'sma?l?t?r?e?e?',
['8'],
),
'trees': OutletGroup(
r'tre?e?s?',
['7', '8'],
),
'christmas': OutletGroup(
r'(ch?ri?s?t?|x)ma?s',
['7', '8', '9'],
),
'outsidelights': OutletGroup(
r'outs?i?d?e?l?i?g?h?t?s?',
['9'],
),
'vivolights': OutletGroup(
r'vi?v?[oi]?a?n?li?g?h?t?s?',
['4'],
),
'vivo5': OutletGroup(
r'vi?v?[io]?a?n?5',
['5'],
),
# 'coffee': OutletGroup(
# r'co?ff?e?e?',
# [''],
# ),
# 'airfilter': OutletGroup(
# r'(air)?[ ]?(fi?lte?r|pu?ri?fi?e?r)',
# ['7'],
# ),
# 'dehumidifier': OutletGroup(
# r'de?hu?i?d?i?f?i?e?r?',
# ['9']
# ),
# 'officenoise': OutletGroup(
# r'o?ff?i?ce?no?i?s?e?',
# ['8', '9']
# ),
# 'humidifier': OutletGroup(
# r'hu?mi?d(ifier)?s?',
# ['7'],
# ),
# 'cooler': OutletGroup(
# r'co?o?le?r?',
# ['6'],
# ),
# 'lights': OutletGroup(
# r'li?g?h?te?s?',
# [],
# ),
# 'officelight': OutletGroup(
# # are used
# r'off?i?c?e?li?gh?t?',
# [''],
# ),
# 'basement': OutletGroup(
# r'ba?se?me?nt?',
# [''],
# )
}
# Fluent Python implemented
# declare groups_regex, used in the outlets regex below.
groups_list = [outlet_groups[key].regex for key in outlet_groups]
# groups_list now looks like:
# ['1|one|won', '2|t[wo]?o', ..., 'noise?s?']
groups_regex = '|'.join(groups_list)
# groups_regex now looks like:
# '1|one|won|2|t[wo]?o|...|noise?s?'
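# Hedged illustration (added for clarity; not part of the original settings):
# any single spoken/typed token can be checked against the combined pattern.
# The tokens below are assumed samples.
#
#   import re
#   re.fullmatch(groups_regex, 'xmas')     # matches via r'(ch?ri?s?t?|x)ma?s'
#   re.fullmatch(groups_regex, 'bigtree')  # matches via r'bi?gt?r?e?e?'
#   re.fullmatch(groups_regex, 'lamp')     # no outlet group pattern matches -> None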
### END remote frequency outlets ###
####################################
Brew_Settings = collections.namedtuple(
'Brew_Settings', 'cycles brew_time pause')
coffee_settings = {
"default": Brew_Settings(2, 15*60, 15*60),
"clean": Brew_Settings(30, 20, 15*60),
"rinse": Brew_Settings(1, 3*60, 10*60),
"test": Brew_Settings(1, 1, 10)
}
coffee_default_delay = 0
| 25.025381
| 80
| 0.489047
|
91ca0de4990c541793da14b4ab88c2a5900c07e6
| 12,069
|
py
|
Python
|
src/prefect/schedules/schedules.py
|
vnsn/prefect
|
972345597975155dba9e3232bcc430d0a6258a37
|
[
"Apache-2.0"
] | 1
|
2021-05-12T12:47:12.000Z
|
2021-05-12T12:47:12.000Z
|
src/prefect/schedules/schedules.py
|
vnsn/prefect
|
972345597975155dba9e3232bcc430d0a6258a37
|
[
"Apache-2.0"
] | 7
|
2021-06-26T08:05:20.000Z
|
2022-03-26T08:05:32.000Z
|
src/prefect/schedules/schedules.py
|
vnsn/prefect
|
972345597975155dba9e3232bcc430d0a6258a37
|
[
"Apache-2.0"
] | 1
|
2021-10-16T08:33:56.000Z
|
2021-10-16T08:33:56.000Z
|
import heapq
import itertools
import operator
import warnings
from datetime import datetime, timedelta
from typing import Callable, Iterable, List, Optional, cast
import prefect.schedules.adjustments
import prefect.schedules.clocks
import prefect.schedules.filters
class Schedule:
"""
Schedules are used to generate dates for flow runs. Scheduling logic works as follows:
First off, candidate events are emitted by one or more `clocks`. Secondly, if filters were
specified, they are applied in this order: all `filters` must return True, at least one
    `or_filter` must return True, then all `not_filters` must return False. Thirdly, events
that pass the filters are adjusted based on the `adjustments` functions. Finally, the
resulting `datetime` is emitted.
Example:
```python
from datetime import time, timedelta
from prefect.schedules import Schedule, filters
from prefect.schedules.clocks import IntervalClock
schedule = Schedule(
# emit an event every hour
clocks=[IntervalClock(interval=timedelta(hours=1))],
# only include weekdays
filters=[filters.is_weekday],
# only include 9am and 5pm
or_filters=[
filters.between_times(time(9), time(9)),
filters.between_times(time(17), time(17))
]
)
    schedule.next(4) # returns the next 4 occurrences of 9am and 5pm on weekdays
```
Args:
- clocks (List[prefect.schedules.clocks.Clock]): one or more clocks that emit events
for this schedule. At least one clock is required.
- filters (List[Callable[[datetime], bool]]): a list of filter functions that will be
used to filter events. Events will only be emitted if all `filters` are True.
- or_filters (List[Callable[[datetime], bool]]): a list of filter functions that
will be used to filter events. Events will only be emitted if at least one of the
`or_filters` are True
- not_filters (List[Callable[[datetime], bool]]): a list of filter functions that
will be used to filter events. Events will only be emitted if all `not_filters` are
False
- adjustments (List[Callable[[datetime], datetime]]): a list of adjustment functions
that will be applied to dates that pass all filters. If more than one adjustment
            is provided, they will be applied in sequence.
"""
def __init__(
self,
clocks: List["prefect.schedules.clocks.Clock"],
filters: List[Callable[[datetime], bool]] = None,
or_filters: List[Callable[[datetime], bool]] = None,
not_filters: List[Callable[[datetime], bool]] = None,
adjustments: List[Callable[[datetime], datetime]] = None,
):
if not isinstance(clocks, Iterable):
raise TypeError("clocks should be a list of clocks.")
self.clocks = clocks
self.filters = filters or []
self.or_filters = or_filters or []
self.not_filters = not_filters or []
self.adjustments = adjustments or []
@property
def start_date(self) -> Optional[datetime]:
return min([c.start_date for c in self.clocks if c.start_date], default=None)
@property
def end_date(self) -> Optional[datetime]:
return max([c.end_date for c in self.clocks if c.end_date], default=None)
def next(
self, n: int, after: datetime = None, return_events: bool = False
) -> List[datetime]:
"""
Retrieve the next `n` scheduled times, optionally after a specified date.
Because clocks are potentially infinite, and filters may be prohibitive, this operation
will stop after checking 10,000 events, no matter how many valid dates have been found.
Args:
- n (int): the number of dates to return
- after (datetime): an optional starting point. All returned dates will be after this
time.
- return_events (bool, optional): an optional boolean specifying whether to return
a full Clock Event or just the start_time of the associated event; defaults to
`False`
Returns:
- List[datetime]: a list of datetimes
"""
events = []
counter = 0
for event in self._get_clock_events(after=after):
counter += 1
if self._check_filters(event.start_time):
event.start_time = self._apply_adjustments(event.start_time)
events.append(event if return_events else event.start_time)
if len(events) == n or counter >= 10000:
break
return events # type: ignore
def _get_clock_events(
self, after: datetime = None
) -> Iterable["prefect.schedules.clocks.ClockEvent"]:
"""
A generator of events emitted by the schedule's clocks.
Events are sorted and unique (if two clocks emit the same date, it is only yielded once.)
Args:
- after (datetime): an optional starting point. All returned dates will be after this
time.
Returns:
- Iterable[datetime]: An iterator of dates (as a generator)
"""
clock_events = [clock.events(after=after) for clock in self.clocks]
sorted_events = heapq.merge(*clock_events)
# this next line yields items only if they differ from the previous item, which means
# this generator only yields unique events (since the input is sorted)
#
# code from `unique_justseen()` at
# https://docs.python.org/3/library/itertools.html#itertools-recipes
unique_events = map(
next, map(operator.itemgetter(1), itertools.groupby(sorted_events))
) # type: Iterable[prefect.schedules.clocks.ClockEvent]
yield from unique_events
def _check_filters(self, dt: datetime) -> bool:
"""
Check the schedule's filters:
- all `filters` must pass
- at least one of the `or_filters` must pass
- none of the `not_filters` may pass
Args:
- dt (datetime): the date to check filters against
Returns:
- bool: True if the filters pass; False otherwise
"""
# check that all `filters` pass
all_filters = all(filter_fn(dt) for filter_fn in self.filters)
# check that at least one `or_filter` passes
or_filters = (
any(filter_fn(dt) for filter_fn in self.or_filters)
if self.or_filters
else True
)
# check that no `not_filters` pass
not_filters = (
not any(filter_fn(dt) for filter_fn in self.not_filters)
if self.not_filters
else True
)
# return True if all three filter types passed
return all_filters and or_filters and not_filters
def _apply_adjustments(self, dt: datetime) -> datetime:
"""
Apply the schedule's adjustments to a date.
Args:
- dt (datetime): the date to adjust
Returns:
- datetime: the adjusted date
"""
# run date through adjustment pipeline
for adjust_fn in self.adjustments:
dt = adjust_fn(dt)
return dt
# FIXME the proper signature for this function should be:
# interval (required), start_date (optional), end_date (optional)
# but start_date is currently first to maintain compatibility with an older version of
# Prefect
def IntervalSchedule(
start_date: datetime = None, interval: timedelta = None, end_date: datetime = None
) -> Schedule:
"""
A schedule formed by adding `timedelta` increments to a start_date.
IntervalSchedules only support intervals of one minute or greater.
NOTE: If the `IntervalSchedule` start time is provided with a DST-observing timezone,
then the clock will adjust itself appropriately. Intervals greater than 24
hours will follow DST conventions, while intervals of less than 24 hours will
follow UTC intervals. For example, an hourly clock will fire every UTC hour,
even across DST boundaries. When clocks are set back, this will result in two
runs that *appear* to both be scheduled for 1am local time, even though they are
an hour apart in UTC time. For longer intervals, like a daily clock, the
interval clock will adjust for DST boundaries so that the clock-hour remains
constant. This means that a daily clock that always fires at 9am will observe
DST and continue to fire at 9am in the local time zone.
Note that this behavior is different from the `CronSchedule`.
Args:
- interval (timedelta): interval on which this clock occurs
- start_date (datetime, optional): first date of clock. If None, will be set to
"2019-01-01 00:00:00 UTC"
- end_date (datetime, optional): an optional end date for the clock
Raises:
- ValueError: if provided interval is less than one minute
"""
return Schedule(
clocks=[
prefect.schedules.clocks.IntervalClock(
interval=cast(timedelta, interval), # due to FIXME, above
start_date=start_date,
end_date=end_date,
)
]
)
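# Editor's illustrative sketch (not part of the original module): a minimal
# IntervalSchedule usage example mirroring the docstring above; the helper is
# defined but never called here.
def _interval_schedule_example() -> Schedule:
    """Hedged sketch: a daily schedule; `.next(3)` would return the next three run times."""
    return IntervalSchedule(interval=timedelta(days=1))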
def CronSchedule(
cron: str,
start_date: datetime = None,
end_date: datetime = None,
day_or: bool = None,
) -> Schedule:
"""
Cron clock.
NOTE: If the `CronSchedule's` start time is provided with a DST-observing timezone,
then the clock will adjust itself. Cron's rules for DST are based on clock times,
not intervals. This means that an hourly cron clock will fire on every new clock
hour, not every elapsed hour; for example, when clocks are set back this will result
in a two-hour pause as the clock will fire *the first time* 1am is reached and
*the first time* 2am is reached, 120 minutes later. Longer clocks, such as one
that fires at 9am every morning, will automatically adjust for DST.
Note that this behavior is different from the `IntervalSchedule`.
Args:
- cron (str): a valid cron string
- start_date (datetime, optional): an optional start date for the clock
- end_date (datetime, optional): an optional end date for the clock
- day_or (bool, optional): Control how croniter handles `day` and `day_of_week` entries.
Defaults to True, matching cron which connects those values using OR.
If the switch is set to False, the values are connected using AND. This behaves like
fcron and enables you to e.g. define a job that executes each 2nd friday of a month
by setting the days of month and the weekday.
Raises:
- ValueError: if the cron string is invalid
"""
return Schedule(
clocks=[
prefect.schedules.clocks.CronClock(
cron=cron, start_date=start_date, end_date=end_date, day_or=day_or
)
]
)
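# Editor's illustrative sketch (not part of the original module): a minimal
# CronSchedule usage example; the cron string below is an assumption chosen for
# illustration only.
def _cron_schedule_example() -> Schedule:
    """Hedged sketch: weekdays at 09:00, leaving `day_or` at croniter's default."""
    return CronSchedule("0 9 * * 1-5")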
def OneTimeSchedule(start_date: datetime) -> Schedule:
"""
A schedule corresponding to a single date
NOTE: this function is deprecated and maintained only for backwards-compatibility.
"""
warnings.warn(
"The OneTimeSchedule is deprecated and will be removed from "
"Prefect. Use a Schedule with a single-date DatesClock instead.",
UserWarning,
stacklevel=2,
)
return Schedule(clocks=[prefect.schedules.clocks.DatesClock(dates=[start_date])])
def UnionSchedule(schedules: List[Schedule]) -> Schedule:
"""
A schedule formed by combining other schedules.
NOTE: this function is deprecated and maintained only for backwards-compatibility.
"""
warnings.warn(
"The UnionSchedule is deprecated and will be removed from "
"Prefect. Use a Schedule with multiple clocks instead.",
UserWarning,
stacklevel=2,
)
return Schedule(clocks=[c for s in schedules for c in s.clocks])
| 39.312704
| 97
| 0.65341
|
629ae2f112884d7214cce175f93c2b1bb50919c0
| 942
|
py
|
Python
|
django_AUS/urls.py
|
yashpatel7025/django-email-service-AUS
|
1a584c6e3c4df4264b18c05060553f9a80ea9b96
|
[
"MIT"
] | null | null | null |
django_AUS/urls.py
|
yashpatel7025/django-email-service-AUS
|
1a584c6e3c4df4264b18c05060553f9a80ea9b96
|
[
"MIT"
] | null | null | null |
django_AUS/urls.py
|
yashpatel7025/django-email-service-AUS
|
1a584c6e3c4df4264b18c05060553f9a80ea9b96
|
[
"MIT"
] | null | null | null |
"""django_AUS URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('emailapp.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.230769
| 77
| 0.723992
|
c1b06eabed7ee0015a1a7a363666b120fb931d4c
| 1,066
|
py
|
Python
|
tcli/cli.py
|
beddari/tcli
|
eda6b2f74201696c2b08da9eb922c0c65a5b4e71
|
[
"MIT"
] | 1
|
2017-10-14T13:41:50.000Z
|
2017-10-14T13:41:50.000Z
|
tcli/cli.py
|
beddari/tcli
|
eda6b2f74201696c2b08da9eb922c0c65a5b4e71
|
[
"MIT"
] | null | null | null |
tcli/cli.py
|
beddari/tcli
|
eda6b2f74201696c2b08da9eb922c0c65a5b4e71
|
[
"MIT"
] | 4
|
2016-12-27T02:37:55.000Z
|
2022-03-15T20:28:44.000Z
|
from pkg_resources import iter_entry_points
import click
from click_plugins import with_plugins
@with_plugins(iter_entry_points('tcli.plugins'))
@click.group()
def cli():
"""tcli is a modular command line tool wrapping and simplifying common
team related tasks."""
@cli.group()
def christmas():
"""This is the christmas module."""
@christmas.command()
@click.option('--count', default=1, help='number of greetings')
@click.argument('name')
def greet(count, name):
for x in range(count):
click.echo('Merry Christmas %s!' % name)
import tcli.utils
from subprocess import call
def add_exec_plugin(name, cmd):
@cli.command(name=name, context_settings=dict(
ignore_unknown_options=True,
))
@click.argument('cmd_args', nargs=-1, type=click.UNPROCESSED)
def exec_plugin(cmd_args):
"""Discovered exec module plugin."""
cmdline = [cmd] + list(cmd_args)
call(cmdline)
FILTER="^%s-(.*)$" % __package__
for name, cmd in tcli.utils.find_plugin_executables(FILTER):
add_exec_plugin(name, cmd)
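# Editor's illustrative sketch (not part of the original module): invoking the
# `christmas greet` subcommand in-process with click's test runner; the helper
# is defined but never called here.
def _greet_example():
    """Hedged sketch: return the greeting output produced by `tcli christmas greet`."""
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ['christmas', 'greet', '--count', '2', 'Alice'])
    return result.output  # two lines of "Merry Christmas Alice!"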
| 23.688889
| 74
| 0.697936
|
8b14143bdccde5f674ce162a30bbfe1697e2de42
| 1,814
|
py
|
Python
|
scheduled_tasks/economy/get_retail_sales.py
|
Bonedeadskull/Stocksera
|
b36164b958dba5ae3242660088acff2af204a9af
|
[
"MIT"
] | 1
|
2022-03-06T00:27:26.000Z
|
2022-03-06T00:27:26.000Z
|
scheduled_tasks/economy/get_retail_sales.py
|
satyami3/stock
|
d3d3f65a25feb764e6f735f422251fd0ede520fc
|
[
"MIT"
] | null | null | null |
scheduled_tasks/economy/get_retail_sales.py
|
satyami3/stock
|
d3d3f65a25feb764e6f735f422251fd0ede520fc
|
[
"MIT"
] | null | null | null |
import sqlite3
import pandas as pd
conn = sqlite3.connect(r"database/database.db", check_same_thread=False)
db = conn.cursor()
def retail_sales():
"""
Get retail sales and compare it with avg monthly covid cases
"""
df = pd.read_html("https://ycharts.com/indicators/us_retail_and_food_services_sales")
combined_df = df[6][::-1].append(df[5][::-1])
combined_df["Value"] = combined_df["Value"].str.replace("B", "")
combined_df["Value"] = combined_df["Value"].astype(float)
combined_df["Percent Change"] = combined_df["Value"].shift(1)
combined_df["Percent Change"] = combined_df["Percent Change"].astype(float)
combined_df["Percent Change"] = 100 * (combined_df["Value"] - combined_df["Percent Change"]) / combined_df["Percent Change"]
combined_df["Percent Change"] = combined_df["Percent Change"].round(2)
combined_df["Date"] = combined_df["Date"].astype('datetime64[ns]').astype(str)
covid_df = pd.read_csv(r'https://covid.ourworldindata.org/data/owid-covid-data.csv')
usa_df = covid_df[covid_df["iso_code"] == "USA"]
usa_df.index = pd.to_datetime(usa_df["date"])
usa_df = usa_df.groupby(pd.Grouper(freq="M"))
usa_df = usa_df.mean()["new_cases"]
usa_df = pd.DataFrame(usa_df)
usa_df["new_cases"] = usa_df["new_cases"].round(2)
usa_df.reset_index(inplace=True)
usa_df["date"] = usa_df["date"].astype(str)
usa_df.rename(columns={"date": "Date"}, inplace=True)
combined_df = pd.merge(combined_df, usa_df, how='left', on='Date')
combined_df.fillna(0, inplace=True)
print(combined_df)
for index, row in combined_df.iterrows():
db.execute("INSERT OR IGNORE INTO retail_sales VALUES (?, ?, ?, ?)", (row[0], row[1], row[2], row[3]))
conn.commit()
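# Editor's note (sketch, not in the original script): the three "Percent Change"
# steps above are effectively pandas' built-in pct_change; a tiny worked example:
def _pct_change_example():
    """Hedged sketch: 500 -> 520 is +4.0%, 520 -> 494 is -5.0%."""
    s = pd.Series([500.0, 520.0, 494.0])
    return (s.pct_change() * 100).round(2)  # [NaN, 4.0, -5.0]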
if __name__ == '__main__':
retail_sales()
| 40.311111
| 128
| 0.675303
|
0f407716c8abd997627b4c09f6622c7f93caa49b
| 2,234
|
py
|
Python
|
cache.py
|
janastu/cache-server
|
a0311eb0d90a836680e7d83a5275a1b3a0508378
|
[
"MIT"
] | null | null | null |
cache.py
|
janastu/cache-server
|
a0311eb0d90a836680e7d83a5275a1b3a0508378
|
[
"MIT"
] | null | null | null |
cache.py
|
janastu/cache-server
|
a0311eb0d90a836680e7d83a5275a1b3a0508378
|
[
"MIT"
] | null | null | null |
import flask
from flask import Module, make_response
from urlparse import urlparse
from jpegtran import JPEGImage
import os
import requests
cache = Module(__name__, name="cache")
"""Currently works for jpg image only,
client request can provide args in url,
ex: http://test-cache.com/cache?url='image url'&width='int'&height='int'
"""
@cache.route('/', methods=['GET'])
def create_cache():
"""cache the image at the url passed in query params.
width and height of the thumbnail can be specified in the params.
return cached image thereafter."""
if 'width' in flask.request.args:
width = int(flask.request.args['width'])
else:
width = 200
if 'height' in flask.request.args:
height = int(flask.request.args['height'])
else:
height = 150
dirn = urlparse(flask.request.args['url']).netloc
dirn = os.path.join('cache', dirn) # Store cache under a directory
f = urlparse(flask.request.args['url']).path.split(
'/')[-1] # Name of the file
fp = os.path.join(dirn, f)
fp = os.path.abspath(fp) # Get absolute path of the file
    if os.path.isdir(dirn):
if os.path.isfile(fp):
"""if the image is already cached, serve it"""
            with open(fp, 'rb') as f:
image = f.read()
else:
"""cache a low res version of the image"""
image = create_thumbnail(flask.request.args['url'], fp, width,
height)
else:
"""Create a directory with the hostname"""
os.makedirs(dirn)
image = create_thumbnail(flask.request.args['url'], fp, width, height)
response = make_response()
response.data = image
return response
def create_thumbnail(url, fp, width, height):
    blob = requests.get(url).content
    image = JPEGImage(blob=blob)
"""Check if downscaling image is possible with requested width and
height.
"""
if image.width < width:
width = width/2
if image.height < height:
height = height/2
image.downscale(width, height, 90).save(fp)
    with open(fp, 'rb') as f:
thumbnail = f.read()
return thumbnail
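# Editor's illustrative sketch (not part of the original service): how a client
# might request a cached thumbnail. The host and mount point below are
# placeholders and depend on how the `cache` module is registered with the app.
def _client_example():
    """Hedged sketch: fetch a 300x200 thumbnail of a remote JPEG via the cache."""
    params = {'url': 'http://example.com/picture.jpg', 'width': 300, 'height': 200}
    return requests.get('http://localhost:5000/', params=params).content  # raw JPEG bytes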
| 31.027778
| 78
| 0.621755
|
ddb2dfe17ff483a64e5a0d8ce0915849dcbee4d1
| 67
|
py
|
Python
|
zerovl/models/backbones/builder.py
|
zerovl/ZeroVL
|
b48794e74fed0f80adf5fa3010481064411c4182
|
[
"MIT"
] | 14
|
2022-01-19T08:08:29.000Z
|
2022-03-10T05:55:36.000Z
|
zerovl/models/backbones/builder.py
|
zerovl/ZeroVL
|
b48794e74fed0f80adf5fa3010481064411c4182
|
[
"MIT"
] | 2
|
2022-02-25T14:35:47.000Z
|
2022-03-01T03:11:13.000Z
|
zerovl/models/backbones/builder.py
|
zerovl/ZeroVL
|
b48794e74fed0f80adf5fa3010481064411c4182
|
[
"MIT"
] | 3
|
2022-02-09T01:23:11.000Z
|
2022-02-15T11:45:30.000Z
|
from zerovl.utils import Registry
BACKBONE = Registry('backbone')
| 16.75
| 33
| 0.791045
|
d2e9608b20a6cc5d90b428a0db0784c9265f89a1
| 16,363
|
py
|
Python
|
slingen/src/isas/sse3.py
|
danielesgit/slingen
|
e7cfee7f6f2347b57eb61a077746c9309a85411c
|
[
"BSD-3-Clause"
] | 23
|
2018-03-13T07:52:26.000Z
|
2022-03-24T02:32:00.000Z
|
slingen/src/isas/sse3.py
|
danielesgit/slingen
|
e7cfee7f6f2347b57eb61a077746c9309a85411c
|
[
"BSD-3-Clause"
] | 2
|
2018-09-28T18:29:25.000Z
|
2019-02-20T13:22:19.000Z
|
slingen/src/isas/sse3.py
|
danielesgit/slingen
|
e7cfee7f6f2347b57eb61a077746c9309a85411c
|
[
"BSD-3-Clause"
] | 3
|
2018-06-13T13:51:57.000Z
|
2020-01-11T14:47:02.000Z
|
'''
Created on Apr 18, 2012
@author: danieles
'''
from sympy import sympify
from src.irbase import RValue, VecAccess, Pointer, AddressOf, Comment, sa
from src.isas.isabase import ISA, Loader, Storer, LoadReplacer
from src.isas.sse import mmLoaduPs, mmCvtssf32, mmShufflePs
from src.isas.sse2 import SSE2, mmLoaduPd, mmLoadSd, mmShufflePd, mmStoreuPd, mmAddPd, mmMulPd, mmSetzeroPd, mmUnpackhiPd, \
mmUnpackloPd, mmStoreSd, mmCvtsdf64
# from src.irbase import *
# from src.isas.isabase import *
# from src.isas.x86 import *
# from src.isas.sse import *
# from src.isas.sse2 import *
class mmHaddPd(RValue):
def __init__(self, src0, src1):
super(mmHaddPd, self).__init__()
self.srcs += [ src0, src1 ]
def computeSym(self, nameList):
src0 = self.srcs[0].computeSym(nameList)
src1 = self.srcs[1].computeSym(nameList)
return [ src0[0]+src0[1], src1[0]+src1[1] ]
def unparse(self, indent):
return indent + "_mm_hadd_pd(" + self.srcs[0].unparse("") + ", " + self.srcs[1].unparse("") + ")"
def printInst(self, indent):
return indent + "mmHaddPd( " + self.srcs[0].printInst("") + ", " + self.srcs[1].printInst("") + " )"
class mmLoaddupPd(RValue, VecAccess):
def __init__(self, pointer, zeromask=None):
super(mmLoaddupPd, self).__init__()
self.reglen = 2
self.mrmap = [(0,1)]
self.zeromask = [0]*self.reglen
if zeromask is not None: # In this case all the positions have to be zero
self.zeromask = [1]*self.reglen
self.pointer = pointer
def computeSym(self, nameList):
p = self.pointer.computeSym(nameList)
return [ sympify(p+'_0'), sympify(p+'_0') ]
def getZMask(self):
return self.zeromask
def unparse(self, indent):
return indent + "_mm_loaddup_pd(" + self.pointer.unparse("") + ")"
def printInst(self, indent):
return indent + "mmLoaddupPd( " + self.pointer.printInst("") + " )"
def __eq__(self, other):
return isinstance(other, mmLoaddupPd) and self.pointer == other.pointer
def __hash__(self):
return hash((hash("mmLoaddupPd"), self.pointer.mat, self.pointer.at))
class mmMovedupPd(RValue):
def __init__(self, src):
super(mmMovedupPd, self).__init__()
self.srcs += [ src ]
def computeSym(self, nameList):
src = self.srcs[0].computeSym(nameList)
return [ src[0], src[0] ]
def getZMask(self):
s0ZMask = self.srcs[0].getZMask()
return [ s0ZMask[0], s0ZMask[0] ]
def unparse(self, indent):
return indent + "_mm_movedup_pd(" + self.srcs[0].unparse("") + ")"
def printInst(self, indent):
return indent + "mmMovedupPd( " + self.srcs[0].printInst("") + " )"
class mmHaddPs(RValue):
def __init__(self, src0, src1):
super(mmHaddPs, self).__init__()
self.srcs += [ src0, src1 ]
def computeSym(self, nameList):
src0 = self.srcs[0].computeSym(nameList)
src1 = self.srcs[1].computeSym(nameList)
return [ src0[0]+src0[1], src0[2]+src0[3], src1[0]+src1[1], src1[2]+src1[3] ]
def unparse(self, indent):
return indent + "_mm_hadd_ps(" + self.srcs[0].unparse("") + ", " + self.srcs[1].unparse("") + ")"
def printInst(self, indent):
return indent + "mmHaddPs( " + self.srcs[0].printInst("") + ", " + self.srcs[1].printInst("") + " )"
class _Dbl2Loader(Loader):
def __init__(self):
super(_Dbl2Loader, self).__init__()
def loadMatrix(self, mParams):
src, dst = mParams['m'], mParams['nuM']
sL, sR = mParams['mL'], mParams['mR']
dL, dR = mParams['nuML'], mParams['nuMR']
M, N = mParams['M'], mParams['N']
# nuMM = mParams['nuMM']
isCompact = mParams['compact']
instructions = []
if M == 1 and N == 1:
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmLoadSd(AddressOf(sa(src[sL.of(0),sR.of(0)]))), pc)
instructions += [ Comment("1x1 -> 1x2"), instr ]
elif M == 2 and N == 1:
if not isCompact:
es = [ mmLoadSd(Pointer(src[sL.of(i),sR.of(0)])) for i in range(2) ]
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmShufflePd(es[0], es[1], (0,0)), pc)
instructions += [ Comment("2x1 -> 2x1 - incompact"), instr ]
return instructions
class _Dbl2BLAC(object):
def __init__(self):
super(_Dbl2BLAC, self).__init__()
def Add(self, s0Params, s1Params, dParams, opts):
nu = 2
src0, src1, dst = s0Params['nuM'], s1Params['nuM'], dParams['nuM']
s0L, s0R = s0Params['nuML'], s0Params['nuMR']
s1L, s1R = s1Params['nuML'], s1Params['nuMR']
dL, dR = dParams['nuML'], dParams['nuMR']
M, N = dParams['nuMM'], dParams['nuMN']
instructions = []
instructions += [ Comment(str(nu) + "-BLAC: " + str(M) + "x" + str(N) + " + " + str(M) + "x" + str(N)) ]
if M*N == nu:
va = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmAddPd(va, vb), pc)
instructions += [ instr ]
elif M == nu and N == nu:
for i in range(M):
va = mmLoaduPd(Pointer(src0[s0L.of(i),s0R.of(0)]))
vb = mmLoaduPd(Pointer(src1[s1L.of(i),s1R.of(0)]))
pc = Pointer(dst[dL.of(i),dR.of(0)])
instr = mmStoreuPd(mmAddPd(va, vb), pc)
instructions += [ instr ]
return instructions
def Kro(self, s0Params, s1Params, dParams, opts):
nu = 2
src0, src1, dst = s0Params['nuM'], s1Params['nuM'], dParams['nuM']
s0L, s0R = s0Params['nuML'], s0Params['nuMR']
s1L, s1R = s1Params['nuML'], s1Params['nuMR']
dL, dR = dParams['nuML'], dParams['nuMR']
oM, oK, oN, oP = s0Params['M'], s0Params['N'], s1Params['M'], s1Params['N']
M, K, N, P = s0Params['nuMM'], s0Params['nuMN'], s1Params['nuMM'], s1Params['nuMN']
instructions = []
instructions += [ Comment(str(nu) + "-BLAC: " + str(M) + "x" + str(K) + " Kro " + str(N) + "x" + str(P)) ]
if oM*oK*oN*oP == 1:
pc = Pointer(dst[dL.of(0),dR.of(0)])
va = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
instr = mmStoreuPd(mmMulPd(va, vb), pc)
instructions += [ instr ]
elif oM*oK == 1:
if N*P == nu:
va = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
dup = mmShufflePd(va, va, (0,0))
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmMulPd(dup, vb), pc)
instructions += [ instr ]
else:
va = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
dup = mmShufflePd(va, va, (0,0))
for i in range(nu):
vb = mmLoaduPd(Pointer(src1[s1L.of(i),s1R.of(0)]))
pc = Pointer(dst[dL.of(i),dR.of(0)])
instr = mmStoreuPd(mmMulPd(dup, vb), pc)
instructions += [ instr ]
else:
if M*K == nu:
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
dup = mmShufflePd(vb, vb, (0,0))
va = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmMulPd(va, dup), pc)
instructions += [ instr ]
else:
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
dup = mmShufflePd(vb, vb, (0,0))
for i in range(nu):
va = mmLoaduPd(Pointer(src0[s0L.of(i),s0R.of(0)]))
pc = Pointer(dst[dL.of(i),dR.of(0)])
instr = mmStoreuPd(mmMulPd(va, dup), pc)
instructions += [ instr ]
return instructions
def Mul(self, s0Params, s1Params, dParams, opts):
nu = 2
src0, src1, dst = s0Params['nuM'], s1Params['nuM'], dParams['nuM']
s0L, s0R = s0Params['nuML'], s0Params['nuMR']
s1L, s1R = s1Params['nuML'], s1Params['nuMR']
dL, dR = dParams['nuML'], dParams['nuMR']
M, K, N = s0Params['nuMM'], s0Params['nuMN'], s1Params['nuMN']
instructions = []
instructions += [ Comment(str(nu) + "-BLAC: " + str(M) + "x" + str(K) + " * " + str(K) + "x" + str(N)) ]
if M == 1:
if N == 1:
va = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmHaddPd(mmMulPd(va, vb), mmSetzeroPd()), pc)
# instr = mmStoreSd(mmHaddPd(mmMulPd(va, vb), mmSetzeroPd()), pc)
instructions += [ instr ]
else:
va = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
vb0 = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
vb1 = mmLoaduPd(Pointer(src1[s1L.of(1),s1R.of(0)]))
vbt0 = mmUnpackloPd(vb0, vb1)
vbt1 = mmUnpackhiPd(vb0, vb1)
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmHaddPd(mmMulPd(va, vbt0), mmMulPd(va, vbt1)), pc)
instructions += [ instr ]
else:
if K == 1:
va0 = mmLoaddupPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
va1 = mmLoaddupPd(Pointer(src0[s0L.of(1),s0R.of(0)]))
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
pc0 = Pointer(dst[dL.of(0),dR.of(0)])
pc1 = Pointer(dst[dL.of(1),dR.of(0)])
instr0 = mmStoreuPd(mmMulPd(va0, vb), pc0)
instr1 = mmStoreuPd(mmMulPd(va1, vb), pc1)
instructions += [ instr0, instr1 ]
else:
if N == 1:
va0 = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
va1 = mmLoaduPd(Pointer(src0[s0L.of(1),s0R.of(0)]))
vb = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(mmHaddPd(mmMulPd(va0, vb), mmMulPd(va1, vb)), pc)
instructions += [ instr ]
else:
va0 = mmLoaduPd(Pointer(src0[s0L.of(0),s0R.of(0)]))
va1 = mmLoaduPd(Pointer(src0[s0L.of(1),s0R.of(0)]))
vb0 = mmLoaduPd(Pointer(src1[s1L.of(0),s1R.of(0)]))
vb1 = mmLoaduPd(Pointer(src1[s1L.of(1),s1R.of(0)]))
vbt0 = mmUnpackloPd(vb0, vb1)
vbt1 = mmUnpackhiPd(vb0, vb1)
pc0 = Pointer(dst[dL.of(0),dR.of(0)])
pc1 = Pointer(dst[dL.of(1),dR.of(0)])
instr0 = mmStoreuPd(mmHaddPd(mmMulPd(va0, vbt0), mmMulPd(va0, vbt1)), pc0)
instr1 = mmStoreuPd(mmHaddPd(mmMulPd(va1, vbt0), mmMulPd(va1, vbt1)), pc1)
instructions += [ instr0, instr1 ]
return instructions
def T(self, sParams, dParams, opts):
nu = 2
src, dst = sParams['nuM'], dParams['nuM']
sL, sR = sParams['nuML'], sParams['nuMR']
dL, dR = dParams['nuML'], dParams['nuMR']
M, N = dParams['nuMM'], dParams['nuMN']
instructions = []
instructions += [ Comment(str(nu) + "-BLAC: (" + str(N) + "x" + str(M) + ")^T") ]
if M*N == nu:
va = mmLoaduPd(Pointer(src[sL.of(0),sR.of(0)]))
pc = Pointer(dst[dL.of(0),dR.of(0)])
instr = mmStoreuPd(va, pc)
instructions += [ instr ]
else:
va0 = mmLoaduPd(Pointer(src[sL.of(0),sR.of(0)]))
va1 = mmLoaduPd(Pointer(src[sL.of(1),sR.of(0)]))
pc0 = Pointer(dst[dL.of(0),dR.of(0)])
pc1 = Pointer(dst[dL.of(1),dR.of(0)])
vt0 = mmUnpackloPd(va0, va1)
vt1 = mmUnpackhiPd(va0, va1)
instr0 = mmStoreuPd(vt0, pc0)
instr1 = mmStoreuPd(vt1, pc1)
instructions += [ instr0, instr1 ]
return instructions
class _Dbl2Storer(Storer):
def __init__(self):
super(_Dbl2Storer, self).__init__()
def storeMatrix(self, mParams):
src, dst = mParams['nuM'], mParams['m']
sL, sR = mParams['nuML'], mParams['nuMR']
dL, dR = mParams['mL'], mParams['mR']
M, N = mParams['M'], mParams['N']
isCompact = mParams['compact']
instructions = []
if M == 1 and N == 1:
nuv = mmLoaduPd(Pointer(src[sL.of(0),sR.of(0)]))
pc = AddressOf(sa(dst[dL.of(0),dR.of(0)]))
instr = mmStoreSd(nuv, pc)
instructions += [ Comment("1x2 -> 1x1"), instr ]
elif M == 2 and N == 1:
if not isCompact:
nuv = mmLoaduPd(Pointer(src[sL.of(0),sR.of(0)]))
e = mmShufflePd(nuv, nuv, (1,1))
pcs = [ Pointer(dst[dL.of(i),dR.of(0)]) for i in range(2) ]
instr0 = mmStoreSd(nuv, pcs[0])
instr1 = mmStoreSd(e, pcs[1])
instructions += [ Comment("2x1 -> 2x1 - (Store) Incompact"), instr0, instr1 ]
return instructions
class SSE3LoadReplacer(LoadReplacer):
def __init__(self, opts):
super(SSE3LoadReplacer, self).__init__(opts)
def ScaLoad(self, src, repList): #repList is a list of tuples (line, dst)
isFlt = (self.opts['precision'] == 'float')
if src.pointer.at[1] == 0:
return mmCvtssf32(mmLoaduPs(repList[0][1].pointer)) if isFlt else mmCvtsdf64(mmLoaduPd(repList[0][1].pointer))
if isFlt:
return mmCvtssf32(mmShufflePs(mmLoaduPs(repList[0][1].pointer), mmLoaduPs(repList[0][1].pointer), (0, 0, 0, src.pointer.at[1])))
return mmCvtsdf64(mmShufflePd(mmLoaduPd(repList[0][1].pointer), mmLoaduPd(repList[0][1].pointer), (0, src.pointer.at[1])))
def mmLoaddupPd(self, src, repList):
sList = sorted(repList, key=lambda t: t[0], reverse=True)
dst = sList[0][1]
if dst.reglen == 2 and dst.mrmap == [0,1]:
at = src.pointer.getAt()
return mmShufflePd(mmLoaduPd(dst.pointer), mmLoaduPd(dst.pointer), (at[0],at[0]))
def mmLoaduPd(self, src, repList):
if len(repList) == 2 and all(map(lambda d: d[1].reglen == 2 and d[1].mrmap == [0], repList)):
dstList = [ t[1] for t in repList]
sList = sorted(dstList, key = lambda dst: dst.pointer, reverse=True) #higher pointer to lower
return mmUnpackloPd(mmLoadSd(sList[1].pointer),mmLoadSd(sList[0].pointer))
class SSE3(ISA):
def __init__(self, opts):
super(SSE3, self).__init__()
self.name = "SSE3"
sse2 = SSE2(opts)
fp_m128d = { 'type': '__m128d' }
fp_m128d['arith'] = [ mmHaddPd ]
fp_m128d['load'] = [ mmLoaddupPd ]
fp_m128d['misc'] = [ ]
fp_m128d['cvt'] = [ ]
fp_m128d['set'] = [ ]
fp_m128d['move'] = [ mmMovedupPd ]
fp_m128d['store'] = [ ]
fp_m128d['loader'] = _Dbl2Loader()
fp_m128d['nublac'] = _Dbl2BLAC()
fp_m128d['storer'] = _Dbl2Storer()
fp_m128d['loadreplacer'] = SSE3LoadReplacer(opts)
fp_m128 = { 'type': '__m128' }
fp_m128['arith'] = [ mmHaddPs ]
fp_m128['load'] = [ ]
fp_m128['misc'] = [ ]
fp_m128['cvt'] = [ ]
fp_m128['set'] = [ ]
fp_m128['move'] = [ ]
fp_m128['store'] = [ ]
self.updateType(fp_m128, sse2.types['fp'][('float',4)], ['arith', 'load', 'misc', 'cvt', 'set', 'move', 'store'])
self.updateType(fp_m128d, sse2.types['fp'][('double',2)], ['arith', 'load', 'misc', 'cvt', 'set', 'move', 'store'])
self.types = { 'fp': { ('double', 2): fp_m128d, ('float', 4): fp_m128} }
| 41.95641
| 140
| 0.522276
|
2f2511f1181a8e10ce32c78a2a41f2314ad67450
| 25,826
|
py
|
Python
|
model-code/refactored-version/mlp-tabular/helper_files/helper_data.py
|
CHNxindong/corn-ordinal-neuralnet
|
7f8a45614cb6488e9c019c5e9d3a5aee0d714e94
|
[
"MIT"
] | 1
|
2021-12-15T08:16:23.000Z
|
2021-12-15T08:16:23.000Z
|
model-code/refactored-version/mlp-tabular/helper_files/helper_data.py
|
CHNxindong/corn-ordinal-neuralnet
|
7f8a45614cb6488e9c019c5e9d3a5aee0d714e94
|
[
"MIT"
] | null | null | null |
model-code/refactored-version/mlp-tabular/helper_files/helper_data.py
|
CHNxindong/corn-ordinal-neuralnet
|
7f8a45614cb6488e9c019c5e9d3a5aee0d714e94
|
[
"MIT"
] | null | null | null |
import random
import pandas as pd
import torch
from torch.utils.data import sampler
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
from torchvision import transforms
from torchvision import datasets
def label_to_levels(label, num_classes, dtype=torch.float32):
"""Converts integer class label to extended binary label vector
Parameters
----------
label : int
        Class label to be converted into an extended
binary vector. Should be smaller than num_classes-1.
num_classes : int
        The number of class labels in the dataset. Assumes
class labels start at 0. Determines the size of the
output vector.
dtype : torch data type (default=torch.float32)
Data type of the torch output vector for the
extended binary labels.
Returns
----------
levels : torch.tensor, shape=(num_classes-1,)
Extended binary label vector. Type is determined
by the `dtype` parameter.
Examples
----------
>>> label_to_levels(0, num_classes=5)
tensor([0., 0., 0., 0.])
>>> label_to_levels(1, num_classes=5)
tensor([1., 0., 0., 0.])
>>> label_to_levels(3, num_classes=5)
tensor([1., 1., 1., 0.])
>>> label_to_levels(4, num_classes=5)
tensor([1., 1., 1., 1.])
"""
if not label <= num_classes-1:
raise ValueError('Class label must be smaller or '
'equal to %d (num_classes-1). Got %d.'
% (num_classes-1, label))
if isinstance(label, torch.Tensor):
int_label = label.item()
else:
int_label = label
levels = [1]*int_label + [0]*(num_classes - 1 - int_label)
levels = torch.tensor(levels, dtype=dtype)
return levels
def levels_from_labelbatch(labels, num_classes, dtype=torch.float32):
"""
    Converts a list of integer class labels to extended binary label vectors
Parameters
----------
    labels : list or 1D torch.tensor, shape=(num_labels,)
A list or 1D torch.tensor with integer class labels
to be converted into extended binary label vectors.
num_classes : int
        The number of class labels in the dataset. Assumes
class labels start at 0. Determines the size of the
output vector.
dtype : torch data type (default=torch.float32)
Data type of the torch output vector for the
extended binary labels.
Returns
----------
levels : torch.tensor, shape=(num_labels, num_classes-1)
Examples
----------
>>> levels_from_labelbatch(labels=[2, 1, 4], num_classes=5)
tensor([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[1., 1., 1., 1.]])
"""
levels = []
for label in labels:
levels_from_label = label_to_levels(
label=label, num_classes=num_classes, dtype=dtype)
levels.append(levels_from_label)
levels = torch.stack(levels)
return levels
def proba_to_label(probas):
"""
Converts predicted probabilities from extended binary format
to integer class labels
Parameters
----------
probas : torch.tensor, shape(n_examples, n_labels)
Torch tensor consisting of probabilities returned by CORAL model.
Examples
----------
>>> # 3 training examples, 6 classes
>>> probas = torch.tensor([[0.934, 0.861, 0.323, 0.492, 0.295],
... [0.496, 0.485, 0.267, 0.124, 0.058],
... [0.985, 0.967, 0.920, 0.819, 0.506]])
>>> proba_to_label(probas)
tensor([2, 0, 5])
"""
predict_levels = probas > 0.5
predicted_labels = torch.sum(predict_levels, dim=1)
return predicted_labels
class BalancedBatchSampler(torch.utils.data.sampler.Sampler):
# adopted from https://github.com/galatolofederico/pytorch-balanced-batch/blob/master/sampler.py
def __init__(self, dataset, labels=None):
self.labels = labels
self.dataset = dict()
self.balanced_max = 0
# Save all the indices for all the classes
for idx in range(0, len(dataset)):
label = self._get_label(dataset, idx)
if label not in self.dataset:
self.dataset[label] = list()
self.dataset[label].append(idx)
self.balanced_max = len(self.dataset[label]) \
if len(self.dataset[label]) > self.balanced_max else self.balanced_max
# Oversample the classes with fewer elements than the max
for label in self.dataset:
while len(self.dataset[label]) < self.balanced_max:
self.dataset[label].append(random.choice(self.dataset[label]))
self.keys = list(self.dataset.keys())
self.currentkey = 0
self.indices = [-1]*len(self.keys)
def __iter__(self):
while self.indices[self.currentkey] < self.balanced_max - 1:
self.indices[self.currentkey] += 1
yield self.dataset[self.keys[self.currentkey]][self.indices[self.currentkey]]
self.currentkey = (self.currentkey + 1) % len(self.keys)
self.indices = [-1]*len(self.keys)
def _get_label(self, dataset, idx, labels = None):
if self.labels is not None:
return self.labels[idx].item()
else:
            # Try guessing the label source from the dataset type
            dataset_type = type(dataset)
            if dataset_type is datasets.MNIST:
                return dataset.train_labels[idx].item()
            elif dataset_type is datasets.ImageFolder:
return dataset.imgs[idx][1]
else:
raise Exception("You should pass the tensor of labels to the constructor as second argument")
def __len__(self):
return self.balanced_max*len(self.keys)
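# Editor's illustrative sketch (not part of the original helpers): plugging the
# sampler into a DataLoader for any dataset whose labels are known up front.
def _balanced_loader_example(dataset, labels, batch_size=32):
    """Hedged sketch: class-balanced (oversampled) batches."""
    return DataLoader(dataset,
                      batch_size=batch_size,
                      sampler=BalancedBatchSampler(dataset, labels=labels),
                      shuffle=False)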
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Parameters:
------------
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
------------
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
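# Editor's illustrative sketch (not part of the original helpers): undoing a
# Normalize transform on a single CHW image tensor. The MNIST-like mean/std
# values below are assumptions chosen for illustration.
def _unnormalize_example():
    """Hedged sketch: returns the de-normalized image tensor (modified in place)."""
    unnorm = UnNormalize(mean=(0.1307,), std=(0.3081,))
    img = torch.zeros(1, 28, 28)  # a normalized 1-channel image
    return unnorm(img)            # each channel: t * std + mean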
def get_dataloaders_mnist(batch_size, num_workers=0,
validation_fraction=None,
train_transforms=None,
test_transforms=None):
if train_transforms is None:
train_transforms = transforms.ToTensor()
if test_transforms is None:
test_transforms = transforms.ToTensor()
train_dataset = datasets.MNIST(root='data',
train=True,
transform=train_transforms,
download=True)
valid_dataset = datasets.MNIST(root='data',
train=True,
transform=test_transforms)
test_dataset = datasets.MNIST(root='data',
train=False,
transform=test_transforms)
if validation_fraction is not None:
num = int(validation_fraction * 60000)
train_indices = torch.arange(0, 60000 - num)
valid_indices = torch.arange(60000 - num, 60000)
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=batch_size,
num_workers=num_workers,
sampler=valid_sampler)
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
sampler=train_sampler)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=False)
if validation_fraction is None:
return train_loader, test_loader
else:
return train_loader, valid_loader, test_loader
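# Editor's illustrative sketch (not part of the original helpers): requesting a
# 10% validation split, which makes the function return three loaders.
def _mnist_loaders_example():
    """Hedged sketch: train/valid/test loaders with a 90/10 train/validation split."""
    return get_dataloaders_mnist(batch_size=128, validation_fraction=0.1)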
def get_dataloaders_cifar10(batch_size, num_workers=0,
validation_fraction=None,
train_transforms=None,
test_transforms=None):
if train_transforms is None:
train_transforms = transforms.ToTensor()
if test_transforms is None:
test_transforms = transforms.ToTensor()
train_dataset = datasets.CIFAR10(root='data',
train=True,
transform=train_transforms,
download=True)
valid_dataset = datasets.CIFAR10(root='data',
train=True,
transform=test_transforms)
test_dataset = datasets.CIFAR10(root='data',
train=False,
transform=test_transforms)
if validation_fraction is not None:
num = int(validation_fraction * 50000)
train_indices = torch.arange(0, 50000 - num)
valid_indices = torch.arange(50000 - num, 50000)
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
valid_loader = DataLoader(dataset=valid_dataset,
batch_size=batch_size,
num_workers=num_workers,
sampler=valid_sampler)
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
sampler=train_sampler)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=False)
if validation_fraction is None:
return train_loader, test_loader
else:
return train_loader, valid_loader, test_loader
######################################################################################
class HotspotDataset_v1(Dataset):
def __init__(self, csv_path):
df = pd.read_csv(csv_path)
self.y = torch.from_numpy(df['ddG'].values).to(torch.int64)
df = df.drop('ddG', axis=1)
self.features = torch.from_numpy(df.values).to(torch.float32)
def __getitem__(self, index):
features = self.features[index]
label = self.y[index]
return features, label
def __len__(self):
return self.y.shape[0]
def get_dataloaders_hotspot_v1(batch_size, train_csv_path, test_csv_path, balanced=False, num_workers=0):
train_dataset = HotspotDataset_v1(csv_path=train_csv_path)
test_dataset = HotspotDataset_v1(csv_path=test_csv_path)
if balanced:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=False,
sampler=BalancedBatchSampler(train_dataset, labels=train_dataset.y),
num_workers=num_workers)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=num_workers)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
drop_last=False,
shuffle=False,
num_workers=num_workers)
return train_loader, test_loader
class HotspotDataset_v2_2class(Dataset):
def __init__(self, csv_path):
feature_list = ['avg bond number', 'Hbond',
'Hphob', 'consurf', "B' side chain", "hotspot ratio"]
df = pd.read_csv(csv_path)
self.y = torch.from_numpy(df['2-class'].values).to(torch.int64)
self.features = torch.from_numpy(df[feature_list].values).to(torch.float32)
def __getitem__(self, index):
features = self.features[index]
label = self.y[index]
return features, label
def __len__(self):
return self.y.shape[0]
def get_dataloaders_hotspot_v2(batch_size, train_csv_path, test_csv_path, balanced=False, num_workers=0, num_classes=2):
if num_classes == 2:
train_dataset = HotspotDataset_v2_2class(csv_path=train_csv_path)
test_dataset = HotspotDataset_v2_2class(csv_path=test_csv_path)
elif num_classes == 3:
raise NotImplementedError('Not implemented yet')
else:
raise ValueError('num_classes option invalid')
if balanced:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=False,
sampler=BalancedBatchSampler(train_dataset, labels=train_dataset.y),
num_workers=num_workers)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=num_workers)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
drop_last=False,
shuffle=False,
num_workers=num_workers)
return train_loader, test_loader
#############################################
class HotspotDataset_v3_2class(Dataset):
def __init__(self, csv_path):
feature_list = ['avg bond number', 'Hbond',
'Hphob', 'consurf', "B' side chain", "hotspot ratio"]
df = pd.read_csv(csv_path)
self.y = torch.from_numpy(df['2-class'].values).to(torch.int64)
self.features = torch.from_numpy(df[feature_list].values).to(torch.float32)
## add One-hot encoded amino acids
codes = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H',
'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
code_to_int = {c:i for i,c in enumerate(codes)}
df['residue'] = df['residue'].map(code_to_int)
tensor = torch.from_numpy(df['residue'].values)
onehot = torch.nn.functional.one_hot(tensor).to(torch.float32)
self.features = torch.cat((self.features, onehot), dim=1)
def __getitem__(self, index):
features = self.features[index]
label = self.y[index]
return features, label
def __len__(self):
return self.y.shape[0]
def get_dataloaders_hotspot_v3(batch_size, train_csv_path, test_csv_path, balanced=False, num_workers=0, num_classes=2):
if num_classes == 2:
train_dataset = HotspotDataset_v3_2class(csv_path=train_csv_path)
test_dataset = HotspotDataset_v3_2class(csv_path=test_csv_path)
elif num_classes == 3:
raise NotImplementedError('Not implemented yet')
else:
raise ValueError('num_classes option invalid')
if balanced:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=False,
sampler=BalancedBatchSampler(train_dataset, labels=train_dataset.y),
num_workers=num_workers)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=num_workers)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
drop_last=False,
shuffle=False,
num_workers=num_workers)
return train_loader, test_loader
#############################################
class HotspotDataset_v3_2_2class(Dataset):
def __init__(self, csv_path):
feature_list = ['avg bond number', 'Hbond',
'Hphob', 'consurf', "B' side chain"]
df = pd.read_csv(csv_path)
self.y = torch.from_numpy(df['2-class'].values).to(torch.int64)
self.features = torch.from_numpy(df[feature_list].values).to(torch.float32)
## add One-hot encoded amino acids
codes = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H',
'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
code_to_int = {c:i for i,c in enumerate(codes)}
df['residue'] = df['residue'].map(code_to_int)
tensor = torch.from_numpy(df['residue'].values)
onehot = torch.nn.functional.one_hot(tensor).to(torch.float32)
self.features = torch.cat((self.features, onehot), dim=1)
def __getitem__(self, index):
features = self.features[index]
label = self.y[index]
return features, label
def __len__(self):
return self.y.shape[0]
def get_dataloaders_hotspot_v3_2(batch_size, train_csv_path, test_csv_path, balanced=False, num_workers=0, num_classes=2):
if num_classes == 2:
train_dataset = HotspotDataset_v3_2_2class(csv_path=train_csv_path)
test_dataset = HotspotDataset_v3_2_2class(csv_path=test_csv_path)
elif num_classes == 3:
raise NotImplementedError('Not implemented yet')
else:
raise ValueError('num_classes option invalid')
if balanced:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=False,
sampler=BalancedBatchSampler(train_dataset, labels=train_dataset.y),
num_workers=num_workers)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=num_workers)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
drop_last=False,
shuffle=False,
num_workers=num_workers)
return train_loader, test_loader
#############################################
class HotspotDataset_v4_2class(Dataset):
def __init__(self, csv_path):
feature_list = ['avg bond number', 'Hbond',
'Hphob', 'consurf', "B' side chain", "hotspot ratio"]
df = pd.read_csv(csv_path)
self.y = torch.from_numpy(df['2-class'].values).to(torch.int64)
self.features = torch.from_numpy(df[feature_list].values).to(torch.float32)
# convert aa char to int
codes = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H',
'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
code_to_int = {c:i for i,c in enumerate(codes)}
self.residues = df['residue'].map(code_to_int)
def __getitem__(self, index):
features = self.features[index]
residue = self.residues[index]
label = self.y[index]
return (features, residue), label
def __len__(self):
return self.y.shape[0]
def get_dataloaders_hotspot_v4(batch_size, train_csv_path, test_csv_path, balanced=False, num_workers=0, num_classes=2):
if num_classes == 2:
train_dataset = HotspotDataset_v4_2class(csv_path=train_csv_path)
test_dataset = HotspotDataset_v4_2class(csv_path=test_csv_path)
elif num_classes == 3:
raise NotImplementedError('Not implemented yet')
else:
raise ValueError('num_classes option invalid')
if balanced:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=False,
sampler=BalancedBatchSampler(train_dataset, labels=train_dataset.y),
num_workers=num_workers)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=num_workers)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
drop_last=False,
shuffle=False,
num_workers=num_workers)
return train_loader, test_loader
#############################################
class HotspotDataset_v4_2_2class(Dataset):
def __init__(self, csv_path):
feature_list = ['avg bond number', 'Hbond',
'Hphob', 'consurf', "B' side chain"]
df = pd.read_csv(csv_path)
self.y = torch.from_numpy(df['2-class'].values).to(torch.int64)
self.features = torch.from_numpy(df[feature_list].values).to(torch.float32)
# convert aa char to int
codes = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H',
'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
code_to_int = {c:i for i,c in enumerate(codes)}
self.residues = df['residue'].map(code_to_int)
def __getitem__(self, index):
features = self.features[index]
residue = self.residues[index]
label = self.y[index]
return (features, residue), label
def __len__(self):
return self.y.shape[0]
class HotspotDataset_v4_2_3class(Dataset):
def __init__(self, csv_path):
feature_list = ['avg bond number', 'Hbond',
'Hphob', 'consurf', "B' side chain"]
df = pd.read_csv(csv_path)
self.y = torch.from_numpy(df['3-class'].values).to(torch.int64)
self.features = torch.from_numpy(df[feature_list].values).to(torch.float32)
# convert aa char to int
codes = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H',
'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
code_to_int = {c:i for i,c in enumerate(codes)}
self.residues = df['residue'].map(code_to_int)
def __getitem__(self, index):
features = self.features[index]
residue = self.residues[index]
label = self.y[index]
return (features, residue), label
def __len__(self):
return self.y.shape[0]
def get_dataloaders_hotspot_v4_2(batch_size, train_csv_path, test_csv_path, balanced=False, num_workers=0, num_classes=2):
if num_classes == 2:
train_dataset = HotspotDataset_v4_2_2class(csv_path=train_csv_path)
test_dataset = HotspotDataset_v4_2_2class(csv_path=test_csv_path)
elif num_classes == 3:
train_dataset = HotspotDataset_v4_2_3class(csv_path=train_csv_path)
test_dataset = HotspotDataset_v4_2_3class(csv_path=test_csv_path)
else:
raise ValueError('num_classes option invalid')
if balanced:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=False,
sampler=BalancedBatchSampler(train_dataset, labels=train_dataset.y),
num_workers=num_workers)
else:
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
drop_last=True,
shuffle=True,
num_workers=num_workers)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
drop_last=False,
shuffle=False,
num_workers=num_workers)
return train_loader, test_loader
| 35.969359
| 122
| 0.54511
|
3ff2b811b6ad6ad8eea82f3e6a1ce556dfa8a7c1
| 5,014
|
py
|
Python
|
test/benchmark/basic_15q.py
|
jakelishman/qiskit-aer
|
7512ecede820e0d2bc7ad7b6704bcf06a861ca3a
|
[
"Apache-2.0"
] | 313
|
2018-12-19T09:19:12.000Z
|
2022-03-21T18:15:41.000Z
|
test/benchmark/basic_15q.py
|
jakelishman/qiskit-aer
|
7512ecede820e0d2bc7ad7b6704bcf06a861ca3a
|
[
"Apache-2.0"
] | 933
|
2018-12-21T02:56:49.000Z
|
2022-03-30T01:19:54.000Z
|
test/benchmark/basic_15q.py
|
chriseclectic/qiskit-aer
|
61b028b7ccd1d6e96c8de48a10648c0bc3c07ff9
|
[
"Apache-2.0"
] | 313
|
2018-12-19T14:52:55.000Z
|
2022-02-28T20:20:14.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Basic Circuit Benchmarking with 15 qubits
"""
from qiskit.circuit.library import IntegerComparator, WeightedAdder, QuadraticForm
from benchmark.simulator_benchmark import SimulatorBenchmarkSuite
from benchmark.basic import BasicSimulatorBenchmarkSuite
DEFAULT_QUBITS = [ 15 ]
DEFAULT_RUNTIME = [
SimulatorBenchmarkSuite.RUNTIME_STATEVECTOR_CPU,
SimulatorBenchmarkSuite.RUNTIME_MPS_CPU,
SimulatorBenchmarkSuite.RUNTIME_STATEVECTOR_GPU
]
DEFAULT_MEASUREMENT_METHODS = [
SimulatorBenchmarkSuite.MEASUREMENT_SAMPLING
]
DEFAULT_MEASUREMENT_COUNTS = SimulatorBenchmarkSuite.DEFAULT_MEASUREMENT_COUNTS
DEFAULT_NOISE_MODELS = [
SimulatorBenchmarkSuite.NOISE_IDEAL
]
class ArithmeticCircuits(BasicSimulatorBenchmarkSuite):
def __init__(self,
apps = {
'integer_comparator': 10,
'weighted_adder': 1,
'quadratic_form': 10
},
qubits = DEFAULT_QUBITS,
runtime_names = DEFAULT_RUNTIME,
measures = DEFAULT_MEASUREMENT_METHODS,
measure_counts = DEFAULT_MEASUREMENT_COUNTS,
noise_model_names = DEFAULT_NOISE_MODELS):
super().__init__('arithmetic_circuits',
apps,
qubits=qubits,
runtime_names=runtime_names,
measures=measures,
measure_counts=measure_counts,
noise_model_names=noise_model_names)
class BasicChangeCircuits(BasicSimulatorBenchmarkSuite):
def __init__(self,
apps = {'qft':1 },
qubits = DEFAULT_QUBITS,
runtime_names = DEFAULT_RUNTIME,
measures = DEFAULT_MEASUREMENT_METHODS,
measure_counts = DEFAULT_MEASUREMENT_COUNTS,
noise_model_names = DEFAULT_NOISE_MODELS):
super().__init__('basic_change_circuits',
apps,
qubits=qubits,
runtime_names=runtime_names,
measures=measures,
measure_counts=measure_counts,
noise_model_names=noise_model_names)
class NLocalCircuits(BasicSimulatorBenchmarkSuite):
def __init__(self,
apps = {
'real_amplitudes': 10,
'real_amplitudes_linear': 10,
'efficient_su2': 10,
'efficient_su2_linear': 10,
#'excitation_preserving': 10,
#'excitation_preserving_linear': 10
},
qubits = DEFAULT_QUBITS,
runtime_names = DEFAULT_RUNTIME,
measures = DEFAULT_MEASUREMENT_METHODS,
measure_counts = DEFAULT_MEASUREMENT_COUNTS,
noise_model_names = DEFAULT_NOISE_MODELS):
super().__init__('n_local_circuits',
apps,
qubits=qubits,
runtime_names=runtime_names,
measures=measures,
measure_counts=measure_counts,
noise_model_names=noise_model_names)
class ParticularQuantumCircuits(BasicSimulatorBenchmarkSuite):
def __init__(self,
apps = {
'fourier_checking': 10,
'graph_state': 10,
'hidden_linear_function': 10,
'iqp': 10,
'quantum_volume': 1,
'phase_estimation': 1 },
qubits = DEFAULT_QUBITS,
runtime_names = DEFAULT_RUNTIME,
measures = DEFAULT_MEASUREMENT_METHODS,
measure_counts = DEFAULT_MEASUREMENT_COUNTS,
noise_model_names=DEFAULT_NOISE_MODELS):
super().__init__('particular_quantum_circuits',
apps, qubits=qubits,
runtime_names=runtime_names,
measures=measures,
measure_counts=measure_counts,
noise_model_names=noise_model_names)
if __name__ == "__main__":
    benchmarks = [ ArithmeticCircuits(), BasicChangeCircuits(), NLocalCircuits(), ParticularQuantumCircuits() ]
    for benchmark in benchmarks:
        benchmark.run_manual()
| 39.480315
| 111
| 0.581572
|
68add56d8a0d61c0a2415b6142bf88e0d559ee57
| 5,557
|
py
|
Python
|
tests/test_all_sum_reduce.py
|
BuildJet/distdl
|
28b0dcf2c0a762de924cc310398a2eab9c35297f
|
[
"BSD-2-Clause"
] | 25
|
2020-06-25T21:11:55.000Z
|
2022-03-24T04:56:23.000Z
|
tests/test_all_sum_reduce.py
|
BuildJet/distdl
|
28b0dcf2c0a762de924cc310398a2eab9c35297f
|
[
"BSD-2-Clause"
] | 97
|
2020-06-08T17:09:59.000Z
|
2022-03-26T00:47:11.000Z
|
tests/test_all_sum_reduce.py
|
BuildJet/distdl
|
28b0dcf2c0a762de924cc310398a2eab9c35297f
|
[
"BSD-2-Clause"
] | 8
|
2020-06-08T17:00:54.000Z
|
2022-03-20T20:20:24.000Z
|
import os
import numpy as np
import pytest
from adjoint_test import check_adjoint_test_tight
use_cuda = 'USE_CUDA' in os.environ
adjoint_parametrizations = []
# Main functionality
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
tuple(), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-0D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
(0, ), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-1D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
(1, ), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-1D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
(2, ), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-1D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
(0, 1), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-2D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
(0, 2), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-2D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
(1, 2), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-2D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [2, 2, 3], # P_x_ranks, P_x_topo
[3, 4], # x_global_shape
(0, 1, 2), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-3D-3D_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 12), [12], # P_x_ranks, P_x_topo
[30, 344], # x_global_shape
(0, ), # axes_reduce
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-mock_weight_reduction",
marks=[pytest.mark.mpi(min_size=12)]
)
)
# For example of indirect, see https://stackoverflow.com/a/28570677
@pytest.mark.parametrize("P_x_ranks, P_x_shape,"
"x_global_shape,"
"axes_reduce,"
"comm_split_fixture",
adjoint_parametrizations,
indirect=["comm_split_fixture"])
def test_all_sum_reduce_adjoint(barrier_fence_fixture,
comm_split_fixture,
P_x_ranks, P_x_shape,
x_global_shape,
axes_reduce):
import numpy as np
import torch
from distdl.backends.mpi.partition import MPIPartition
from distdl.nn.all_sum_reduce import AllSumReduce
from distdl.utilities.torch import zero_volume_tensor
device = torch.device('cuda' if use_cuda else 'cpu')
# Isolate the minimum needed ranks
base_comm, active = comm_split_fixture
if not active:
return
P_world = MPIPartition(base_comm)
# Create the partitions
P_x_base = P_world.create_partition_inclusive(P_x_ranks)
P_x = P_x_base.create_cartesian_topology_partition(P_x_shape)
    # In general, each rank's input may have a different shape. Then, the output
    # size will also be different, which we will have to get from `y` itself.
x_local_shape = np.asarray(x_global_shape)
layer = AllSumReduce(P_x, axes_reduce)
layer = layer.to(device)
x = zero_volume_tensor(device=device)
if P_x.active:
x = 10*torch.ones(*x_local_shape, device=device)
x.requires_grad = True
dy = zero_volume_tensor(device=device)
if P_x.active:
# Adjoint Input
dy = 0.1*torch.ones(*x_local_shape, device=device)
# y = F @ x
y = layer(x)
# dx = F* @ dy
y.backward(dy)
dx = x.grad
x = x.detach()
dx = dx.detach()
dy = dy.detach()
y = y.detach()
reduced_entry_value = 1
for k in range(len(P_x_shape)):
if k in axes_reduce:
reduced_entry_value *= P_x_shape[k]
assert(torch.all(y == 10*reduced_entry_value))
assert(torch.all(dx == 0.1*reduced_entry_value))
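    # Adjoint test: with y = F x and dx = F* dy, <x, dx> must equal <y, dy> up to a tight tolerance.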
check_adjoint_test_tight(P_world, x, dx, y, dy)
P_world.deactivate()
P_x_base.deactivate()
P_x.deactivate()
| 30.037838
| 80
| 0.605363
|
98b372f14863a8d3b85948de96dbca0de95569a1
| 137
|
py
|
Python
|
message_passing_nn/fixtures/filenames.py
|
kovanostra/message-passing-nn
|
6617a4753173c8fffc60140b9d8d0f497b33aed4
|
[
"MIT"
] | 12
|
2020-06-14T03:00:16.000Z
|
2022-01-05T09:51:07.000Z
|
message_passing_nn/fixtures/filenames.py
|
kovanostra/message-passing-nn
|
6617a4753173c8fffc60140b9d8d0f497b33aed4
|
[
"MIT"
] | 1
|
2020-12-13T10:37:03.000Z
|
2020-12-13T10:37:03.000Z
|
message_passing_nn/fixtures/filenames.py
|
kovanostra/message-passing-nn
|
6617a4753173c8fffc60140b9d8d0f497b33aed4
|
[
"MIT"
] | 1
|
2020-11-21T13:04:56.000Z
|
2020-11-21T13:04:56.000Z
|
RESULTS_CSV = 'results.csv'
DISTANCE_MAPS = 'distance_maps.pickle'
MODEL_STATE_DICTIONARY = 'model_state_dictionary.pth'
EPOCH = "Epoch"
| 27.4
| 53
| 0.80292
|
59acace20d0543df7ab156a30f0da2255f5cae51
| 819
|
py
|
Python
|
app/core/tests/test_commands.py
|
sushan531/recipe-rest-api
|
49a290efd0613af63ef1cdc1be924f902f0a743f
|
[
"MIT"
] | null | null | null |
app/core/tests/test_commands.py
|
sushan531/recipe-rest-api
|
49a290efd0613af63ef1cdc1be924f902f0a743f
|
[
"MIT"
] | 12
|
2020-02-12T01:23:36.000Z
|
2022-03-11T23:56:28.000Z
|
app/core/tests/test_commands.py
|
sushan531/recipe-rest-api
|
49a290efd0613af63ef1cdc1be924f902f0a743f
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTest(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db ins available"""
with patch("django.db.utils.ConnectionHandler.__getitem__") as gi:
gi.return_value = True
call_command("wait_for_db")
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch("django.db.utils.ConnectionHandler.__getitem__") as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command("wait_for_db")
self.assertEqual(gi.call_count, 6)
| 35.608696
| 74
| 0.678877
|
94ac85a017530b7961f22a99bca50682b4ec9b6b
| 63
|
py
|
Python
|
filters.py
|
agnethesoraa/placepuppy
|
2047a9cf364f21038234bf802ce367d791679f9c
|
[
"MIT"
] | 2
|
2015-03-22T20:33:44.000Z
|
2015-03-22T20:33:53.000Z
|
filters.py
|
agnethesoraa/placepuppy
|
2047a9cf364f21038234bf802ce367d791679f9c
|
[
"MIT"
] | null | null | null |
filters.py
|
agnethesoraa/placepuppy
|
2047a9cf364f21038234bf802ce367d791679f9c
|
[
"MIT"
] | null | null | null |
def greyscale(im):
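    # Convert to single-channel greyscale ('L'), then back to RGB so callers still receive a 3-channel image.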
return im.convert('L').convert('RGB')
| 12.6
| 41
| 0.634921
|
e67e56f3f3c76705661f441c85ce10c28d447d49
| 253
|
py
|
Python
|
05_debugging/solutions/bug_04.py
|
ASU-CompMethodsPhysics-PHY494/PHY494-resources-2020
|
20e08c20995eab567063b1845487e84c0e690e96
|
[
"CC-BY-4.0"
] | null | null | null |
05_debugging/solutions/bug_04.py
|
ASU-CompMethodsPhysics-PHY494/PHY494-resources-2020
|
20e08c20995eab567063b1845487e84c0e690e96
|
[
"CC-BY-4.0"
] | null | null | null |
05_debugging/solutions/bug_04.py
|
ASU-CompMethodsPhysics-PHY494/PHY494-resources-2020
|
20e08c20995eab567063b1845487e84c0e690e96
|
[
"CC-BY-4.0"
] | null | null | null |
# bug 4
# https://asu-compmethodsphysics-phy494.github.io/ASU-PHY494/2019/02/05/05_Debugging/#activity-fix-as-many-bugs-as-possible
# Define the sinc-function sinc(x) = \sin(x)/x:
import math
def sinc(x):
return math.sin(x)/x
print(sinc(3.145))
| 19.461538
| 123
| 0.715415
|
7e1deb20fbdb7babdd6ad6d6bcd83c10e3bffa1e
| 27,909
|
py
|
Python
|
models/stylegan_networks.py
|
rjean/contrastive-unpaired-translation
|
40bff6988345c27651c7087b622c4d135ca62dcc
|
[
"BSD-3-Clause"
] | null | null | null |
models/stylegan_networks.py
|
rjean/contrastive-unpaired-translation
|
40bff6988345c27651c7087b622c4d135ca62dcc
|
[
"BSD-3-Clause"
] | null | null | null |
models/stylegan_networks.py
|
rjean/contrastive-unpaired-translation
|
40bff6988345c27651c7087b622c4d135ca62dcc
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The network architecture is based on the PyTorch implementation of StyleGAN2Encoder.
Original PyTorch repo: https://github.com/rosinality/style-based-gan-pytorch
Original StyleGAN2 paper: https://github.com/NVlabs/stylegan2
We use this network architecture for our single-image training setting.
"""
import math
import numpy as np
import random
import torch
from torch import nn
from torch.nn import functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
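    # Bias add followed by a leaky ReLU; the sqrt(2) scale keeps the activation magnitude roughly constant.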
return F.leaky_relu(input + bias, negative_slope) * scale
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
# print("FusedLeakyReLU: ", input.abs().mean())
out = fused_leaky_relu(input, self.bias,
self.negative_slope,
self.scale)
# print("FusedLeakyReLU: ", out.abs().mean())
return out
def upfirdn2d_native(
input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
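    # Upsample by zero-insertion, pad, convolve with the flipped FIR kernel, then downsample by striding.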
_, minor, in_h, in_w = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, minor, in_h, 1, in_w, 1)
out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0])
out = out.view(-1, minor, in_h * up_y, in_w * up_x)
out = F.pad(
out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
)
out = out[
:,
:,
max(-pad_y0, 0): out.shape[2] - max(-pad_y1, 0),
max(-pad_x0, 0): out.shape[3] - max(-pad_x1, 0),
]
# out = out.permute(0, 3, 1, 2)
out = out.reshape(
[-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
)
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(
-1,
minor,
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
)
# out = out.permute(0, 2, 3, 1)
return out[:, :, ::down_y, ::down_x]
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])
class PixelNorm(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if len(k.shape) == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * (factor ** 2)
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = (pad0, pad1)
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
return out
class Downsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel)
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2
pad1 = p // 2
self.pad = (pad0, pad1)
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
return out
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * (upsample_factor ** 2)
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class EqualConv2d(nn.Module):
def __init__(
self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
):
super().__init__()
self.weight = nn.Parameter(
torch.randn(out_channel, in_channel, kernel_size, kernel_size)
)
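        # Equalized learning rate: weights are drawn from N(0, 1) and rescaled at runtime by 1/sqrt(fan_in).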
self.scale = math.sqrt(1) / math.sqrt(in_channel * (kernel_size ** 2))
self.stride = stride
self.padding = padding
if bias:
self.bias = nn.Parameter(torch.zeros(out_channel))
else:
self.bias = None
def forward(self, input):
# print("Before EqualConv2d: ", input.abs().mean())
out = F.conv2d(
input,
self.weight * self.scale,
bias=self.bias,
stride=self.stride,
padding=self.padding,
)
# print("After EqualConv2d: ", out.abs().mean(), (self.weight * self.scale).abs().mean())
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
)
class EqualLinear(nn.Module):
def __init__(
self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = (math.sqrt(1) / math.sqrt(in_dim)) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(
input, self.weight * self.scale, bias=self.bias * self.lr_mul
)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class ScaledLeakyReLU(nn.Module):
def __init__(self, negative_slope=0.2):
super().__init__()
self.negative_slope = negative_slope
def forward(self, input):
out = F.leaky_relu(input, negative_slope=self.negative_slope)
return out * math.sqrt(2)
class ModulatedConv2d(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
style_dim,
demodulate=True,
upsample=False,
downsample=False,
blur_kernel=[1, 3, 3, 1],
):
super().__init__()
self.eps = 1e-8
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = (len(blur_kernel) - factor) - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = math.sqrt(1) / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(
torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
)
if style_dim is not None and style_dim > 0:
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
f'upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
if style is not None:
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
else:
style = torch.ones(batch, 1, in_channel, 1, 1).to(input.device)
weight = self.scale * self.weight * style
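        # Weight demodulation: rescale each per-sample output filter so modulation does not change its magnitude.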
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(
batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(
batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
)
weight = weight.transpose(1, 2).reshape(
batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
)
out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class ConstantInput(nn.Module):
def __init__(self, channel, size=4):
super().__init__()
self.input = nn.Parameter(torch.randn(1, channel, size, size))
def forward(self, input):
batch = input.shape[0]
out = self.input.repeat(batch, 1, 1, 1)
return out
class StyledConv(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
style_dim=None,
upsample=False,
blur_kernel=[1, 3, 3, 1],
demodulate=True,
inject_noise=True,
):
super().__init__()
self.inject_noise = inject_noise
self.conv = ModulatedConv2d(
in_channel,
out_channel,
kernel_size,
style_dim,
upsample=upsample,
blur_kernel=blur_kernel,
demodulate=demodulate,
)
self.noise = NoiseInjection()
# self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
# self.activate = ScaledLeakyReLU(0.2)
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input, style=None, noise=None):
out = self.conv(input, style)
if self.inject_noise:
out = self.noise(out, noise=noise)
# out = out + self.bias
out = self.activate(out)
return out
class ToRGB(nn.Module):
def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
def forward(self, input, style, skip=None):
out = self.conv(input, style)
out = out + self.bias
if skip is not None:
skip = self.upsample(skip)
out = out + skip
return out
class Generator(nn.Module):
def __init__(
self,
size,
style_dim,
n_mlp,
channel_multiplier=2,
blur_kernel=[1, 3, 3, 1],
lr_mlp=0.01,
):
super().__init__()
self.size = size
self.style_dim = style_dim
layers = [PixelNorm()]
for i in range(n_mlp):
layers.append(
EqualLinear(
style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
)
)
self.style = nn.Sequential(*layers)
self.channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
self.input = ConstantInput(self.channels[4])
self.conv1 = StyledConv(
self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
)
self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
self.log_size = int(math.log(size, 2))
self.num_layers = (self.log_size - 2) * 2 + 1
self.convs = nn.ModuleList()
self.upsamples = nn.ModuleList()
self.to_rgbs = nn.ModuleList()
self.noises = nn.Module()
in_channel = self.channels[4]
for layer_idx in range(self.num_layers):
res = (layer_idx + 5) // 2
shape = [1, 1, 2 ** res, 2 ** res]
self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
for i in range(3, self.log_size + 1):
out_channel = self.channels[2 ** i]
self.convs.append(
StyledConv(
in_channel,
out_channel,
3,
style_dim,
upsample=True,
blur_kernel=blur_kernel,
)
)
self.convs.append(
StyledConv(
out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
)
)
self.to_rgbs.append(ToRGB(out_channel, style_dim))
in_channel = out_channel
self.n_latent = self.log_size * 2 - 2
def make_noise(self):
device = self.input.input.device
noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
for i in range(3, self.log_size + 1):
for _ in range(2):
noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
return noises
def mean_latent(self, n_latent):
latent_in = torch.randn(
n_latent, self.style_dim, device=self.input.input.device
)
latent = self.style(latent_in).mean(0, keepdim=True)
return latent
def get_latent(self, input):
return self.style(input)
def forward(
self,
styles,
return_latents=False,
inject_index=None,
truncation=1,
truncation_latent=None,
input_is_latent=False,
noise=None,
randomize_noise=True,
):
if not input_is_latent:
styles = [self.style(s) for s in styles]
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers
else:
noise = [
getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
]
if truncation < 1:
style_t = []
for style in styles:
style_t.append(
truncation_latent + truncation * (style - truncation_latent)
)
styles = style_t
if len(styles) < 2:
inject_index = self.n_latent
if len(styles[0].shape) < 3:
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else:
latent = styles[0]
else:
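            # Style mixing: the first inject_index layers use styles[0], the remaining layers use styles[1].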
if inject_index is None:
inject_index = random.randint(1, self.n_latent - 1)
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
latent = torch.cat([latent, latent2], 1)
out = self.input(latent)
out = self.conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
):
out = conv1(out, latent[:, i], noise=noise1)
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip)
i += 2
image = skip
if return_latents:
return image, latent
else:
return image, None
class ConvLayer(nn.Sequential):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
downsample=False,
blur_kernel=[1, 3, 3, 1],
bias=True,
activate=True,
):
layers = []
if downsample:
factor = 2
p = (len(blur_kernel) - factor) + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
stride = 2
self.padding = 0
else:
stride = 1
self.padding = kernel_size // 2
layers.append(
EqualConv2d(
in_channel,
out_channel,
kernel_size,
padding=self.padding,
stride=stride,
bias=bias and not activate,
)
)
if activate:
if bias:
layers.append(FusedLeakyReLU(out_channel))
else:
layers.append(ScaledLeakyReLU(0.2))
super().__init__(*layers)
class ResBlock(nn.Module):
def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], downsample=True, skip_gain=1.0):
super().__init__()
self.skip_gain = skip_gain
self.conv1 = ConvLayer(in_channel, in_channel, 3)
self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=downsample, blur_kernel=blur_kernel)
if in_channel != out_channel or downsample:
self.skip = ConvLayer(
in_channel, out_channel, 1, downsample=downsample, activate=False, bias=False
)
else:
self.skip = nn.Identity()
def forward(self, input):
out = self.conv1(input)
out = self.conv2(out)
skip = self.skip(input)
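        # Mix residual and skip paths; dividing by sqrt(skip_gain^2 + 1) keeps the output variance roughly constant.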
out = (out * self.skip_gain + skip) / math.sqrt(self.skip_gain ** 2 + 1.0)
return out
class StyleGAN2Discriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, no_antialias=False, size=None, opt=None):
super().__init__()
self.opt = opt
self.stddev_group = 16
if size is None:
size = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size)))))
if "patch" in self.opt.netD and self.opt.D_patch_size is not None:
size = 2 ** int(np.log2(self.opt.D_patch_size))
blur_kernel = [1, 3, 3, 1]
channel_multiplier = ndf / 64
channels = {
4: min(384, int(4096 * channel_multiplier)),
8: min(384, int(2048 * channel_multiplier)),
16: min(384, int(1024 * channel_multiplier)),
32: min(384, int(512 * channel_multiplier)),
64: int(256 * channel_multiplier),
128: int(128 * channel_multiplier),
256: int(64 * channel_multiplier),
512: int(32 * channel_multiplier),
1024: int(16 * channel_multiplier),
}
convs = [ConvLayer(3, channels[size], 1)]
log_size = int(math.log(size, 2))
in_channel = channels[size]
if "smallpatch" in self.opt.netD:
final_res_log2 = 4
elif "patch" in self.opt.netD:
final_res_log2 = 3
else:
final_res_log2 = 2
for i in range(log_size, final_res_log2, -1):
out_channel = channels[2 ** (i - 1)]
convs.append(ResBlock(in_channel, out_channel, blur_kernel))
in_channel = out_channel
self.convs = nn.Sequential(*convs)
if False and "tile" in self.opt.netD:
in_channel += 1
self.final_conv = ConvLayer(in_channel, channels[4], 3)
if "patch" in self.opt.netD:
self.final_linear = ConvLayer(channels[4], 1, 3, bias=False, activate=False)
else:
self.final_linear = nn.Sequential(
EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
EqualLinear(channels[4], 1),
)
def forward(self, input, get_minibatch_features=False):
if "patch" in self.opt.netD and self.opt.D_patch_size is not None:
h, w = input.size(2), input.size(3)
y = torch.randint(h - self.opt.D_patch_size, ())
x = torch.randint(w - self.opt.D_patch_size, ())
input = input[:, :, y:y + self.opt.D_patch_size, x:x + self.opt.D_patch_size]
out = input
for i, conv in enumerate(self.convs):
out = conv(out)
# print(i, out.abs().mean())
# out = self.convs(input)
batch, channel, height, width = out.shape
if False and "tile" in self.opt.netD:
group = min(batch, self.stddev_group)
stddev = out.view(
group, -1, 1, channel // 1, height, width
)
stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
stddev = stddev.repeat(group, 1, height, width)
out = torch.cat([out, stddev], 1)
out = self.final_conv(out)
# print(out.abs().mean())
if "patch" not in self.opt.netD:
out = out.view(batch, -1)
out = self.final_linear(out)
return out
class TileStyleGAN2Discriminator(StyleGAN2Discriminator):
def forward(self, input):
B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
size = self.opt.D_patch_size
Y = H // size
X = W // size
input = input.view(B, C, Y, size, X, size)
input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
return super().forward(input)
class StyleGAN2Encoder(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
super().__init__()
assert opt is not None
self.opt = opt
channel_multiplier = ngf / 32
channels = {
4: min(512, int(round(4096 * channel_multiplier))),
8: min(512, int(round(2048 * channel_multiplier))),
16: min(512, int(round(1024 * channel_multiplier))),
32: min(512, int(round(512 * channel_multiplier))),
64: int(round(256 * channel_multiplier)),
128: int(round(128 * channel_multiplier)),
256: int(round(64 * channel_multiplier)),
512: int(round(32 * channel_multiplier)),
1024: int(round(16 * channel_multiplier)),
}
blur_kernel = [1, 3, 3, 1]
cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size)))))
convs = [nn.Identity(),
ConvLayer(3, channels[cur_res], 1)]
num_downsampling = self.opt.stylegan2_G_num_downsampling
for i in range(num_downsampling):
in_channel = channels[cur_res]
out_channel = channels[cur_res // 2]
convs.append(ResBlock(in_channel, out_channel, blur_kernel, downsample=True))
cur_res = cur_res // 2
for i in range(n_blocks // 2):
n_channel = channels[cur_res]
convs.append(ResBlock(n_channel, n_channel, downsample=False))
self.convs = nn.Sequential(*convs)
def forward(self, input, layers=[], get_features=False):
feat = input
feats = []
if -1 in layers:
layers.append(len(self.convs) - 1)
for layer_id, layer in enumerate(self.convs):
feat = layer(feat)
# print(layer_id, " features ", feat.abs().mean())
if layer_id in layers:
feats.append(feat)
if get_features:
return feat, feats
else:
return feat
class StyleGAN2Decoder(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
super().__init__()
assert opt is not None
self.opt = opt
blur_kernel = [1, 3, 3, 1]
channel_multiplier = ngf / 32
channels = {
4: min(512, int(round(4096 * channel_multiplier))),
8: min(512, int(round(2048 * channel_multiplier))),
16: min(512, int(round(1024 * channel_multiplier))),
32: min(512, int(round(512 * channel_multiplier))),
64: int(round(256 * channel_multiplier)),
128: int(round(128 * channel_multiplier)),
256: int(round(64 * channel_multiplier)),
512: int(round(32 * channel_multiplier)),
1024: int(round(16 * channel_multiplier)),
}
num_downsampling = self.opt.stylegan2_G_num_downsampling
cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) // (2 ** num_downsampling)
convs = []
for i in range(n_blocks // 2):
n_channel = channels[cur_res]
convs.append(ResBlock(n_channel, n_channel, downsample=False))
for i in range(num_downsampling):
in_channel = channels[cur_res]
out_channel = channels[cur_res * 2]
inject_noise = "small" not in self.opt.netG
convs.append(
StyledConv(in_channel, out_channel, 3, upsample=True, blur_kernel=blur_kernel, inject_noise=inject_noise)
)
cur_res = cur_res * 2
convs.append(ConvLayer(channels[cur_res], 3, 1))
self.convs = nn.Sequential(*convs)
def forward(self, input):
return self.convs(input)
class StyleGAN2Generator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
super().__init__()
self.opt = opt
self.encoder = StyleGAN2Encoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)
self.decoder = StyleGAN2Decoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)
def forward(self, input, layers=[], encode_only=False):
feat, feats = self.encoder(input, layers, True)
if encode_only:
return feats
else:
fake = self.decoder(feat)
if len(layers) > 0:
return fake, feats
else:
return fake
| 30.501639
| 137
| 0.558959
|
79c7500650275d9f68793fed9847b9736ee9500f
| 159
|
py
|
Python
|
src/day04.py
|
zhangxinyong12/my-python-demo
|
3ac121073749ad9caf42531603e921b04e1e7c6c
|
[
"MIT"
] | null | null | null |
src/day04.py
|
zhangxinyong12/my-python-demo
|
3ac121073749ad9caf42531603e921b04e1e7c6c
|
[
"MIT"
] | null | null | null |
src/day04.py
|
zhangxinyong12/my-python-demo
|
3ac121073749ad9caf42531603e921b04e1e7c6c
|
[
"MIT"
] | null | null | null |
#
def add(a):
try:
print(10 + a)
except Exception as err:
        print('error message', err)
finally:
        print('this always runs (finally block)')
add(23)
add('23')
| 12.230769
| 28
| 0.496855
|
9193817cff4b9e98e09a8d4103a308ac16f43959
| 584
|
py
|
Python
|
campaign/migrations/0001_initial.py
|
tomas1207/BackendArmaGTI
|
16274eb761e7f55900b1e6585dfb4b21abf25377
|
[
"MIT"
] | null | null | null |
campaign/migrations/0001_initial.py
|
tomas1207/BackendArmaGTI
|
16274eb761e7f55900b1e6585dfb4b21abf25377
|
[
"MIT"
] | null | null | null |
campaign/migrations/0001_initial.py
|
tomas1207/BackendArmaGTI
|
16274eb761e7f55900b1e6585dfb4b21abf25377
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2021-02-25 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('ismaincampaing', models.BooleanField()),
('status', models.BooleanField()),
],
),
]
| 24.333333
| 114
| 0.554795
|
0438e3735efc6894ea23c4362dafa8d516676f65
| 2,727
|
py
|
Python
|
egs/voxceleb/adv.v2/steps_backend/eval-be-cos.py
|
hyperion-ml/hyperion
|
c4c9eee0acab1ba572843373245da12d00dfffaa
|
[
"Apache-2.0"
] | 14
|
2021-12-19T04:24:15.000Z
|
2022-03-18T03:24:04.000Z
|
egs/voxceleb/v1/steps_be/eval-be-v2.py
|
hyperion-ml/hyperion
|
c4c9eee0acab1ba572843373245da12d00dfffaa
|
[
"Apache-2.0"
] | null | null | null |
egs/voxceleb/v1/steps_be/eval-be-v2.py
|
hyperion-ml/hyperion
|
c4c9eee0acab1ba572843373245da12d00dfffaa
|
[
"Apache-2.0"
] | 5
|
2021-12-14T20:41:27.000Z
|
2022-02-24T14:18:11.000Z
|
#!/usr/bin/env python
"""
Copyright 2019 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import sys
import os
from jsonargparse import (
ArgumentParser,
ActionConfigFile,
ActionParser,
namespace_to_dict,
)
import time
import logging
import numpy as np
from hyperion.hyp_defs import float_cpu, config_logger
from hyperion.utils import TrialNdx, TrialScores
from hyperion.utils.math import cosine_scoring
from hyperion.helpers import TrialDataReader as TDR
from hyperion.helpers import PLDAFactory as F
from hyperion.transforms import TransformList
def eval_plda(
iv_file,
ndx_file,
enroll_file,
test_file,
preproc_file,
score_file,
model_part_idx,
num_model_parts,
seg_part_idx,
num_seg_parts,
**kwargs
):
logging.info("loading data")
if preproc_file is not None:
preproc = TransformList.load(preproc_file)
else:
preproc = None
tdr = TDR(
iv_file,
ndx_file,
enroll_file,
test_file,
preproc,
model_part_idx,
num_model_parts,
seg_part_idx,
num_seg_parts,
)
x_e, x_t, enroll, ndx = tdr.read()
t1 = time.time()
logging.info("computing llr")
scores = cosine_scoring(x_e, x_t)
dt = time.time() - t1
num_trials = len(enroll) * x_t.shape[0]
logging.info(
"scoring elapsed time: %.2f s. elapsed time per trial: %.2f ms."
% (dt, dt / num_trials * 1000)
)
if num_model_parts > 1 or num_seg_parts > 1:
score_file = "%s-%03d-%03d" % (score_file, model_part_idx, seg_part_idx)
logging.info("saving scores to %s" % (score_file))
s = TrialScores(enroll, ndx.seg_set, scores, score_mask=ndx.trial_mask)
s.save_txt(score_file)
if __name__ == "__main__":
parser = ArgumentParser(description="Eval cosine-scoring")
parser.add_argument("--iv-file", dest="iv_file", required=True)
parser.add_argument("--ndx-file", dest="ndx_file", default=None)
parser.add_argument("--enroll-file", dest="enroll_file", required=True)
parser.add_argument("--test-file", dest="test_file", default=None)
parser.add_argument("--preproc-file", dest="preproc_file", default=None)
TDR.add_argparse_args(parser)
parser.add_argument("--score-file", dest="score_file", required=True)
parser.add_argument(
"-v", "--verbose", dest="verbose", default=1, choices=[0, 1, 2, 3], type=int
)
args = parser.parse_args()
config_logger(args.verbose)
del args.verbose
logging.debug(args)
assert args.test_file is not None or args.ndx_file is not None
eval_plda(**namespace_to_dict(args))
| 26.475728
| 84
| 0.675101
|
0033e80175778cca9bda8d46dc74612ac12ee6f9
| 278
|
py
|
Python
|
examples/simple/utils/changeFileName.py
|
M-Chase/Iris_EfficientNet
|
4bbca8d4b466656f7ed1b1b7e8f924427b739124
|
[
"Apache-2.0"
] | null | null | null |
examples/simple/utils/changeFileName.py
|
M-Chase/Iris_EfficientNet
|
4bbca8d4b466656f7ed1b1b7e8f924427b739124
|
[
"Apache-2.0"
] | null | null | null |
examples/simple/utils/changeFileName.py
|
M-Chase/Iris_EfficientNet
|
4bbca8d4b466656f7ed1b1b7e8f924427b739124
|
[
"Apache-2.0"
] | null | null | null |
import os
path = r'E:\A_py_project\EfficientNet-PyTorch\examples\simple\data\newData\test\2\R'
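# Prefix every file in the directory above with "R_" (the path is machine-specific).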
j=0
files = os.listdir(path)
print(files)
for i in files:
print(i)
    os.rename(os.path.join(path, i), os.path.join(path, "R_" + i))
# print(os.path)
# os.rename(os)
| 27.8
| 84
| 0.661871
|
7705206475106b50409fecb08f5a7929640a2838
| 2,535
|
py
|
Python
|
sbi_compare_ops_db.py
|
haroldham/steembasicincome
|
ce5b78ce35dba9cedad6a44eda4cb332bad53e49
|
[
"MIT"
] | null | null | null |
sbi_compare_ops_db.py
|
haroldham/steembasicincome
|
ce5b78ce35dba9cedad6a44eda4cb332bad53e49
|
[
"MIT"
] | null | null | null |
sbi_compare_ops_db.py
|
haroldham/steembasicincome
|
ce5b78ce35dba9cedad6a44eda4cb332bad53e49
|
[
"MIT"
] | null | null | null |
from beem import Steem
from beem.nodelist import NodeList
from beem.blockchain import Blockchain
import os
import json
import time
from steembi.transfer_ops_storage import AccountTrx
from steembi.storage import AccountsDB
import dataset
if __name__ == "__main__":
config_file = 'config.json'
if not os.path.isfile(config_file):
raise Exception("config.json is missing!")
else:
with open(config_file) as json_data_file:
config_data = json.load(json_data_file)
databaseConnector = config_data["databaseConnector"]
databaseConnector2 = config_data["databaseConnector2"]
other_accounts = config_data["other_accounts"]
hive_blockchain = config_data["hive_blockchain"]
start_prep_time = time.time()
db = dataset.connect(databaseConnector)
db2 = dataset.connect(databaseConnector2)
accountStorage = AccountsDB(db2)
accounts = accountStorage.get()
# Update current node list from @fullnodeupdate
nodes = NodeList()
nodes.update_nodes()
stm = Steem(node=nodes.get_nodes(hive=hive_blockchain))
print("Check account history ops.")
blockchain = Blockchain(steem_instance=stm)
accountTrx = {}
for account in accounts:
accountTrx[account] = AccountTrx(db, account)
if not accountTrx[account].exists_table():
accountTrx[account].create_table() # FIXME Method does not exist
# temp
accountTrx["sbi"] = AccountTrx(db, "sbi")
ops1 = accountTrx["steembasicincome"].get_all(op_types=["transfer", "delegate_vesting_shares"])
ops2 = accountTrx["sbi"].get_all(op_types=["transfer", "delegate_vesting_shares"])
print("ops loaded: length: %d - %d" % (len(ops1), len(ops2)))
index = 0
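    # Walk both account-history streams in lockstep and report any entries whose timestamps disagree.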
while index < len(ops1) and index < len(ops2):
op1 = ops1[index]
op2 = ops2[index]
start_block = op1["block"]
virtual_op = op1["virtual_op"]
trx_in_block = op1["trx_in_block"]
op_in_trx = op1["op_in_trx"]
start_block = op2["block"]
virtual_op = op2["virtual_op"]
trx_in_block = op2["trx_in_block"]
op_in_trx = op2["op_in_trx"]
dict1 = json.loads(op1["op_dict"])
dict2 = json.loads(op2["op_dict"])
if dict1["timestamp"] != dict2["timestamp"]:
print("%s - %s" % (dict1["timestamp"], dict2["timestamp"]))
print("block: %d - %d" % (op1["block"], op2["block"]))
print("index: %d - %d" % (op1["op_acc_index"], op2["op_acc_index"]))
index += 1
| 35.704225
| 99
| 0.654832
|
2cf10f3a86193ddc1dc65843884e93f1243f6b1c
| 241
|
py
|
Python
|
examples/Arctic_HYCOM_GLBy/remap_test.py
|
bilgetutak/pyroms
|
3b0550f26f4ac181b7812e14a7167cd1ca0797f0
|
[
"BSD-3-Clause"
] | 75
|
2016-04-05T07:15:57.000Z
|
2022-03-04T22:49:54.000Z
|
examples/Arctic_HYCOM_GLBy/remap_test.py
|
hadfieldnz/pyroms-mgh
|
cd0fe39075825f97a7caf64e2c4c5a19f23302fd
|
[
"BSD-3-Clause"
] | 27
|
2017-02-26T04:27:49.000Z
|
2021-12-01T17:26:56.000Z
|
examples/Arctic_HYCOM_GLBy/remap_test.py
|
hadfieldnz/pyroms-mgh
|
cd0fe39075825f97a7caf64e2c4c5a19f23302fd
|
[
"BSD-3-Clause"
] | 56
|
2016-05-11T06:19:14.000Z
|
2022-03-22T19:04:17.000Z
|
import xarray as xr
from regrid_GLBy import regrid_GLBy
gsource = xr.open_dataset('/import/AKWATERS/kshedstrom/HYCOM/Svalbard/data/HYCOM_GLBy0.08_2018_345.nc')
myssh = regrid_GLBy(gsource.ssh, method='bilinear')
myssh.to_netcdf('myssh.nc')
| 34.428571
| 103
| 0.809129
|
5995139dfd8cedea5fe3e2e2073afd74157ca3d4
| 1,183
|
py
|
Python
|
neural_structured_learning/lib/abstract_gen_neighbor.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 939
|
2019-08-28T06:50:30.000Z
|
2022-03-30T02:37:07.000Z
|
neural_structured_learning/lib/abstract_gen_neighbor.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 80
|
2019-09-01T19:47:30.000Z
|
2022-02-02T20:38:38.000Z
|
neural_structured_learning/lib/abstract_gen_neighbor.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 196
|
2019-09-01T19:38:53.000Z
|
2022-02-08T01:25:57.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for generating neighbors.
This abstract class will be inherited by classes with actual implementation for
generating neighbors (e.g., adversarial neighbors or graph neighbors).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class GenNeighbor(object):
"""Abstract class for generating neighbors.
This class is to be inherited by the class that actually implements the method
to generate neighbors.
"""
def __init__(self):
raise NotImplementedError
def gen_neighbor(self):
raise NotImplementedError
| 31.131579
| 80
| 0.775993
|
a82981089bdf52844b371d8a7eb33ca2c17ae400
| 405
|
py
|
Python
|
barsystem/src/barsystem/migrations/0031_person_member.py
|
TkkrLab/barsystem
|
17d138f19c8f6a61b14477f034d8519bb83e00fb
|
[
"MIT"
] | 1
|
2016-03-28T16:19:53.000Z
|
2016-03-28T16:19:53.000Z
|
barsystem/src/barsystem/migrations/0031_person_member.py
|
TkkrLab/barsystem
|
17d138f19c8f6a61b14477f034d8519bb83e00fb
|
[
"MIT"
] | 11
|
2015-09-22T20:34:34.000Z
|
2017-04-12T13:55:27.000Z
|
barsystem/src/barsystem/migrations/0031_person_member.py
|
TkkrLab/barsystem
|
17d138f19c8f6a61b14477f034d8519bb83e00fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('barsystem', '0030_auto_20150717_1557'),
]
operations = [
migrations.AddField(
model_name='person',
name='member',
field=models.BooleanField(default=False),
),
]
| 20.25
| 53
| 0.607407
|
0c3a3035d477dae678d2b0d15f9c4ec508384b15
| 325
|
py
|
Python
|
assets/programovani-je-hra/3.2.1.py
|
JiriKalvoda/slama.dev
|
4856d246858dd98a1852365b028873b61f5a6775
|
[
"MIT"
] | 7
|
2019-09-15T19:55:11.000Z
|
2021-12-27T11:40:20.000Z
|
assets/programovani-je-hra/3.2.1.py
|
JiriKalvoda/slama.dev
|
4856d246858dd98a1852365b028873b61f5a6775
|
[
"MIT"
] | 38
|
2020-05-18T12:49:13.000Z
|
2022-03-23T12:51:28.000Z
|
assets/programovani-je-hra/3.2.1.py
|
JiriKalvoda/slama.dev
|
4856d246858dd98a1852365b028873b61f5a6775
|
[
"MIT"
] | 17
|
2020-02-16T19:49:25.000Z
|
2022-02-06T14:28:35.000Z
|
x = 100
y = 120
dx = 3
dy = 5
r = 30
def setup():
size(400, 400)
def draw():
global x, y, dx, dy, r
background(255)
ellipse(x, y, r * 2, r * 2)
if x >= width - r:
dx = -dx
if x <= r:
dx = -dx
if y >= height - r:
dy = -dy
if y <= r:
dy = -dy
x = x + dx
y = y + dy
| 10.833333
| 29
| 0.406154
|
11b9510d58eebf146febccb11245c3fe0e9ab25d
| 311
|
py
|
Python
|
FirstExamPrepare/wormhole.py
|
nikolayvutov/Python
|
55163496dac452a7110b7f76edc6894ee195f1fe
|
[
"MIT"
] | null | null | null |
FirstExamPrepare/wormhole.py
|
nikolayvutov/Python
|
55163496dac452a7110b7f76edc6894ee195f1fe
|
[
"MIT"
] | null | null | null |
FirstExamPrepare/wormhole.py
|
nikolayvutov/Python
|
55163496dac452a7110b7f76edc6894ee195f1fe
|
[
"MIT"
] | null | null | null |
tel = list(map(int, input().split(' ')))
current_index, answer = 0, 0
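# Count the cells stepped through normally (value 0); a non-zero cell teleports to that index and is then cleared.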
while current_index != len(tel):
if tel[current_index] == 0:
answer += 1
current_index += 1
else:
old_index = current_index
current_index = tel[current_index]
tel[old_index] = 0
print(answer)
| 23.923077
| 42
| 0.601286
|
c847d6463847794fb818daac30d4770560cd91c0
| 21,472
|
py
|
Python
|
algorithms/mathematics/find_all_possible_arithmetic_expressions_and_solve.py
|
josephedradan/algorithms
|
6caa107b0df245653eab81143ebf0d9c7e5515fb
|
[
"MIT"
] | null | null | null |
algorithms/mathematics/find_all_possible_arithmetic_expressions_and_solve.py
|
josephedradan/algorithms
|
6caa107b0df245653eab81143ebf0d9c7e5515fb
|
[
"MIT"
] | null | null | null |
algorithms/mathematics/find_all_possible_arithmetic_expressions_and_solve.py
|
josephedradan/algorithms
|
6caa107b0df245653eab81143ebf0d9c7e5515fb
|
[
"MIT"
] | null | null | null |
"""
Created by Joseph Edradan
Github: https://github.com/josephedradan
Date created: 1/13/2021
Purpose:
Given a list of numbers and a list of operators, Make all possible permutations of the combinations of numbers and
operators. Then use all possible permutations of the combinations of numbers and operators to find all possible
ways to evaluate each arithmetic expression.
Basically, find all permutations of operands and operators and all the ways that each permutation can be
    evaluated, and then find each one's corresponding result.
Details:
Description:
Notes:
Was meant to solve the problem posed in the video titled "Can you solve this puzzle. Make 24 from 6 4 3 and 1."
(https://www.youtube.com/watch?v=Jnf18uqZRyw)
Rules:
You have 4 numbers: 1, 3, 4, and 6 and you have the 4 basic mathematical operators. You have to use all of the
        numbers and some of the operators to reach the target of 24.
IMPORTANT NOTES:
Explanation:
get_dict_key_result_value_set_arithmetic_expression(list_of_numbers, list_of_operators):
list_of_permutations_numbers = Permutate list_of_numbers
If treat_list_operators_as_allowed_to_use is True:
list_of_permutations_operators = Combination with replacement of list_of_operators
Else If treat_list_operators_as_allowed_to_use is False:
list_of_permutations_operators = Permutate list_of_operators
# Big function call here
list_list_item_mathematical =
*Interweave Every list_of_permutations_numbers with Every list_of_permutations_operators
# Create a dict with default value as a set (set to prevent repeating arithmetic expressions from the dfs)
dict_result_arithmetic_expression = defaultdict(set)
For every list_item_mathematical in list_list_item_mathematical:
list_arithmetic_expression =
*DFS permutate to get every Arithmetic Expression of the current list_item_mathematical
For every arithmetic_expression in list_arithmetic_expression:
# The Key of the dict is the solved result of the arithmetic_expression and the Value is the
# corresponding arithmetic_expression
                dict_result_arithmetic_expression[arithmetic_expression.get_result()].add(arithmetic_expression)
return dict_result_arithmetic_expression
Reference:
The simple puzzle that most people can't solve
https://www.youtube.com/watch?v=Jnf18uqZRyw
Python number base class OR how to determine a value is a number
https://stackoverflow.com/questions/44756406/python-number-base-class-or-how-to-determine-a-value-is-a-number
How do I type hint a method with the type of the enclosing class?
https://stackoverflow.com/questions/33533148/how-do-i-type-hint-a-method-with-the-type-of-the-enclosing-class
Notes:
from __future__ import annotations
"""
from __future__ import annotations
import traceback
from collections import defaultdict
from itertools import permutations, combinations_with_replacement, chain
from numbers import Real
from typing import Union, Set, Dict
class ArithmeticExpression:
"""
Arithmetic Expression that returns a value
"""
def __init__(self,
operand_lhs: Union[Real, ArithmeticExpression],
operand_rhs: Union[Real, ArithmeticExpression],
operator: str):
# Operands
self.operand_lhs = operand_lhs
self.operand_rhs = operand_rhs
# Operators
self.operator = operator
def __add__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "+")
def __sub__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "-")
def __mul__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "*")
def __truediv__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "/")
def __floordiv__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "//")
def __pow__(self, power: Union[Real, ArithmeticExpression], modulo=None) -> ArithmeticExpression:
return ArithmeticExpression(self, power, "**")
def __mod__(self, other: Union[Real, ArithmeticExpression]) -> ArithmeticExpression:
return ArithmeticExpression(self, other, "%")
def __str__(self) -> str:
return "({}{}{})".format(self.operand_lhs, self.operator, self.operand_rhs)
def __repr__(self) -> str:
return self.__str__()
    def __hash__(self) -> int:
return hash(self.__str__())
def get_result(self) -> Union[Real, ArithmeticExpression]:
"""
Returns the result of the Arithmetic Expression
:return: a number
"""
if isinstance(self.operand_lhs, ArithmeticExpression):
lhs: Union[Real, ArithmeticExpression] = self.operand_lhs.get_result()
else:
lhs: Real = self.operand_lhs
if isinstance(self.operand_rhs, ArithmeticExpression):
rhs: Union[Real, ArithmeticExpression] = self.operand_rhs.get_result()
else:
rhs: Real = self.operand_rhs
return simplify_expression(lhs, rhs, self.operator)
def __copy__(self) -> Union[Real, ArithmeticExpression]:
"""
Returns a copy of the current expression.
Not used
:return: ArithmeticExpression object
"""
if isinstance(self.operand_lhs, ArithmeticExpression):
lhs: Union[Real, ArithmeticExpression] = self.operand_lhs.__copy__()
else:
lhs = self.operand_lhs
if isinstance(self.operand_rhs, ArithmeticExpression):
rhs: Union[Real, ArithmeticExpression] = self.operand_rhs.__copy__()
else:
rhs = self.operand_rhs
return ArithmeticExpression(lhs, rhs, self.operator)
def get_list_permutation(list_given: list) -> list:
"""
    Return a list of permutations of list_given
:param list_given: a list given
:return: list of permutations
"""
return list(permutations(list_given))
def get_list_combination_with_replacement(list_given: Union[list, set], size) -> list:
"""
    Return a list of combinations with replacement from list_given
:param list_given: a list given
:return: list of combinations with replacement
"""
return list(combinations_with_replacement(list_given, size))
def get_list_list_item_mathematical_permutate_operators(list_permutation_operands, list_permutation_operators):
"""
For every list that is a permutation of operands, interweave every list that is a permutation of operators
within it.
Example:
[1, 2, 3, 4]
[+, -, *]
[+, *, -]
[-, +, *]
[-, *, +]
[*, +, -]
[*, -, +]
[1, 3, 2, 4]
[+, -, *]
[+, *, -]
...
etc...
Result:
[1, +, 2, -, 3, *, 4]
[1, +, 2, *, 3, -, 4]
...
[1, *, 2, -, 3, +, 4]
[1, +, 3, -, 2, *, 4]
[1, +, 3, *, 2, -, 4]
...
[1, *, 3, -, 2, +, 4]
etc...
:param list_permutation_operands: list containing a list of operands
:param list_permutation_operators: list containing a list of operators
:return: List containing list of alternating operands and operators
"""
# List containing lists where the inner lists contain mathematical items (operands and operators)
list_list_item_mathematical = []
# Loop through list_permutation_operands
for permutation_operands in list_permutation_operands:
# print(permutation_operands)
# Loop through list_permutation_operators
for permutation_operators in list_permutation_operators:
# print("\t", permutation_operators)
# Make a list containing mathematical items
list_item_mathematical = []
# Loop through permutation_operands getting the operands and its indices
for index, operand in enumerate(permutation_operands):
# Add operand to list_item_mathematical
list_item_mathematical.append(operand)
# If the index of the operand is the last one
if index == len(permutation_operands) - 1:
# print("\t\t", list_item_mathematical)
# Add a copy of list_item_mathematical into list_list_item_mathematical
list_list_item_mathematical.append(list_item_mathematical.copy())
break
# Add operator to list_item_mathematical
list_item_mathematical.append(permutation_operators[index])
# Return list of list of item mathematical
return list_list_item_mathematical
def simplify_list_item_mathematical(list_item_mathematical: list) -> Real:
"""
Apply mathematical operations on the operands and reduce to 1 value
Not Used
:param list_item_mathematical: list of mathematical items
:return: value
"""
# Current value
value = None
# Current Operator
operator_current = None
# If operator is used already
operator_used = None
# Loop through every item in list_item_mathematical
for item in list_item_mathematical:
# Check if item is a string which should be an operator
if isinstance(item, str):
operator_current = item
operator_used = False
# If not a string then it's a number
else:
# If value is None initially
if value is None:
value = item
# Do math of with the operand and the operator if the current operator is not used
if operator_used is False:
value = simplify_expression(value, item, operator_current)
operator_used = True
# Return value
return value
def dfs_permutations_expression_arithmetic_priority(list_item_mathematical: list,
list_arithmetic_expression: list = None):
"""
Given a list of mathematical items, find all permutations of the order in which each arithmetic expression,
a combination of 2 operands and 1 operator, will be evaluated first. Each arithmetic expression is created via dfs
method.
Notes:
list_item_mathematical MUST NOT BE A GENERATOR
Example:
1 + 2 * 3
Result:
((1 + 2) * 3)
(1 + (2 * 3))
# Doctest function call
>>> dfs_permutations_expression_arithmetic_priority([1, "+", 2, "*", 3])
[((1+2)*3), (1+(2*3))]
:param list_item_mathematical: list of mathematical items
    :param list_arithmetic_expression: list of arithmetic expressions
:return:
"""
# Reset variables for reuse of the function
if list_arithmetic_expression is None:
list_arithmetic_expression = []
# Loop through mathematical items in list_item_mathematical
for index, item in enumerate(list_item_mathematical):
# If item is a string then it's probably an operator
if isinstance(item, str):
# Assume item is an operator
operator = item
# lhs and rhs operands
lhs = list_item_mathematical[index - 1]
rhs = list_item_mathematical[index + 1]
# Create a list similar to list_item_mathematical but also hosts a new object called ArithmeticExpression
list_list_item_mathematical_with_expression_arithmetic = []
# Loop through mathematical items in list_item_mathematical again
for index_2, item_2 in enumerate(list_item_mathematical):
# If index_2 is in [index - 1, index, index + 1]
if index_2 in [index - 1, index, index + 1]:
# If index_2 and index match
if index_2 == index:
"""
Create and add the ArithmeticExpression to
list_list_item_mathematical_with_expression_arithmetic
"""
list_list_item_mathematical_with_expression_arithmetic.append(
ArithmeticExpression(lhs, rhs, operator))
# *Continue only when we pass the three indices where the middle index == index_2
continue
"""
            *Add the mathematical item to list_list_item_mathematical_with_expression_arithmetic, assuming we have
            either already passed, or not yet reached, the 3 mathematical items that will be collapsed into a single
            ArithmeticExpression.
"""
list_list_item_mathematical_with_expression_arithmetic.append(item_2)
# If the size of list_list_item_mathematical_with_expression_arithmetic is 1
if len(list_list_item_mathematical_with_expression_arithmetic) == 1:
# print((list_list_item_mathematical_with_expression_arithmetic[0]))
"""
Add the first object in list_list_item_mathematical_with_expression_arithmetic to
list_arithmetic_expression
This means that the item is just 1 Arithmetic Expression object
"""
list_arithmetic_expression.append(list_list_item_mathematical_with_expression_arithmetic[0])
# Recursive Call ONLY when there is not 1 item in the list_list_item_mathematical_with_expression_arithmetic
else:
dfs_permutations_expression_arithmetic_priority(list_list_item_mathematical_with_expression_arithmetic,
list_arithmetic_expression)
return list_arithmetic_expression
def simplify_expression(operand_lhs: Real, operand_rhs: Real, operator: str) -> Real:
"""
Given lhs operand, rhs operand, and operator, simplify the expression or solve
WARNING:
        Don't use the dict-based dispatch: it evaluates every operation on the operands eagerly, regardless of which key is actually requested
:param operand_lhs: lhs operand
:param operand_rhs: rhs operand
:param operator: operator
:return: result of the expression
"""
# key = {"+": operand_lhs + operand_rhs,
# "-": operand_lhs - operand_rhs,
# "*": operand_lhs * operand_rhs,
# "/": operand_lhs / operand_rhs
# }
# return key.get(operator)
# Result is currently None
result = None
# Get the result of the operation
try:
if operator == "+":
result = operand_lhs + operand_rhs
elif operator == "-":
result = operand_lhs - operand_rhs
elif operator == "*":
result = operand_lhs * operand_rhs
elif operator == "/":
result = operand_lhs / operand_rhs
elif operator == "//":
result = operand_lhs // operand_rhs
elif operator == "**":
result = operand_lhs ** operand_rhs
elif operator == "%":
result = operand_lhs % operand_rhs
except ZeroDivisionError as e:
# print("Cannot do {} / {} ".format(operand_lhs, operand_rhs))
pass
except OverflowError as e:
# print("Result it too big!")
pass
except TypeError as e:
# print("Mathematical operation can be be done with operands {} and {}".format(operand_lhs, operand_rhs))
pass
except Exception as e:
print(e)
print(traceback.print_exc())
# Return the result
return result
def get_dict_key_result_value_set_arithmetic_expression(
list_operands, list_operators,
treat_list_operators_as_allowed_to_use=False) -> Dict[Real, Set[ArithmeticExpression]]:
"""
Given a list of operands and a list of operators, find all possible permutations of these mathematical items, then
solve.
:param list_operands: list of operands
:param list_operators: list of operators
    :param treat_list_operators_as_allowed_to_use: Treat list_operators as the operators the algorithm is allowed
        to use (with repetition) rather than a list of operators that must each be used exactly once.
:return: dictionary of result of the expression and the expression
"""
"""
List of list of operands as permutations
Notes:
        Alternatively, permutations can be called instead of get_list_permutation, which should be less memory
        intensive.
"""
permutations_operands = permutations(list_operands)
# List of list of operators as combinations with replacement
if treat_list_operators_as_allowed_to_use:
"""
Get every combination with replacement of operators within list_operators
Notes:
Alternatively, combinations_with_replacement can be called here instead because the set function below this
variable will make this not exhaustible.
"""
list_list_operators_every_combination = combinations_with_replacement(set(list_operators),
len(list_operands) - 1)
"""
    *** Get every permutation of every combination from list_list_operators_every_combination into 1 chain object
of type iterable, then remove duplicate permutations by putting them into a set.
"""
list_list_operators = set(chain(*[permutations(i) for i in list_list_operators_every_combination]))
# print(set(list_list_operators))
# List of list of operators as permutations
else:
# list_list_operators needs to be not exhaustible because it will be reused over again
list_list_operators = get_list_permutation(list_operators)
# Get list of list of mathematical items
list_list_item_mathematical = get_list_list_item_mathematical_permutate_operators(permutations_operands,
list_list_operators)
    # Default dict mapping the result of an expression (key) to the set of Arithmetic Expressions that produce it (value)
dict_result_arithmetic_expression = defaultdict(set)
# For list of mathematical items
for list_item_mathematical in list_list_item_mathematical:
        # Get a list of Arithmetic Expression objects that represent list_item_mathematical
list_arithmetic_expression = dfs_permutations_expression_arithmetic_priority(list_item_mathematical)
# For every Arithmetic Expression
for arithmetic_expression in list_arithmetic_expression:
# print(f"Arithmetic Expression: {arithmetic_expression}")
# print(f"Arithmetic Expression Result: {arithmetic_expression.get_return()}")
            # Add the result of the Arithmetic Expression as a key and add the Arithmetic Expression to its set
dict_result_arithmetic_expression[arithmetic_expression.get_result()].add(arithmetic_expression)
# Return dict_result_arithmetic_expression
return dict_result_arithmetic_expression
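# Illustrative sketch of the mapping returned above, assuming small inputs:
# with operands [2, 3] and operators ["+"], the dictionary would contain an
# entry whose key is 5 and whose value is the set of Arithmetic Expression
# objects representing "2 + 3" and "3 + 2", i.e. every reachable result maps
# to the set of expressions that evaluate to it.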
def solve_problem(target=24):
"""
Solve the problem posed in the video "Can you solve this puzzle. Make 24 from 6 4 3 and 1."
(https://www.youtube.com/watch?v=Jnf18uqZRyw)
:param target: Value to reach
:return: None
"""
operands = [6, 4, 3, 1]
operators = ["+", "-", "*", "/"]
dict_results = get_dict_key_result_value_set_arithmetic_expression(operands, operators,
treat_list_operators_as_allowed_to_use=True)
set_solution = dict_results.get(target, None)
print(f"Target is {target}")
if set_solution is None:
print("Target could not be found!")
print("Solution does not exist")
else:
print("Possible solutions are:")
for expression in set_solution:
print("\t{} = {}".format(expression, expression.get_result()))
print("\n" + 100 * "-" + "\n")
print("All Permutations of Operands, Operators, and Order of Operations:")
for key, value in dict_results.items():
print(key)
for expression in value:
print("\t{}".format(expression))
print()
def test_example():
"""
    Show all Arithmetic Expressions and their corresponding results
:return: None
"""
operands = [6, 4, 3, 1]
operators = ["+", "-", "*", "/", "**", "//", "%"]
dict_results = get_dict_key_result_value_set_arithmetic_expression(operands, operators,
treat_list_operators_as_allowed_to_use=True)
total_expressions = 0
for key, value in dict_results.items():
print(key)
for expression in value:
total_expressions += 1
print("\t{}".format(expression))
print()
print(f"Total solutions: {len(dict_results)}")
print(f"Total expressions: {total_expressions}")
if __name__ == '__main__':
solve_problem()
print("\n" + 100 * "-" + "\n")
test_example()
| 35.786667
| 120
| 0.65313
|
dc5c46a86984a910ac4af026c440fcd58256bf08
| 12,765
|
py
|
Python
|
yardstick/network_services/nfvi/resource.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | null | null | null |
yardstick/network_services/nfvi/resource.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | null | null | null |
yardstick/network_services/nfvi/resource.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Resource collection definitions """
import errno
from itertools import chain
import logging
import multiprocessing
import os
import os.path
import re
import jinja2
import pkg_resources
from oslo_config import cfg
from oslo_utils.encodeutils import safe_decode
from yardstick import ssh
from yardstick.common.task_template import finalize_for_yaml
from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
ZMQ_OVS_PORT = 5567
ZMQ_POLLING_TIME = 12000
LIST_PLUGINS_ENABLED = ["amqp", "cpu", "cpufreq", "memory",
"hugepages"]
class ResourceProfile(object):
"""
This profile adds a resource at the beginning of the test session
"""
COLLECTD_CONF = "collectd.conf"
AMPQ_PORT = 5672
DEFAULT_INTERVAL = 25
DEFAULT_TIMEOUT = 3600
OVS_SOCKET_PATH = "/usr/local/var/run/openvswitch/db.sock"
def __init__(self, mgmt, port_names=None, plugins=None, interval=None, timeout=None):
if plugins is None:
self.plugins = {}
else:
self.plugins = plugins
if interval is None:
self.interval = self.DEFAULT_INTERVAL
else:
self.interval = interval
if timeout is None:
self.timeout = self.DEFAULT_TIMEOUT
else:
self.timeout = timeout
self.enable = True
self._queue = multiprocessing.Queue()
self.amqp_client = None
self.port_names = validate_non_string_sequence(port_names, default=[])
# we need to save mgmt so we can connect to port 5672
self.mgmt = mgmt
self.connection = ssh.AutoConnectSSH.from_node(mgmt)
@classmethod
def make_from_node(cls, node, timeout):
# node dict works as mgmt dict
# don't need port names, there is no way we can
# tell what port is used on the compute node
collectd_options = node["collectd"]
plugins = collectd_options.get("plugins", {})
interval = collectd_options.get("interval")
return cls(node, plugins=plugins, interval=interval, timeout=timeout)
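    # Illustrative sketch (hypothetical node values): make_from_node expects a
    # node dict that also carries a "collectd" section, e.g.
    #   node = {"ip": "10.0.0.5", "user": "root", "password": "r00t",
    #           "collectd": {"plugins": {"ovs_stats": {}}, "interval": 10}}
    #   profile = ResourceProfile.make_from_node(node, timeout=3600)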
def check_if_system_agent_running(self, process):
""" verify if system agent is running """
try:
err, pid, _ = self.connection.execute("pgrep -f %s" % process)
# strip whitespace
return err, pid.strip()
except OSError as e:
if e.errno in {errno.ECONNRESET}:
# if we can't connect to check, then we won't be able to connect to stop it
LOG.exception("Can't connect to host to check %s status", process)
return 1, None
raise
def run_collectd_amqp(self):
""" run amqp consumer to collect the NFVi data """
amqp_url = 'amqp://admin:admin@{}:{}/%2F'.format(self.mgmt['ip'], self.AMPQ_PORT)
amqp = AmqpConsumer(amqp_url, self._queue)
try:
amqp.run()
except (AttributeError, RuntimeError, KeyboardInterrupt):
amqp.stop()
@classmethod
def parse_simple_resource(cls, key, value):
reskey = "/".join(rkey for rkey in key if "nsb_stats" not in rkey)
return {reskey: value.split(":")[1]}
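    # Illustrative sketch (hypothetical key/value): for a key such as
    #   ["nsb_stats_host", "hugepages-mm-2048Kb", "vmpage_number-free"]
    # and a value "1470123456:42", parse_simple_resource returns
    #   {"hugepages-mm-2048Kb/vmpage_number-free": "42"}
    # since components containing "nsb_stats" are dropped and only the part
    # after ":" is kept.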
@classmethod
def get_cpu_data(cls, res_key0, res_key1, value):
""" Get cpu topology of the host """
pattern = r"-(\d+)"
if 'cpufreq' in res_key0:
metric, source = res_key0, res_key1
else:
metric, source = res_key1, res_key0
match = re.search(pattern, source, re.MULTILINE)
if not match:
return "error", "Invalid", "", ""
time, value = value.split(":")
return str(match.group(1)), metric, value, time
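    # Illustrative sketch (hypothetical key components): get_cpu_data("cpu-3",
    # "cpu-user", "1470123456:12.5") would return ("3", "cpu-user", "12.5",
    # "1470123456"): the core index comes from the component that is not the
    # metric name, and the value string is split into (time, value).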
@classmethod
def parse_hugepages(cls, key, value):
return cls.parse_simple_resource(key, value)
@classmethod
def parse_dpdkstat(cls, key, value):
return cls.parse_simple_resource(key, value)
@classmethod
def parse_virt(cls, key, value):
return cls.parse_simple_resource(key, value)
@classmethod
def parse_ovs_stats(cls, key, value):
return cls.parse_simple_resource(key, value)
@classmethod
def parse_intel_pmu_stats(cls, key, value):
return {''.join(str(v) for v in key): value.split(":")[1]}
def parse_collectd_result(self, metrics):
""" convert collectd data into json"""
result = {
"cpu": {},
"memory": {},
"hugepages": {},
"dpdkstat": {},
"virt": {},
"ovs_stats": {},
}
testcase = ""
# unicode decode
decoded = ((safe_decode(k, 'utf-8'), safe_decode(v, 'utf-8')) for k, v in metrics.items())
for key, value in decoded:
key_split = key.split("/")
res_key_iter = (key for key in key_split if "nsb_stats" not in key)
res_key0 = next(res_key_iter)
res_key1 = next(res_key_iter)
if "cpu" in res_key0 or "intel_rdt" in res_key0 or "intel_pmu" in res_key0:
cpu_key, name, metric, testcase = \
self.get_cpu_data(res_key0, res_key1, value)
result["cpu"].setdefault(cpu_key, {}).update({name: metric})
elif "memory" in res_key0:
result["memory"].update({res_key1: value.split(":")[0]})
elif "hugepages" in res_key0:
result["hugepages"].update(self.parse_hugepages(key_split, value))
elif "dpdkstat" in res_key0:
result["dpdkstat"].update(self.parse_dpdkstat(key_split, value))
elif "virt" in res_key1:
result["virt"].update(self.parse_virt(key_split, value))
elif "ovs_stats" in res_key0:
result["ovs_stats"].update(self.parse_ovs_stats(key_split, value))
result["timestamp"] = testcase
return result
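    # Illustrative sketch (hypothetical metric key): a metric such as
    #   {"nsb_stats/memory/memory-free": "1470123456:2048"}
    # would be routed by the branches above into
    #   result["memory"] == {"memory-free": "1470123456"}
    # (note that the memory branch keeps the part of the value before ":").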
def amqp_process_for_nfvi_kpi(self):
""" amqp collect and return nfvi kpis """
if self.amqp_client is None and self.enable:
self.amqp_client = multiprocessing.Process(
name="AmqpClient-{}-{}".format(self.mgmt['ip'], os.getpid()),
target=self.run_collectd_amqp)
self.amqp_client.start()
def amqp_collect_nfvi_kpi(self):
""" amqp collect and return nfvi kpis """
if not self.enable:
return {}
metric = {}
while not self._queue.empty():
metric.update(self._queue.get())
msg = self.parse_collectd_result(metric)
return msg
def _provide_config_file(self, config_file_path, nfvi_cfg, template_kwargs):
template = pkg_resources.resource_string("yardstick.network_services.nfvi",
nfvi_cfg).decode('utf-8')
cfg_content = jinja2.Template(template, trim_blocks=True, lstrip_blocks=True,
finalize=finalize_for_yaml).render(
**template_kwargs)
# cfg_content = io.StringIO(template.format(**template_kwargs))
cfg_file = os.path.join(config_file_path, nfvi_cfg)
# must write as root, so use sudo
self.connection.execute("cat | sudo tee {}".format(cfg_file), stdin=cfg_content)
def _prepare_collectd_conf(self, config_file_path):
""" Prepare collectd conf """
kwargs = {
"interval": self.interval,
"loadplugins": set(chain(LIST_PLUGINS_ENABLED, self.plugins.keys())),
            # The optional PortName field is descriptive only, use whatever is present
"port_names": self.port_names,
# "ovs_bridge_interfaces": ["br-int"],
"plugins": self.plugins,
}
self._provide_config_file(config_file_path, self.COLLECTD_CONF, kwargs)
def _setup_ovs_stats(self, connection):
try:
socket_path = self.plugins["ovs_stats"].get("ovs_socket_path", self.OVS_SOCKET_PATH)
except KeyError:
# ovs_stats is not a dict
socket_path = self.OVS_SOCKET_PATH
status = connection.execute("test -S {}".format(socket_path))[0]
if status != 0:
LOG.error("cannot find OVS socket %s", socket_path)
def _start_collectd(self, connection, bin_path):
LOG.debug("Starting collectd to collect NFVi stats")
connection.execute('sudo pkill -x -9 collectd')
collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
config_file_path = os.path.join(bin_path, "collectd", "etc")
exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
if exit_status != 0:
LOG.warning("%s is not present disabling", collectd_path)
# disable auto-provisioning because it requires Internet access
# collectd_installer = os.path.join(bin_path, "collectd.sh")
# provision_tool(connection, collectd)
# http_proxy = os.environ.get('http_proxy', '')
# https_proxy = os.environ.get('https_proxy', '')
# connection.execute("sudo %s '%s' '%s'" % (
# collectd_installer, http_proxy, https_proxy))
return
if "ovs_stats" in self.plugins:
self._setup_ovs_stats(connection)
LOG.debug("Starting collectd to collect NFVi stats")
# ensure collectd.conf.d exists to avoid error/warning
connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
self._prepare_collectd_conf(config_file_path)
# Reset amqp queue
LOG.debug("reset and setup amqp to collect data from collectd")
connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
connection.execute("sudo service rabbitmq-server start")
connection.execute("sudo rabbitmqctl stop_app")
connection.execute("sudo rabbitmqctl reset")
connection.execute("sudo rabbitmqctl start_app")
connection.execute("sudo service rabbitmq-server restart")
LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd")
connection.execute("sudo rabbitmqctl delete_user guest")
connection.execute("sudo rabbitmqctl add_user admin admin")
connection.execute("sudo rabbitmqctl authenticate_user admin admin")
connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
LOG.debug("Start collectd service..... %s second timeout", self.timeout)
        # the intel_pmu plugin requires a large number of open files, so try to
        # set ulimit -n to a large value
connection.execute("sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path,
timeout=self.timeout)
LOG.debug("Done")
def initiate_systemagent(self, bin_path):
""" Start system agent for NFVi collection on host """
if self.enable:
try:
self._start_collectd(self.connection, bin_path)
except Exception:
LOG.exception("Exception during collectd start")
raise
def start(self):
""" start nfvi collection """
if self.enable:
LOG.debug("Start NVFi metric collection...")
def stop(self):
""" stop nfvi collection """
if not self.enable:
return
agent = "collectd"
LOG.debug("Stop resource monitor...")
if self.amqp_client is not None:
            # be proper and try to join first
self.amqp_client.join(3)
self.amqp_client.terminate()
LOG.debug("Check if %s is running", agent)
status, pid = self.check_if_system_agent_running(agent)
LOG.debug("status %s pid %s", status, pid)
if status != 0:
return
if pid:
self.connection.execute('sudo kill -9 "%s"' % pid)
self.connection.execute('sudo pkill -9 "%s"' % agent)
self.connection.execute('sudo service rabbitmq-server stop')
self.connection.execute("sudo rabbitmqctl stop_app")
| 37.991071
| 98
| 0.621387
|
38ea0489b9451dcb91dfde227c8abcb98573d2b9
| 28,094
|
py
|
Python
|
molo/core/tests/test_models.py
|
praekeltfoundation/molo-basic
|
0c8a5fdbaf0eb62efc7164a26326801a27116dfe
|
[
"BSD-2-Clause"
] | null | null | null |
molo/core/tests/test_models.py
|
praekeltfoundation/molo-basic
|
0c8a5fdbaf0eb62efc7164a26326801a27116dfe
|
[
"BSD-2-Clause"
] | 3
|
2021-01-28T21:04:23.000Z
|
2022-03-25T19:05:41.000Z
|
molo/core/tests/test_models.py
|
praekeltfoundation/molo-basic
|
0c8a5fdbaf0eb62efc7164a26326801a27116dfe
|
[
"BSD-2-Clause"
] | 1
|
2020-09-09T08:01:02.000Z
|
2020-09-09T08:01:02.000Z
|
# coding=utf-8
import pytest
from mock import patch
from django.test import TestCase, RequestFactory
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.models import ContentType
from wagtail.images.tests.utils import Image, get_test_image_file
from molo.core.models import (
ArticlePage, CmsSettings, Main,
SiteLanguageRelation, Languages, SectionIndexPage, FooterIndexPage,
BannerIndexPage, BannerPage,
Timezone, Site, LanguageRelation
)
from molo.core.templatetags.core_tags import (
load_child_articles_for_section,
get_translation)
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.tasks import promote_articles
from molo.core.wagtail_hooks import copy_translation_pages
@pytest.mark.django_db
class TestModels(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.factory = RequestFactory()
self.main = Main.objects.all().first()
self.language_setting = Languages.objects.create(
site_id=self.main.get_site().pk)
self.english = SiteLanguageRelation.objects.create(
language_setting=self.language_setting,
locale='en', is_active=True)
LanguageRelation.objects.create(
page=self.main, language=self.english)
self.french = SiteLanguageRelation.objects.create(
language_setting=self.language_setting,
locale='fr', is_active=True)
LanguageRelation.objects.create(
page=self.main, language=self.french)
LanguageRelation.objects.create(
page=self.main, language=self.english)
LanguageRelation.objects.create(
page=self.banner_index, language=self.english)
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
self.yourmind = self.mk_section(
self.section_index, title='Your mind')
self.yourmind_sub = self.mk_section(
self.yourmind, title='Your mind subsection')
self.mk_main2()
self.main2 = Main.objects.all().last()
self.language_setting2 = Languages.objects.create(
site_id=self.main2.get_site().pk)
self.english2 = SiteLanguageRelation.objects.create(
language_setting=self.language_setting2,
locale='en',
is_active=True)
self.spanish = SiteLanguageRelation.objects.create(
language_setting=self.language_setting2,
locale='es',
is_active=True)
LanguageRelation.objects.create(
page=self.main2, language=self.english2)
LanguageRelation.objects.create(
page=self.main2, language=self.spanish)
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
self.image2 = Image.objects.create(
title="Test image 2",
file=get_test_image_file(),
)
self.yourmind2 = self.mk_section(
self.section_index2, title='Your mind')
self.yourmind_sub2 = self.mk_section(
self.yourmind2, title='Your mind subsection')
def test_multisite_one_root_page(self):
second_site = Site.objects.create(
hostname='kaios.mr.com', port=80, root_page=self.main,
is_default_site=False, site_name='kaios main')
self.assertEqual(self.main.get_site().pk, second_site.pk)
def test_copy_method_of_article_page_copies_over_languages(self):
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
article = self.mk_articles(self.yourmind, 1)[0]
LanguageRelation.objects.create(
page=article, language=self.english2)
self.mk_article_translation(article, self.french)
article2 = article.copy(to=self.yourmind2)
copy_translation_pages(article, article2)
self.assertTrue(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(
locale='fr').first().is_active)
def test_move_method_of_article_page_copies_over_languages(self):
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
article = self.mk_articles(self.yourmind, 1)[0]
fr_article = self.mk_article_translation(article, self.french)
fr_article.move(self.yourmind2)
self.assertTrue(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(
locale='fr').first().is_active)
def test_sections_method_of_main_gives_children_of_main_only(self):
sections = self.main.sections()
self.assertFalse(sections.child_of(self.main2).exists())
@pytest.mark.django_db(transaction=True)
def test_copy_method_of_section_index_wont_duplicate_index_pages(self):
LanguageRelation.objects.create(
page=SectionIndexPage.objects.child_of(self.main2).first(),
language=self.spanish)
self.assertEqual(
SectionIndexPage.objects.child_of(self.main2).count(), 1)
self.section_index.copy(to=self.main2)
self.assertEqual(
SectionIndexPage.objects.child_of(self.main2).count(), 1)
@pytest.mark.django_db(transaction=True)
def test_copy_method_of_footer_index_wont_duplicate_index_pages(self):
LanguageRelation.objects.create(
page=FooterIndexPage.objects.child_of(self.main2).first(),
language=self.spanish)
self.assertEqual(
FooterIndexPage.objects.child_of(self.main2).count(), 1)
self.section_index.copy(to=self.main2)
self.assertEqual(
FooterIndexPage.objects.child_of(self.main2).count(), 1)
@pytest.mark.django_db(transaction=True)
def test_copy_method_of_banner_index_wont_duplicate_index_pages(self):
LanguageRelation.objects.create(
page=BannerIndexPage.objects.child_of(self.main2).first(),
language=self.spanish)
self.assertEqual(
BannerIndexPage.objects.child_of(self.main2).count(), 1)
self.section_index.copy(to=self.main2)
self.assertEqual(
BannerIndexPage.objects.child_of(self.main2).count(), 1)
def test_main_returns_bannerpages(self):
banner = BannerPage(title='test banner')
self.banner_index.add_child(instance=banner)
banner.save_revision().publish()
banner = BannerPage(title='test banner 2')
self.banner_index.add_child(instance=banner)
banner.save_revision().publish()
self.assertEqual(self.main.bannerpages().count(), 2)
def test_get_parent_section_for_article(self):
article = self.mk_article(self.yourmind_sub)
parent = article.get_parent_section()
self.assertEqual(parent.pk, self.yourmind_sub.pk)
def test_get_parent_section_for_section(self):
parent = self.yourmind_sub.get_parent_section()
self.assertEqual(parent.pk, self.yourmind.pk)
def test_get_top_level_parent(self):
title = 'title'
main_content_type, created = ContentType.objects.get_or_create(
model='main', app_label='core')
main = Main.objects.create(
title=title, slug=title, content_type=main_content_type,
path='00010011', depth=2, numchild=0, url_path='/home/',
)
SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='en', is_active=True)
french = SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='fr', is_active=True)
en_section = self.mk_section(
main, title="New Section", slug="new-section")
en_section2 = self.mk_section(
en_section, title="New Section 2", slug="new-section-2")
en_section3 = self.mk_section(
en_section2, title="New Section 3", slug="new-section-3")
en_section4 = self.mk_section(
en_section3, title="New Section 4", slug="new-section-4")
self.mk_section_translation(en_section, french)
self.mk_section_translation(en_section2, french)
fr_section3 = self.mk_section_translation(en_section3, french)
fr_section4 = self.mk_section_translation(en_section4, french)
parent = fr_section3.get_top_level_parent(locale='en')
self.assertEqual(parent.pk, en_section.pk)
self.assertEqual(fr_section3.depth, 5)
self.assertEqual(parent.depth, 3)
parent = fr_section4.get_top_level_parent(locale='en')
self.assertEqual(parent.pk, en_section.pk)
self.assertEqual(fr_section4.depth, 6)
self.assertEqual(parent.depth, 3)
parent = fr_section4.get_top_level_parent(locale='en', depth=4)
self.assertEqual(parent.pk, en_section2.pk)
self.assertEqual(fr_section4.depth, 6)
self.assertEqual(parent.depth, 4)
parent = fr_section4.get_top_level_parent(locale='en', depth=2)
self.assertEqual(parent.pk, main.pk)
self.assertEqual(parent.depth, 2)
parent = fr_section4.get_top_level_parent(locale='en', depth=-1)
self.assertEqual(parent, None)
def test_article_order(self):
now = timezone.now()
article1 = self.mk_article(
self.yourmind_sub, first_published_at=now)
self.mk_article(
self.yourmind_sub,
first_published_at=now + timezone.timedelta(hours=1))
# most recent first
self.assertEqual(
self.yourmind_sub.articles()[0].title, article1.title)
# swap published date
article1.first_published_at = now + timezone.timedelta(hours=4)
article1.save_revision().publish()
self.assertEqual(
self.yourmind_sub.articles()[0].title, article1.title)
def test_get_effective_image_for_sections(self):
en_section = self.mk_section(
self.section_index,
title="New Section", slug="new-section",
image=self.image)
self.assertEqual(
en_section.get_effective_image(), self.image)
        # image not set, so the inherited value is used
en_section2 = self.mk_section(
en_section, title="New Section 2", slug="new-section-2")
self.assertEqual(
en_section2.get_effective_image(), en_section.image)
        # image not set, so the inherited value is used
en_section3 = self.mk_section(
en_section2, title="New Section 3", slug="new-section-3")
self.assertEqual(
en_section3.get_effective_image(), en_section.image)
# set the image
en_section3.image = self.image2
self.assertEqual(
en_section3.get_effective_image(), self.image2)
        # if the translated section doesn't have
        # an image, it will inherit from the parent
fr_section3 = self.mk_section_translation(en_section3, self.french)
self.assertEqual(
fr_section3.get_effective_image(), en_section3.image)
fr_section2 = self.mk_section_translation(en_section2, self.french)
self.assertEqual(
fr_section2.get_effective_image(), en_section.image)
        # check that if the section doesn't have an image it returns an empty value
en_section4 = self.mk_section(
self.section_index,
title="New Section 4", slug="new-section-4", )
self.assertEqual(
en_section4.get_effective_image(), '')
fr_section4 = self.mk_section_translation(en_section4, self.french)
self.assertEqual(
fr_section4.get_effective_image(), '')
def test_get_effective_image_for_articles(self):
section = self.mk_section(
self.section_index, title="Section", slug="section")
en_article1, en_article2 = self.mk_articles(section, 2)
fr_article1 = self.mk_article_translation(en_article1, self.french)
self.assertEqual(
en_article1.get_effective_image(), '')
self.assertEqual(
fr_article1.get_effective_image(), '')
en_article1.image = self.image
en_article1.save()
self.assertEqual(
en_article1.get_effective_image(), self.image)
        # if the image is not set it should inherit from the main language article
self.assertEqual(
fr_article1.get_effective_image(), en_article1.image)
# if the translated article has an image it should return its image
fr_article1.image = self.image2
fr_article1.save()
self.assertEqual(
fr_article1.get_effective_image(), self.image2)
def test_number_of_child_sections(self):
new_section = self.mk_section(self.section_index)
self.mk_sections(new_section, count=12)
self.client.get('/')
response = self.client.get('/sections-main-1/test-section-0/')
self.assertContains(response, 'Test Section 11')
def test_number_of_child_articles_in_section(self):
new_section = self.mk_section(self.section_index)
self.mk_articles(new_section, count=12)
request = self.factory.get('/sections-main-1/test-section-0/')
request.site = self.site
articles = load_child_articles_for_section(
{'request': request, 'locale_code': 'en'}, new_section, count=None)
self.assertEqual(len(articles), 12)
def test_parent_section(self):
new_section = self.mk_section(
self.section_index, title="New Section", slug="new-section")
new_section1 = self.mk_section(
new_section, title="New Section 1", slug="new-section-1")
self.assertEqual(
new_section1.get_parent_section('en'), new_section)
def test_article_service_aggregator(self):
new_section = self.mk_section(
self.section_index, title="New Section", slug="new-section",
is_service_aggregator=True)
with self.assertRaises(ValidationError):
self.mk_article(
new_section, title="New Section 1", slug="new-section-1",
featured_in_latest=True)
def test_section_service_aggregator(self):
with self.assertRaises(ValidationError):
self.mk_section(
self.section_index, title="New Section", slug="new-section",
is_service_aggregator=True, monday_rotation=True)
def test_meta_data_tags(self):
User.objects.create_superuser(
username='testuser', password='password', email='test@email.com')
self.client.login(username='testuser', password='password')
post_data = {
'title': 'this is a test article',
'slug': 'this-is-a-test-article',
'recommended_articles-INITIAL_FORMS': 0,
'recommended_articles-MAX_NUM_FORMS': 1000,
'recommended_articles-MIN_NUM_FORMS': 0,
'recommended_articles-TOTAL_FORMS': 0,
'reaction_questions-INITIAL_FORMS': 0,
'reaction_questions-MAX_NUM_FORMS': 1000,
            'reaction_questions-MIN_NUM_FORMS': 0,
'reaction_questions-TOTAL_FORMS': 0,
'related_sections-INITIAL_FORMS': 0,
'related_sections-MAX_NUM_FORMS': 1000,
'related_sections-MIN_NUM_FORMS': 0,
'related_sections-TOTAL_FORMS': 0,
'nav_tags-INITIAL_FORMS': 0,
'nav_tags-MAX_NUM_FORMS': 1000,
'nav_tags-MIN_NUM_FORMS': 0,
'nav_tags-TOTAL_FORMS': 0,
'body-count': 1,
'body-0-value': 'Hello',
'body-0-deleted': False,
'body-0-order': 1,
'body-0-type': 'paragraph',
'metadata_tags': 'love, happiness',
'action-publish': 'Publish',
'homepage_media-count': 0
}
self.client.post(
reverse('wagtailadmin_pages:add',
args=('core', 'articlepage', self.yourmind.id,)),
post_data)
post_data.update({
'title': 'this is a test article2',
'slug': 'this-is-a-test-article-2',
'metadata_tags': 'peace, happiness',
})
self.client.post(
reverse('wagtailadmin_pages:add',
args=('core', 'articlepage', self.yourmind.id,)),
post_data)
self.assertEqual(
ArticlePage.objects.filter(
metadata_tags__name='happiness').count(), 2)
self.assertEqual(
ArticlePage.objects.filter(
metadata_tags__name='love').count(), 1)
self.assertEqual(
ArticlePage.objects.filter(
metadata_tags__name='peace').count(), 1)
def test_social_media(self):
User.objects.create_superuser(
username='testuser', password='password', email='test@email.com')
self.client.login(username='testuser', password='password')
self.mk_article(
self.yourmind, title="New article",
social_media_title='media title',
social_media_description='media description', )
self.mk_article(
self.yourmind, title="New article2",
social_media_title='media title',
social_media_image=self.image, )
self.assertEqual(
ArticlePage.objects.filter(
social_media_title='media title').count(), 2)
self.assertEqual(
ArticlePage.objects.filter(
social_media_description='media description').count(), 1)
self.assertEqual(
ArticlePage.objects.filter(
social_media_image=self.image).count(), 1)
response = self.client.get('/sections-main-1/your-mind/new-article/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'content="media title"')
def test_site_languages(self):
main = Main.objects.all().first()
self.english = SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='en',
is_active=True)
self.french = SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='fr',
is_active=True)
self.spanish = SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='sp',
is_active=False)
response = self.client.get('/')
self.assertContains(response, 'English')
self.assertContains(response, 'français')
self.assertNotContains(response, 'español')
def test_get_translation_template_tag(self):
section = self.mk_section(self.section_index)
section2 = self.mk_section(self.section_index)
translated_section = self.mk_section_translation(section, self.french)
request = self.factory.get('/')
request.site = self.site
qs = get_translation({
'locale_code': 'fr', 'request': request}, section)
self.assertEqual(translated_section.id, qs.id)
qs = get_translation({
'locale_code': 'fr', 'request': request}, section2)
self.assertEqual(section2.id, qs.id)
def test_hero_article(self):
User.objects.create_superuser(
username='testuser', password='password', email='test@email.com')
self.client.login(username='testuser', password='password')
        # create a new article and go to its edit page
new_section = self.mk_section(
self.section_index, title="New Section", slug="new-section")
new_article = self.mk_article(new_section, title="New article", )
response = self.client.get(
reverse('wagtailadmin_pages:edit', args=(new_article.id,)))
self.assertEqual(response.status_code, 200)
# Marking article as Hero Article with no promote date
# or demote date raises error
post_data = {
"feature_as_hero_article": True,
'title': 'this is a test article',
'slug': 'this-is-a-test-article',
'recommended_articles-INITIAL_FORMS': 0,
'recommended_articles-MAX_NUM_FORMS': 1000,
'recommended_articles-MIN_NUM_FORMS': 0,
'recommended_articles-TOTAL_FORMS': 0,
'reaction_questions-INITIAL_FORMS': 0,
'reaction_questions-MAX_NUM_FORMS': 1000,
            'reaction_questions-MIN_NUM_FORMS': 0,
'reaction_questions-TOTAL_FORMS': 0,
'nav_tags-INITIAL_FORMS': 0,
'nav_tags-MAX_NUM_FORMS': 1000,
'nav_tags-MIN_NUM_FORMS': 0,
'nav_tags-TOTAL_FORMS': 0,
'related_sections-INITIAL_FORMS': 0,
'related_sections-MAX_NUM_FORMS': 1000,
'related_sections-MIN_NUM_FORMS': 0,
'related_sections-TOTAL_FORMS': 0,
'body-count': 1,
'body-0-value': 'Hello',
'body-0-deleted': False,
'body-0-order': 1,
'body-0-type': 'paragraph',
'metadata_tags': 'love, happiness',
'action-publish': 'Publish',
'homepage_media-count': 0
}
self.client.post(
reverse('wagtailadmin_pages:edit', args=(new_article.id,)),
post_data
)
self.assertRaisesMessage(
ValidationError,
"Please specify the date and time that you would like this "
"article to appear as the Hero Article."
)
# Raises error if promote_date is in the past
post_data.update({
"promote_date": timezone.now() + timezone.timedelta(days=-1),
})
self.client.post(
reverse('wagtailadmin_pages:edit', args=(new_article.id,)),
post_data
)
self.assertRaisesMessage(
ValidationError,
"Please select the present date, or a future date."
)
        # Raise an error if the demote date is before
        # the promote date
post_data.update({
"promote_date": timezone.now(),
"demote_date": timezone.now() + timezone.timedelta(days=-1)
})
self.client.post(
reverse('wagtailadmin_pages:edit', args=(new_article.id,)),
post_data
)
self.assertRaisesMessage(
ValidationError,
"The article cannot be demoted before it has been promoted."
)
def test_demote_articles_post_save(self):
article = self.mk_article(
self.yourmind_sub, title='article', slug='article',
featured_in_section=True, featured_in_homepage=True,
featured_in_latest=True)
self.assertFalse(article.featured_in_latest)
self.assertFalse(article.featured_in_homepage)
self.assertFalse(article.featured_in_section)
article.slug = 'article-slug'
article.save()
self.assertFalse(article.featured_in_latest)
self.assertFalse(article.featured_in_homepage)
self.assertFalse(article.featured_in_section)
article.featured_in_section = True
article.featured_in_homepage = True
article.featured_in_latest = True
self.assertTrue(article.featured_in_latest)
self.assertTrue(article.featured_in_homepage)
self.assertTrue(article.featured_in_section)
article.save()
self.assertFalse(article.featured_in_latest)
self.assertFalse(article.featured_in_homepage)
self.assertFalse(article.featured_in_section)
def test_is_hero_article(self):
promote_date = timezone.now() + timezone.timedelta(days=-1)
demote_date = timezone.now() + timezone.timedelta(days=1)
article_1 = ArticlePage(
title="New article",
feature_as_hero_article=True,
promote_date=promote_date,
demote_date=demote_date
)
self.yourmind.add_child(instance=article_1)
self.assertTrue(article_1.is_current_hero_article())
promote_date = timezone.now() + timezone.timedelta(days=2)
demote_date = timezone.now() + timezone.timedelta(days=4)
article_2 = ArticlePage(
title="New article",
promote_date=promote_date,
demote_date=demote_date
)
self.yourmind.add_child(instance=article_2)
self.assertFalse(article_2.is_current_hero_article())
def test_molo_page_helper_method_is_content_page(self):
self.assertTrue(self.yourmind.is_content_page("Your mind"))
self.assertFalse(self.yourmind.is_content_page("Not Your mind"))
    # exclude future-scheduled Hero Articles from the
    # latest articles queryset.
    # Create two articles, one with a present promote date and one
    # with a future promote date. Verify that the article with a
    # future promote date does not appear in the latest articles
    # queryset.
def test_future_hero_article_not_in_latest(self):
promote_date = timezone.now() + timezone.timedelta(days=2)
demote_date = timezone.now() + timezone.timedelta(days=4)
future_article = ArticlePage(
title="Future article",
promote_date=promote_date,
demote_date=demote_date,
depth="1",
path="0003",
featured_in_latest=True,
feature_as_hero_article=True
)
self.yourmind.add_child(instance=future_article)
future_article.save()
main = Main.objects.all().first()
self.assertQuerysetEqual(main.latest_articles(), [])
promote_date = timezone.now() + timezone.timedelta(days=-2)
demote_date = timezone.now() + timezone.timedelta(days=-1)
present_article = ArticlePage(
title="Present article",
promote_date=promote_date,
demote_date=demote_date,
depth="1",
path="0004",
featured_in_latest_start_date=promote_date,
feature_as_hero_article=True
)
self.yourmind.add_child(instance=present_article)
present_article.save()
promote_articles()
self.assertQuerysetEqual(
main.latest_articles(), [repr(present_article), ])
@patch('django.utils.timezone.activate')
class TestCmsSettings(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.mk_main2()
# Something creates CmsSettings for both sites when only
# one is explicitly created here.
self.assertEqual(len(CmsSettings.objects.all()), 0)
self.settings = CmsSettings.objects.create(site=self.site)
self.assertEqual(len(CmsSettings.objects.all()), 2)
self.timezone = Timezone(title='FakeContinent/FakeCity')
self.timezone.save()
def test_cms_settings_activates_timezone_once(self, timezone_activate):
self.settings.timezone = self.timezone
self.settings.save()
timezone_activate.assert_called_once_with('FakeContinent/FakeCity')
def test_cms_settings_save_updates_all_timezones(self, timezone_activate):
self.settings.timezone = self.timezone
self.settings.save()
for settings in CmsSettings.objects.all():
self.assertEqual(settings.timezone, self.timezone)
| 39.793201
| 79
| 0.642237
|