Dataset schema (one row per source file):

| Column | Type | Values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24, nullable |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1 – 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24, nullable |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24, nullable |
| content | string | lengths 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |

hexsha: 2b2d2083b281f2bcf3550d8a5d3bef8394173ada | size: 2,877 | ext: py | lang: Python
repo: examples/atrous_conv_example.py | miaecle/PNet @ 981ce26cf5358fbd9af128501640d453043eb6f7 | ["MIT"]
stars: 3 (2017-05-17T01:19:18.000Z – 2017-07-19T21:41:15.000Z) | issues: null | forks: 1 (2021-06-19T09:32:26.000Z – 2021-06-19T09:32:26.000Z)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 27 19:43:49 2017
@author: zqwu
Example script using saved atrous-conv model to predict targets in CASP11, CASP12, CAMEO.
To run this code, please download the feature files (or generate the features yourself with HHblits, CCMpred and RaptorX):
CASP: https://s3-us-west-1.amazonaws.com/deepchem.io/featurized_datasets/CASPALL.tar.gz
CAMEO: https://s3-us-west-1.amazonaws.com/deepchem.io/featurized_datasets/CAMEO.tar.gz
PDB50cut: https://s3-us-west-1.amazonaws.com/deepchem.io/featurized_datasets/PDB50cut.tar.gz
Please decompress them into the datasets folder defined by the environment variable PNET_DATA_DIR.
"""
import deepchem as dc
import numpy as np
import pnet
import os
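# Optional sanity check (not part of the original script): the docstring above
# assumes the PDB50cut, CASPALL and CAMEO archives have been decompressed into
# the folder pointed to by PNET_DATA_DIR. Failing early with a clear message is
# easier to debug than a FileNotFoundError deep inside the feature builders.
_data_dir = os.environ.get('PNET_DATA_DIR')
if _data_dir is None:
    raise RuntimeError('Please set the PNET_DATA_DIR environment variable.')
for _name in ('PDB50cut', 'CASPALL', 'CAMEO'):
    if not os.path.isdir(os.path.join(_data_dir, _name)):
        raise RuntimeError('Missing dataset folder: %s' % os.path.join(_data_dir, _name))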
train = pnet.utils.load_PDB50_cut()
data_dir_train = os.path.join(os.environ['PNET_DATA_DIR'], 'PDB50cut')
train.build_features(['raw', 'MSA', 'SS', 'SA'], path=data_dir_train)
train.build_2D_features(feat_list=['CCMpred', 'MI_MCP'], path=data_dir_train)
train.build_labels(path=data_dir_train, weight_base=50., weight_adjust=0.1, binary=True)
CASPALL = pnet.utils.load_CASP_all()
data_dir_valid = os.path.join(os.environ['PNET_DATA_DIR'], 'CASPALL')
CASPALL.build_features(['raw', 'MSA', 'SS', 'SA'], path=data_dir_valid)
CASPALL.build_2D_features(feat_list=['CCMpred', 'MI_MCP'], path=data_dir_valid)
CASPALL.build_labels(path=data_dir_valid, weight_base=50., weight_adjust=0.1, binary=True)
CAMEO = pnet.utils.load_CAMEO()
data_dir_cameo = os.path.join(os.environ['PNET_DATA_DIR'], 'CAMEO')
CAMEO.build_features(['raw', 'MSA', 'SS', 'SA'], path=data_dir_cameo)
CAMEO.build_2D_features(feat_list=['CCMpred', 'MI_MCP'], path=data_dir_cameo)
CAMEO.build_labels(path=data_dir_cameo, weight_base=50., weight_adjust=0.1, binary=True)
CASP11 = pnet.utils.load_CASP(11)
CASP11 = CASPALL.select_by_ID(CASP11._IDs)
CASP12 = pnet.utils.load_CASP(12)
CASP12 = CASPALL.select_by_ID(CASP12._IDs)
batch_size = 1
n_features = CASPALL.n_features
metrics = [pnet.utils.Metric(pnet.utils.top_k_accuracy(5), mode='classification')]
metrics2 = [pnet.utils.Metric(pnet.utils.top_k_accuracy(10), mode='classification')]
model_dir = os.path.join(os.environ['PNET_DATA_DIR'], '../saved_models/AtrousConv/')
model = pnet.models.AtrousConvContactMap(
n_res_feat=n_features,
learning_rate=1e-3,
learning_rate_decay=0.99,
batch_size=batch_size,
use_queue=False,
uppertri=True,
mode='classification',
n_batches=None,
oneD_loss=None,
model_dir=model_dir)
model.build()
model.restore(os.path.join(model_dir, 'model-1'))
#model.fit(train, nb_epoch=25, checkpoint_interval=11498)
print(model.evaluate(CASP11, metrics))
print(model.evaluate(CASP12, metrics))
print(model.evaluate(CAMEO, metrics))
print(model.evaluate(CASP11, metrics2))
print(model.evaluate(CASP12, metrics2))
print(model.evaluate(CAMEO, metrics2))
avg_line_length: 39.410959 | max_line_length: 108 | alphanum_fraction: 0.765728

hexsha: f6c821ef42f4f0f7bb6f76406f0de309f8c8e511 | size: 20,758 | ext: py | lang: Python
path: tensorflow/python/eager/def_function_xla_jit_test.py | ["Apache-2.0"]
stars: pukhlyakova/tensorflow @ 573b838b131b4229179dcc86d675ecd2b09b2c52 | 1 (2020-09-21T04:52:36.000Z – 2020-09-21T04:52:36.000Z)
issues: lonelyagegeek/tensorflow @ 830b72bc81f99de1ab26dc186dcc8c8fb6b9c8bc | 2 (2021-08-25T16:14:02.000Z – 2022-02-10T04:14:26.000Z)
forks: lonelyagegeek/tensorflow @ 830b72bc81f99de1ab26dc186dcc8c8fb6b9c8bc | null
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DefFunctionTest(xla_test.XLATestCase):
def testAutoclusteringWithTfFunction(self):
if 'tpu' in self.device.lower():
self.skipTest('Autoclustering does not run on TPU')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=False)
def outer(a, b, c):
return a * inner(b, c) + c
@def_function.function(experimental_compile=True)
def inner(b, c):
return b + c * b
i1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i2 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
i3 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
with context.collect_graphs(optimized=True) as graphs:
outer(i1, i2, i3)
if test_util.is_xla_enabled():
self.assertIn('_XlaRun', [n.op for n in graphs[0].node])
else:
self.assertNotIn('_XlaRun', [n.op for n in graphs[0].node])
def testBasic(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x, a):
return x + a
func = def_function.function(fn, experimental_compile=False)
xla_func = def_function.function(fn, experimental_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))
def testBasicInt32(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def fn(x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3], dtype=dtypes.int32)
self.assertAllClose([2, 3, 3, 4, 4], fn(inputs, 1))
def testDerivative(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x, a):
return 2 * x + a
xla_func = def_function.function(fn, experimental_compile=True)
with backprop.GradientTape() as tape:
inputs = constant_op.constant([1., 2., 2., 3., 3.])
tape.watch(inputs)
outputs = xla_func(inputs, 1)
self.assertAllClose([2, 2, 2, 2, 2], tape.gradient(outputs, inputs))
# pylint: disable=protected-access
(forward, backward) = xla_func.get_concrete_function(
inputs, 1)._delayed_rewrite_functions.forward_backward()
# Check that the must-compile attribute gets correctly propagated to the
# created derivatives.
self.assertTrue(backward.function_def.attr['_XlaMustCompile'])
self.assertTrue(forward.definition.attr['_XlaMustCompile'])
# Calling function with experimental_compile=True from
# experimental_compile=False should compile the inner func.
def testNestedCall(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162800687: Inner function runs on host')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def fn(x, a):
return x + a
@def_function.function(experimental_compile=False)
def fn2(x, a):
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([2, 3, 3, 4, 4], fn2(inputs, 1))
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns'
' wrong status type')
def testNestedCallUnsupportedOps(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return array_ops.unique(x).y
xla_func = def_function.function(fn, experimental_compile=True)
def fn2(x):
return xla_func(x)
func = def_function.function(fn2, experimental_compile=False)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(errors.InvalidArgumentError,
'not compilable'):
func(inputs)
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns'
' wrong status type')
def testUnsupportedOps(self):
with ops.device('device:{}:0'.format(self.device)):
def fn(x):
return array_ops.unique(x).y # Unique is not supported by XLA
func = def_function.function(fn, experimental_compile=False)
xla_func = def_function.function(fn, experimental_compile=True)
inputs = constant_op.constant([1, 2, 2, 3, 3])
self.assertAllClose([1, 2, 3], func(inputs))
with self.assertRaisesRegex(errors.InvalidArgumentError,
'not compilable'):
xla_func(inputs)
def testFunctionGradient(self):
with ops.device('device:{}:0'.format(self.device)):
v = resource_variable_ops.ResourceVariable(2.0)
def fn(x):
return v * x
func = def_function.function(fn, experimental_compile=False)
xla_func = def_function.function(fn, experimental_compile=True)
def run_and_check(test_func):
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = test_func(x)
dy = tape.gradient(y, v)
self.assertAllClose(6.0, y)
self.assertAllClose(3.0, dy)
run_and_check(func)
run_and_check(xla_func)
@test_util.disable_mlir_bridge('TODO(b/162521846): MLIR bridge fails'
' msan, function library not found')
def testControlFlow(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def f(x):
assert control_flow_util.GraphOrParentsInXlaContext(
ops.get_default_graph())
x = ops.convert_to_tensor(x)
def body(i, a):
return i + 1, control_flow_ops.cond(i > 2, lambda: a + (x**2),
lambda: a + 3)
return control_flow_ops.while_loop(
lambda i, *_: i < 10,
body, (constant_op.constant(0), constant_op.constant(3.)),
maximum_iterations=10)[1]
@def_function.function(experimental_compile=True)
def g(x):
x = ops.convert_to_tensor(x)
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return y, tape.gradient(y, x)
# Test that XLA context gets correctly propagated.
g._get_concrete_function_garbage_collected(2.0)(2.0)
self.assertAllClose(40.0, f(2.0))
self.assertAllClose([40.0, 28.0], g(2.0))
self.assertAllClose(40.0, f.get_concrete_function(2.0)(2.0))
self.assertAllClose([40.0, 28.0], g.get_concrete_function(2.0)(2.0))
def testMethodCompilation(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(experimental_compile=True)
def f1(self, x, a):
return x + a
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
self.assertAllClose([2, 3, 3, 4, 4], c.f1(inputs, 1))
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns '
' wrong status type')
def testMethodCompilationUnsupportedFunc(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(experimental_compile=True)
def f1(self, x):
return array_ops.unique(x).y
inputs = constant_op.constant([1, 2, 2, 3, 3])
c = C()
with self.assertRaisesRegex(errors.InvalidArgumentError,
'not compilable'):
c.f1(inputs)
def testMustBeConstantPropagation(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162799319: Cannot resolve constant on TPU')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def f():
return constant_op.constant([0, 2, 1], dtype=dtypes.int32)
@def_function.function(experimental_compile=True)
def g(a, b):
return array_ops.transpose(a, b)
@def_function.function
def z():
return g(array_ops.ones([3, 4, 3], dtype=dtypes.float32), f())
z()
@test_util.disable_mlir_bridge('TODO(b/162271237): argmax gives different'
' results in MLIR-based bridge')
def testArgMinMax(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def argmax(x):
return math_ops.argmax(x)
@def_function.function(experimental_compile=True)
def argmin(x):
return math_ops.argmin(x)
self.assertAllClose(0, argmax(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmax(array_ops.ones([10])))
self.assertAllClose(0, argmin(array_ops.ones([10], dtype=dtypes.float32)))
self.assertAllClose(0, argmin(array_ops.ones([10])))
@test_util.disable_mlir_bridge('TensorArray support not implemented')
def testErrorMessagePassingTensorArray(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=1, element_shape=[])
ta = ta.write(0, 2 * x)
y = ta.read(0)
return y
x = constant_op.constant(3.14)
with backprop.GradientTape() as tape:
tape.watch(x)
with self.assertRaisesRegex(errors.UnimplementedError,
'TensorList crossing the XLA/TF boundary'):
y = f(x)
tape.gradient(y, x)
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(experimental_compile=True)(f)
inputs = constant_op.constant([3.14, 2.68, 7.69])
self.assertAllClose([6.28, 5.36, 15.38, 9.42, 8.04, 23.07], f(inputs))
self.assertAllClose(compiled_f(inputs), f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Multidim(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3, 2])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(experimental_compile=True)(f)
inputs = constant_op.constant([[3.14, 21.1], [2.68, 22.2], [7.69, 23.3]])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatV2Scalars(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[1])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
compiled_f = def_function.function(experimental_compile=True)(f)
inputs = constant_op.constant([3.14])
self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGrad(self):
with ops.device('device:{}:0'.format(self.device)):
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
return tape.gradient(y, x)
compiled_g = def_function.function(experimental_compile=True)(g)
self.assertAllClose([5.0, 5.0, 5.0], g())
self.assertAllClose(compiled_g(), g())
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
' lowering TensorListConcatV2')
def testTensorListConcatGradNestedCompile(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def f(x):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, element_shape=[3])
ta = ta.write(0, 2 * x)
ta = ta.write(1, 3 * x)
return ta.concat()
@def_function.function(experimental_compile=True)
def g():
x = constant_op.constant([3.14, 2.68, 7.69])
with backprop.GradientTape() as tape:
tape.watch(x)
y = f(x)
out = tape.gradient(y, x)
return out
self.assertAllClose([5.0, 5.0, 5.0], g())
def testCumsum(self):
if 'tpu' in self.device.lower():
self.skipTest('b/162771302: 64bit rewrite of cumsum not supported')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def f(x):
return math_ops.cumsum(x)
f64_input = constant_op.constant([1.1, 2.2, 3.3], dtype=dtypes.float64)
self.assertAllClose([1.1, 3.3, 6.6], f(f64_input))
def testNoExcessiveRetracing(self):
with ops.device('device:{}:0'.format(self.device)):
inner_retracings = 0
@def_function.function(experimental_compile=True)
def inner(a, b):
nonlocal inner_retracings
inner_retracings += 1
return a * b + a
def outer(a, b):
return inner(a, b)
func_input = random_ops.random_normal([10, 10])
for _ in range(2):
def_function.function(outer)(func_input, func_input)
self.assertEqual(inner_retracings, 1)
def testUpdateVariable(self):
with ops.device('device:{}:0'.format(self.device)):
on_gpu = 'gpu' in self.device.lower()
v = variables.Variable([3.1, 3.2])
@def_function.function(experimental_compile=True)
def update_var(a, b):
v.assign_add(a * b)
arg1 = random_ops.random_normal([2])
arg2 = random_ops.random_normal([2])
initial_usage = context.context().get_total_memory_usage(
v.device) if on_gpu else 0
update_var(arg1, arg2)
final_usage = context.context().get_total_memory_usage(
v.device) if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
@test_util.disable_mlir_bridge('TODO(b/162381930): MLIR bridge renames '
' functions')
def testUpdateVariableInClass(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(experimental_compile=True)
def update_var(self, a, b):
if not hasattr(self, 'v'):
self.v = variables.Variable(3.1)
self.v.assign_add(a * b)
c = C()
@def_function.function
def outer():
c.update_var(constant_op.constant(0.7), constant_op.constant(0.6))
outer()
self.assertAllClose(c.v, 3.52)
@test_util.disable_mlir_bridge('TODO(b/162801728): MLIR bridge causes '
' invalid free on TPUs')
def testUpdateVariableMultipleOutputs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable(3.1)
@def_function.function(experimental_compile=True)
def update_var(a, b):
v.assign_add(a * b)
return a * b + v
out = update_var(constant_op.constant(0.7), constant_op.constant(0.6))
self.assertAllClose(v, 3.52)
self.assertAllClose(out, 3.94)
def testReturnIdentity(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def f(a, b):
return (a, b)
a = random_ops.random_normal([10, 10])
b = random_ops.random_normal([10, 10])
on_gpu = 'gpu' in self.device.lower()
initial_usage = context.context().get_total_memory_usage(
b.backing_device) if on_gpu else 0
f(a, b)
final_usage = context.context().get_total_memory_usage(
b.backing_device) if on_gpu else 0
self.assertEqual(initial_usage, final_usage)
def testGetCompilerIrConstants(self):
if 'tpu' in self.device.lower():
self.skipTest('TPU generates different HLO')
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def f(a, b):
return array_ops.transpose(a, b)
a = array_ops.ones([3, 4, 3], dtype=dtypes.float32)
b = constant_op.constant([0, 2, 1], dtype=dtypes.int32)
self.assertIn('{1,2,0}',
f.experimental_get_compiler_ir(a, b)(stage='optimized_hlo'))
@test_util.disable_mlir_bridge('TODO(b/168732524): MLIR bridge does not '
' optimize single-element tuples to scalars')
def testGetCompilerIrResourceVars(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([3.1, 3.2])
@def_function.function(experimental_compile=True)
def f(a, b):
v.assign_add(a * b)
a = random_ops.random_normal([2])
b = random_ops.random_normal([2])
self.assertIn('input_output_alias={ {}: (2, {}, may-alias) }',
f.experimental_get_compiler_ir(a, b)('optimized_hlo'))
def testGetCompilerIrNotCompiled(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function
def f(x):
return x + 1
a = random_ops.random_normal([10, 10])
with self.assertRaisesRegex(ValueError,
'marked with experimental_compile'):
f.experimental_get_compiler_ir(a)()
def testGetCompilerIrNested(self):
with ops.device('device:{}:0'.format(self.device)):
@def_function.function(experimental_compile=True)
def fn(x, a):
return x + a
@def_function.function(experimental_compile=False)
def fn2(x, a):
fn.experimental_get_compiler_ir(x, a)()
return fn(x, a)
inputs = constant_op.constant([1, 2, 2, 3, 3])
with self.assertRaisesRegex(TypeError, '"Graph" tensor'):
fn2(inputs, 1)
def testGetCompilerIrKwargs(self):
with ops.device('device:{}:0'.format(self.device)):
v = variables.Variable([0.1, 0.1])
@def_function.function(experimental_compile=True)
def f(a, b):
return (a + b) * v
a = constant_op.constant([1.1, 1.1])
b = constant_op.constant([2.2, 2.2])
self.assertIn('multiply',
f.experimental_get_compiler_ir(b=a, a=b)(stage='hlo'))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
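The tests above all drive XLA through def_function.function(experimental_compile=True); as a minimal standalone sketch of the same mechanism (an illustration assuming the same TensorFlow build and an eager runtime, not part of the test file):

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op


@def_function.function(experimental_compile=True)
def scaled_add(x, a):
  # The whole function body is compiled by XLA on the first call for this
  # input signature; later calls with the same signature reuse the cluster.
  return 2 * x + a


print(scaled_add(constant_op.constant([1.0, 2.0, 3.0]), 1.0))  # [3. 5. 7.]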
avg_line_length: 34.085386 | max_line_length: 80 | alphanum_fraction: 0.635321

hexsha: 9ad9fff758d4652d40fd5bbe41f1d7f8e1b2a4a4 | size: 390 | ext: py | lang: Python
repo: client/src/__init__.py | Sheerabth/blob-system @ 808f1591247fecace4cbd121053d79205096ced3 | ["MIT"]
stars: null | issues: null | forks: null
__app_name__ = "blob-system"
__version__ = "0.1.0"
(
SUCCESS,
DIR_ERROR,
FILE_ERROR,
DB_READ_ERROR,
DB_WRITE_ERROR,
JSON_ERROR,
ID_ERROR,
) = range(7)
ERRORS = {
DIR_ERROR: "config directory error",
FILE_ERROR: "config file error",
DB_READ_ERROR: "database read error",
DB_WRITE_ERROR: "database write error",
ID_ERROR: "to-do id error",
}
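A small usage sketch (illustrative, not part of the package): callers can translate one of the status codes above into its message through ERRORS, treating codes without an entry (such as SUCCESS) as empty.

def describe(code: int) -> str:
    """Return the human-readable message for a status code, or '' if none."""
    return ERRORS.get(code, "")


assert describe(DB_READ_ERROR) == "database read error"
assert describe(SUCCESS) == ""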
avg_line_length: 18.571429 | max_line_length: 43 | alphanum_fraction: 0.651282

hexsha: 98d97aaae3157752e08a29559abeeaadcb9073a2 | size: 287 | ext: py | lang: Python
repo: love/lover/urls.py | woshinib/text @ ad40b5992d0006c95b39e6f449e0cae16deb256d | ["MIT"]
stars: null | issues: null | forks: null
from django.urls import path
from . import views
app_name = 'lover'
urlpatterns = [
path('lover_index', views.lover_index, name='lover_index'),
path('lover_movie', views.lover_movie, name='lover_movie'),
path('lover_cake', views.lover_cake, name='lover_cake'),
]
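Since the URLconf sets app_name = 'lover' and names every route, the rest of the project can reverse these paths by namespace; a hedged sketch (assuming the app's URLs are included by the project URLconf):

from django.urls import reverse

# In Python code:
movie_url = reverse('lover:lover_movie')
# In a template the equivalent is: {% url 'lover:lover_cake' %}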
avg_line_length: 28.7 | max_line_length: 65 | alphanum_fraction: 0.689895

hexsha: cbed60ae0203d4a04dcd2e465c594ac34522dfa9 | size: 637 | ext: py | lang: Python
repo: boatsandjoy_api/core/validators.py | bertini36/boatsandjoy-api @ b22d82eb02947218d924b381160d622ded9e1d98 | ["MIT"]
stars: null | issues: 12 (2021-04-08T21:18:37.000Z – 2022-03-12T00:39:39.000Z) | forks: null
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass
from .exceptions import InvalidDataError
class RequestValidatorInterface(ABC):
"""
These validators are used to validate API request field types and whether required fields are present
"""
@classmethod
@abstractmethod
def validate(cls, params: any):
pass
class DjangoRequestValidator(RequestValidatorInterface):
FORM = None
@classmethod
def validate(cls, request: dataclass):
form = cls.FORM(data=asdict(request))
if not form.is_valid():
raise InvalidDataError(f'Validation error {form.errors.as_text()}')
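A hedged sketch of a concrete validator built on the base class above; the form, dataclass and field names are illustrative assumptions, not part of this module:

from dataclasses import dataclass
from django import forms


class CreateBookingForm(forms.Form):  # hypothetical Django form
    boat_id = forms.IntegerField()
    date = forms.DateField()


class CreateBookingRequestValidator(DjangoRequestValidator):
    FORM = CreateBookingForm


@dataclass
class CreateBookingRequest:  # hypothetical request dataclass
    boat_id: int
    date: str


# CreateBookingRequestValidator.validate(CreateBookingRequest(boat_id=1, date='2021-07-01'))
# raises InvalidDataError when the form rejects the payload.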
avg_line_length: 24.5 | max_line_length: 79 | alphanum_fraction: 0.706436

hexsha: 207757132cb2743016b90f04432db7b9479a9b59 | size: 23,109 | ext: py | lang: Python
repo: meutils/besttable.py | Jie-Yuan/MeUtils @ 2bb191b0d35b809af037c0f65b37570b8828bea3 | ["Apache-2.0"]
stars: 3 (2020-12-03T07:30:02.000Z – 2021-02-07T13:37:33.000Z) | issues: null | forks: 1 (2021-02-07T13:37:38.000Z – 2021-02-07T13:37:38.000Z)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : besttable
# @Time : 2021/1/28 8:01 PM
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description : https://github.com/foutaise/texttable/blob/master/texttable.py
# TODO: set the output precision for DataFrames
"""module for creating simple ASCII tables
Example:
table = Besttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print(table.draw() + "\\n")
table = Besttable()
table.set_deco(Besttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Besttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'MIT'
__version__ = '1.6.3'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import unicodedata
from functools import reduce
unicode_type = str
bytes_type = bytes
# define a text wrapping function to wrap some text
# to a specific width:
# - use cjkwrap if available (better CJK support)
# - fallback to textwrap otherwise
try:
import cjkwrap
def textwrapper(txt, width):
return cjkwrap.wrap(txt, width)
except ImportError:
try:
import textwrap
def textwrapper(txt, width):
return textwrap.wrap(txt, width)
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
# define a function to calculate the rendering width of a unicode character
# - use wcwidth if available
# - fallback to unicodedata information otherwise
try:
import wcwidth
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
return max(0, wcwidth.wcwidth(c))
except ImportError:
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
if unicodedata.east_asian_width(c) in 'WF':
return 2
elif unicodedata.combining(c):
return 0
else:
return 1
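# For example, uchar_width('a') == 1, a fullwidth CJK character such as '宽'
# has east_asian_width 'W' and therefore renders as 2 columns, and a combining
# mark like u'\u0301' contributes 0 columns.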
def obj2unicode(obj):
"""Return a unicode representation of a python object
"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj)
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
return sum([uchar_width(c) for c in obj2unicode(iterable)])
else:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class FallbackToText(Exception):
"""Used for failed conversion to float"""
pass
class Besttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Besttable.VLINES | Besttable.HLINES | Besttable.BORDER | \
Besttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [x[:1] for x in [str(s) for s in array]]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combination of:
Besttable.BORDER: Border around the table
Besttable.HEADER: Horizontal line below the header
Besttable.HLINES: Horizontal lines between rows
Besttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Besttable.BORDER | Besttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a two-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# note: detect iterators/generators via __next__ so that plain
# two-dimensional arrays (lists of lists) can still be indexed below
if header:
if hasattr(rows, '__iter__') and hasattr(rows, '__next__'):
self.header(next(rows))
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def draw_env(cls):
import os
return cls.draw_dict(os.environ)
@classmethod
def draw_df(cls, df):
tb = cls()
tb.header(df.columns)
tb.add_rows(df.values, header=False)
return tb.draw()
@classmethod
def draw_dict(cls, dic):
tb = cls()
tb.add_rows(list(dic.items()), header=False)
return tb.draw()
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
"""
if type(x) == int:
return str(x)
else:
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x is converted to float via _to_float before being formatted.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x is converted to float via _to_float before being formatted.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
elif f != f: # NaN
fn = cls._fmt_text
elif f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a': self._fmt_auto,
'i': self._fmt_int,
'f': self._fmt_float,
'e': self._fmt_exp,
't': self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Besttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Besttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Besttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Besttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length // 8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [self._len_cell(x) for x in self._header]
for row in self._rows:
for cell, i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3 * (ncols - 1) + [0, 4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill / 2) * space + cell_line \
+ int(fill / 2 + fill % 2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
if __name__ == '__main__':
table = Besttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
["Mr\nBaptiste\nClement", 1, "Baby"],
["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
print(table.draw() + "\n")
table = Besttable()
table.set_deco(Besttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
print(table.draw_env())
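Beyond the demo above, the draw_dict and draw_df class methods render a mapping or a DataFrame directly; a short sketch (the pandas import is an assumption and is only needed for draw_df):

print(Besttable.draw_dict({'name': 'Xavier', 'age': 32}))

# With pandas installed:
# import pandas as pd
# print(Besttable.draw_df(pd.DataFrame({'a': [1, 2], 'b': [3.5, 4.5]})))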
avg_line_length: 32.095833 | max_line_length: 100 | alphanum_fraction: 0.532087

hexsha: e9b2c7b572ccb9b525f4bf3b767aed35c1d4ea25 | size: 4,011 | ext: py | lang: Python
repo: config/settings.py | FrankCasanova/FrankyNews @ 527e8fbf0a4c09e5a34e88a2106a0af376b8b6c0 | ["MIT"]
stars: null | issues: null | forks: null
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from environs import Env
from pathlib import Path
env = Env()
env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=False)
ALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
# 3rd
'crispy_forms',
# local
'accounts',
'pages',
'articles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# Django looks for project-level templates in this directory
'DIRS': [str(BASE_DIR.joinpath('templates'))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': env.dj_db_url('DATABASE_URL')
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_URL = '/static/'
STATICFILES_DIRS = [str(BASE_DIR.joinpath('static'))]
STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles')) # new
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# my User model
AUTH_USER_MODEL = 'accounts.CustomUser'
# user redirect
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
# settings for crispy_forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# setting to implement the reset password with email.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
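These settings read SECRET_KEY, DEBUG and DATABASE_URL through environs, so a local run needs them in the process environment or a .env file next to the project; a hypothetical example with placeholder values (not taken from the repository):

# .env (placeholders only)
# SECRET_KEY=django-insecure-change-me
# DEBUG=True
# DATABASE_URL=postgres://postgres:postgres@localhost:5432/frankynews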
avg_line_length: 25.877419 | max_line_length: 91 | alphanum_fraction: 0.714784

hexsha: 4ea68884bfd2ec5ee0afaa5b76cc43ae8c2d4ade | size: 2,180 | ext: py | lang: Python
repo: wallet/serializers.py | iesteban/bitcoin_bazaar_backend @ 2aa7c61d8727dae3a9be4b19c4b2aa49ec7ecaa0 | ["MIT"]
stars: 18 (2017-03-08T06:30:55.000Z – 2020-05-08T17:30:20.000Z) | issues: 871 (2017-03-06T21:03:59.000Z – 2022-03-28T19:46:44.000Z) | forks: 5 (2017-07-07T12:10:47.000Z – 2020-05-13T15:57:56.000Z)
from rest_framework import serializers
from semillas_backend.users.serializers import UserSerializer
from .models import Wallet, Transaction
class CreateTransactionSerializer(serializers.Serializer):
user_source = serializers.UUIDField()
user_dest = serializers.UUIDField()
value = serializers.DecimalField(max_digits=6, decimal_places=2)
class Meta:
model = Transaction
fields = ('user_source', 'user_dest', 'value')
class TransactionSerializer(serializers.ModelSerializer):
user = serializers.SerializerMethodField()
balance = serializers.SerializerMethodField()
trans_value = serializers.SerializerMethodField()
created_at = serializers.DateTimeField(format='%d %b %Y')
user_uuid = serializers.SerializerMethodField()
class Meta:
model = Transaction
fields = ('id', 'trans_value', 'balance', 'user', 'created_at', 'user_uuid')
def get_user_uuid(self, obj):
if (
'owner_uuid' in self.context and
self.context['owner_uuid'] == str(obj.wallet_source.owner.uuid)
):
return obj.wallet_dest.owner.uuid
return obj.wallet_source.owner.uuid
def get_user(self, obj):
if (
'owner_uuid' in self.context and
self.context['owner_uuid'] == str(obj.wallet_source.owner.uuid)
):
return obj.wallet_dest.owner.name
return obj.wallet_source.owner.name
def get_balance(self, obj):
if (
'owner_uuid' in self.context and
self.context['owner_uuid'] == str(obj.wallet_source.owner.uuid)
):
return obj.balance_source
return obj.balance_dest
def get_trans_value(self, obj):
if (
'owner_uuid' in self.context and
self.context['owner_uuid'] == str(obj.wallet_source.owner.uuid)
):
return -obj.value
return obj.value
class WalletSerializer(serializers.ModelSerializer):
transactions = TransactionSerializer(many=True)
owner = UserSerializer()
class Meta:
model = Wallet
fields = ('uuid', 'owner', 'balance', 'last_updated', 'transactions')
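The get_* methods above flip the transaction's sign and the displayed counterparty based on the owner_uuid entry in the serializer context, so the caller must supply it; a hedged sketch of the view-side call (assuming the Wallet model exposes the transactions relation implied by WalletSerializer):

def wallet_transactions_payload(wallet):
    """Serialize a wallet's transactions from its owner's point of view."""
    serializer = TransactionSerializer(
        wallet.transactions.all(),
        many=True,
        context={'owner_uuid': str(wallet.owner.uuid)},
    )
    return serializer.data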
avg_line_length: 33.538462 | max_line_length: 84 | alphanum_fraction: 0.658257

hexsha: 63eec7be2c5cbbfe0e3f32e22c5edcc9303efea5 | size: 3,899 | ext: py | lang: Python
repo: tools/c7n_azure/tests_azure/tests_resources/test_storage_container.py | al3pht/cloud-custodian @ ce6613d1b716f336384c5e308eee300389e6bf50 | ["Apache-2.0"]
stars: 2,415 (2018-12-04T00:37:58.000Z – 2022-03-31T12:28:56.000Z) | issues: 3,272 (2018-12-03T23:58:17.000Z – 2022-03-31T21:15:32.000Z) | forks: 773 (2018-12-06T09:43:23.000Z – 2022-03-30T20:44:43.000Z)
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template, cassette_name, DEFAULT_SUBSCRIPTION_ID
from c7n_azure.storage_utils import StorageUtilities
from mock import patch
from c7n_azure.constants import CONTAINER_EVENT_TRIGGER_MODE
from c7n_azure.session import Session
from c7n_azure.utils import local_session
class StorageContainerTest(BaseTest):
def setUp(self):
super(StorageContainerTest, self).setUp()
StorageUtilities.get_storage_primary_key.cache_clear()
def test_storage_schema_validate(self):
p = self.load_policy({
'name': 'test-storage-container',
'resource': 'azure.storage-container'
}, validate=True)
self.assertTrue(p)
@arm_template('storage.json')
@cassette_name('containers')
def test_value_filter(self):
p = self.load_policy({
'name': 'test-azure-storage-container-enum',
'resource': 'azure.storage-container',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'container*'},
{'type': 'parent',
'filter':
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctstorage*'}}],
})
resources = p.run()
self.assertEqual(2, len(resources))
self.assertEqual({'containerone', 'containertwo'}, {c['name'] for c in resources})
@arm_template('storage.json')
@cassette_name('containers')
def test_set_public_access(self):
with patch('azure.mgmt.storage.v%s.operations.'
'BlobContainersOperations.update'
% self._get_storage_management_client_api_string()) as update_container_mock:
p = self.load_policy({
'name': 'test-azure-storage-container-enum',
'resource': 'azure.storage-container',
'filters': [
{
'type': 'value',
'key': 'name',
'value': 'containerone'
}
],
'actions': [
{
'type': 'set-public-access',
'value': 'None'
}
]
}, validate=True)
p.run()
args, kwargs = update_container_mock.call_args_list[0]
self.assertEqual('test_storage', args[0])
self.assertTrue(args[1].startswith('cctstorage'))
self.assertEqual('None', kwargs['public_access'])
@arm_template('storage.json')
def test_event(self):
p = self.load_policy({
'name': 'test-azure-container-event',
'mode':
{'type': CONTAINER_EVENT_TRIGGER_MODE,
'events': ['StorageContainerWrite']},
'resource': 'azure.storage-container'})
account = self.setup_account()
event = {"subject": "/subscriptions/" + DEFAULT_SUBSCRIPTION_ID + "/resourceGroups/"
"test_storage/providers/Microsoft.Storage/storageAccounts"
"/" + account.name + "/blobServices/default/containers/containerone",
"eventTime": "2019-07-16T18:30:43.3595255Z",
"id": "619d2674-b396-4356-9619-6c5a52fe4e88"}
resources = p.push(event, None)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['name'], 'containerone')
def _get_storage_management_client_api_string(self):
return local_session(Session)\
.client('azure.mgmt.storage.StorageManagementClient')\
.DEFAULT_API_VERSION.replace("-", "_")
avg_line_length: 38.60396 | max_line_length: 97 | alphanum_fraction: 0.553732

hexsha: f864b788f318ea618925ef504d1957e35486b91e | size: 12,270 | ext: py | lang: Python
repo: weatherlight-server/lib/flask_jwt/__init__.py | svanscho/weatherlight @ 14ce4114381af53cf040cee7c04b5b5670bbb95c | ["MIT"]
stars: 581 (2015-01-06T03:34:55.000Z – 2022-03-27T17:16:29.000Z) | issues: 104 (2015-01-18T23:25:19.000Z – 2022-02-16T15:44:43.000Z) | forks: 212 (2015-02-02T18:55:17.000Z – 2022-03-30T11:13:38.000Z)
# -*- coding: utf-8 -*-
"""
flask_jwt
~~~~~~~~~
Flask-JWT module
"""
import logging
import warnings
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import wraps
import jwt
from flask import current_app, request, jsonify, _request_ctx_stack
from werkzeug.local import LocalProxy
__version__ = '0.3.2'
logger = logging.getLogger(__name__)
current_identity = LocalProxy(lambda: getattr(_request_ctx_stack.top, 'current_identity', None))
_jwt = LocalProxy(lambda: current_app.extensions['jwt'])
CONFIG_DEFAULTS = {
'JWT_DEFAULT_REALM': 'Login Required',
'JWT_AUTH_URL_RULE': '/auth',
'JWT_AUTH_ENDPOINT': 'jwt',
'JWT_AUTH_USERNAME_KEY': 'username',
'JWT_AUTH_PASSWORD_KEY': 'password',
'JWT_ALGORITHM': 'HS256',
'JWT_LEEWAY': timedelta(seconds=10),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
'JWT_EXPIRATION_DELTA': timedelta(seconds=300),
'JWT_NOT_BEFORE_DELTA': timedelta(seconds=0),
'JWT_VERIFY_CLAIMS': ['signature', 'exp', 'nbf', 'iat'],
'JWT_REQUIRED_CLAIMS': ['exp', 'iat', 'nbf']
}
def _default_jwt_headers_handler(identity):
return None
def _default_jwt_payload_handler(identity):
iat = datetime.utcnow()
exp = iat + current_app.config.get('JWT_EXPIRATION_DELTA')
nbf = iat + current_app.config.get('JWT_NOT_BEFORE_DELTA')
identity = getattr(identity, 'id') or identity['id']
return {'exp': exp, 'iat': iat, 'nbf': nbf, 'identity': identity}
def _default_jwt_encode_handler(identity):
secret = current_app.config['JWT_SECRET_KEY']
algorithm = current_app.config['JWT_ALGORITHM']
required_claims = current_app.config['JWT_REQUIRED_CLAIMS']
payload = _jwt.jwt_payload_callback(identity)
missing_claims = list(set(required_claims) - set(payload.keys()))
if missing_claims:
raise RuntimeError('Payload is missing required claims: %s' % ', '.join(missing_claims))
headers = _jwt.jwt_headers_callback(identity)
return jwt.encode(payload, secret, algorithm=algorithm, headers=headers)
def _default_jwt_decode_handler(token):
secret = current_app.config['JWT_SECRET_KEY']
algorithm = current_app.config['JWT_ALGORITHM']
leeway = current_app.config['JWT_LEEWAY']
verify_claims = current_app.config['JWT_VERIFY_CLAIMS']
required_claims = current_app.config['JWT_REQUIRED_CLAIMS']
options = {
'verify_' + claim: True
for claim in verify_claims
}
options.update({
'require_' + claim: True
for claim in required_claims
})
return jwt.decode(token, secret, options=options, algorithms=[algorithm], leeway=leeway)
def _default_request_handler():
auth_header_value = request.headers.get('Authorization', None)
auth_header_prefix = current_app.config['JWT_AUTH_HEADER_PREFIX']
if not auth_header_value:
return
parts = auth_header_value.split()
if parts[0].lower() != auth_header_prefix.lower():
raise JWTError('Invalid JWT header', 'Unsupported authorization type')
elif len(parts) == 1:
raise JWTError('Invalid JWT header', 'Token missing')
elif len(parts) > 2:
raise JWTError('Invalid JWT header', 'Token contains spaces')
return parts[1]
def _default_auth_request_handler():
data = request.get_json()
username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)
password = data.get(current_app.config.get('JWT_AUTH_PASSWORD_KEY'), None)
criterion = [username, password, len(data) == 2]
if not all(criterion):
raise JWTError('Bad Request', 'Invalid credentials')
identity = _jwt.authentication_callback(username, password)
if identity:
access_token = _jwt.jwt_encode_callback(identity)
return _jwt.auth_response_callback(access_token, identity)
else:
raise JWTError('Bad Request', 'Invalid credentials')
def _default_auth_response_handler(access_token, identity):
return jsonify({'access_token': access_token.decode('utf-8')})
def _default_jwt_error_handler(error):
logger.error(error)
return jsonify(OrderedDict([
('status_code', error.status_code),
('error', error.error),
('description', error.description),
])), error.status_code, error.headers
def _jwt_required(realm):
"""Does the actual work of verifying the JWT data in the current request.
This is done automatically for you by `jwt_required()` but you could call it manually.
Doing so would be useful in the context of optional JWT access in your APIs.
:param realm: an optional realm
"""
token = _jwt.request_callback()
if token is None:
raise JWTError('Authorization Required', 'Request does not contain an access token',
headers={'WWW-Authenticate': 'JWT realm="%s"' % realm})
try:
payload = _jwt.jwt_decode_callback(token)
except jwt.InvalidTokenError as e:
raise JWTError('Invalid token', str(e))
_request_ctx_stack.top.current_identity = identity = _jwt.identity_callback(payload)
if identity is None:
raise JWTError('Invalid JWT', 'User does not exist')
def jwt_required(realm=None):
"""View decorator that requires a valid JWT token to be present in the request
:param realm: an optional realm
"""
def wrapper(fn):
@wraps(fn)
def decorator(*args, **kwargs):
_jwt_required(realm or current_app.config['JWT_DEFAULT_REALM'])
return fn(*args, **kwargs)
return decorator
return wrapper
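# --- Editor's illustrative sketch (not part of Flask-JWT) --------------------
# The docstring of _jwt_required() above notes that it can be called manually
# to implement optional JWT access. A hypothetical decorator doing exactly
# that might look like this (the name jwt_optional is an assumption):
def jwt_optional(realm=None):
    """Populate ``current_identity`` when a valid token is present, but let
    the request through anonymously when it is not."""
    def wrapper(fn):
        @wraps(fn)
        def decorator(*args, **kwargs):
            try:
                _jwt_required(realm or current_app.config['JWT_DEFAULT_REALM'])
            except JWTError:
                # No (or invalid) token: proceed without an identity.
                pass
            return fn(*args, **kwargs)
        return decorator
    return wrapper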
class JWTError(Exception):
def __init__(self, error, description, status_code=401, headers=None):
self.error = error
self.description = description
self.status_code = status_code
self.headers = headers
def __repr__(self):
return 'JWTError: %s' % self.error
def __str__(self):
return '%s. %s' % (self.error, self.description)
def encode_token():
return _jwt.encode_callback(_jwt.header_callback(), _jwt.payload_callback())
class JWT(object):
def __init__(self, app=None, authentication_handler=None, identity_handler=None):
self.authentication_callback = authentication_handler
self.identity_callback = identity_handler
self.auth_response_callback = _default_auth_response_handler
self.auth_request_callback = _default_auth_request_handler
self.jwt_encode_callback = _default_jwt_encode_handler
self.jwt_decode_callback = _default_jwt_decode_handler
self.jwt_headers_callback = _default_jwt_headers_handler
self.jwt_payload_callback = _default_jwt_payload_handler
self.jwt_error_callback = _default_jwt_error_handler
self.request_callback = _default_request_handler
if app is not None:
self.init_app(app)
def init_app(self, app):
for k, v in CONFIG_DEFAULTS.items():
app.config.setdefault(k, v)
app.config.setdefault('JWT_SECRET_KEY', app.config['SECRET_KEY'])
auth_url_rule = app.config.get('JWT_AUTH_URL_RULE', None)
if auth_url_rule:
if self.auth_request_callback == _default_auth_request_handler:
assert self.authentication_callback is not None, (
'an authentication_handler function must be defined when using the built in '
'authentication resource')
auth_url_options = app.config.get('JWT_AUTH_URL_OPTIONS', {'methods': ['POST']})
auth_url_options.setdefault('view_func', self.auth_request_callback)
app.add_url_rule(auth_url_rule, **auth_url_options)
app.errorhandler(JWTError)(self._jwt_error_callback)
if not hasattr(app, 'extensions'): # pragma: no cover
app.extensions = {}
app.extensions['jwt'] = self
def _jwt_error_callback(self, error):
return self.jwt_error_callback(error)
def authentication_handler(self, callback):
"""Specifies the identity handler function. This function receives two positional
arguments. The first being the username the second being the password. It should return an
object representing an authenticated identity. Example::
@jwt.authentication_handler
def authenticate(username, password):
user = User.query.filter(User.username == username).scalar()
if bcrypt.check_password_hash(user.password, password):
return user
:param callback: the authentication handler function
"""
self.authentication_callback = callback
return callback
def identity_handler(self, callback):
"""Specifies the identity handler function. This function receives one positional argument
being the JWT payload. For example::
@jwt.identity_handler
def identify(payload):
return User.query.filter(User.id == payload['identity']).scalar()
:param callback: the identity handler function
"""
self.identity_callback = callback
return callback
def jwt_error_handler(self, callback):
"""Specifies the error handler function. Example::
@jwt.jwt_error_handler
def error_handler(e):
return "Something bad happened", 400
:param callback: the error handler function
"""
self.jwt_error_callback = callback
return callback
def auth_response_handler(self, callback):
"""Specifies the authentication response handler function.
:param callable callback: the auth response handler function
"""
self.auth_response_callback = callback
return callback
def auth_request_handler(self, callback):
"""Specifies the authentication response handler function.
:param callable callback: the auth request handler function
.. deprecated
"""
warnings.warn("This handler is deprecated. The recommended approach to have control over "
"the authentication resource is to disable the built-in resource by "
"setting JWT_AUTH_URL_RULE=None and registering your own authentication "
"resource directly on your application.", DeprecationWarning, stacklevel=2)
self.auth_request_callback = callback
return callback
def request_handler(self, callback):
"""Specifieds the request handler function. This function returns a JWT from the current
request.
:param callable callback: the request handler function
"""
self.request_callback = callback
return callback
def jwt_encode_handler(self, callback):
"""Specifies the encoding handler function. This function receives a payload and signs it.
:param callable callback: the encoding handler function
"""
self.jwt_encode_callback = callback
return callback
def jwt_decode_handler(self, callback):
"""Specifies the decoding handler function. This function receives a
signed payload and decodes it.
:param callable callback: the decoding handler function
"""
self.jwt_decode_callback = callback
return callback
def jwt_payload_handler(self, callback):
"""Specifies the JWT payload handler function. This function receives the return value from
the ``identity_handler`` function
Example::
@jwt.payload_handler
def make_payload(identity):
return {'user_id': identity.id}
:param callable callback: the payload handler function
"""
self.jwt_payload_callback = callback
return callback
def jwt_headers_handler(self, callback):
"""Specifies the JWT header handler function. This function receives the return value from
the ``identity_handler`` function.
Example::
@jwt.payload_handler
def make_payload(identity):
return {'user_id': identity.id}
:param callable callback: the payload handler function
"""
self.jwt_headers_callback = callback
return callback
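# --- Editor's illustrative usage sketch (not part of Flask-JWT) --------------
# Minimal wiring of the handlers documented above. The in-memory user store
# and its fields are illustrative assumptions, not part of the library.
if __name__ == '__main__':  # pragma: no cover
    from flask import Flask

    class _User(object):
        def __init__(self, id, username, password):
            self.id, self.username, self.password = id, username, password

    _users = {'joe': _User(1, 'joe', 'pass')}

    def authenticate(username, password):
        user = _users.get(username)
        if user and user.password == password:
            return user

    def identify(payload):
        for user in _users.values():
            if user.id == payload['identity']:
                return user

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'
    jwt = JWT(app, authenticate, identify)

    @app.route('/protected')
    @jwt_required()
    def protected():
        return 'Hello %s' % current_identity.username

    app.run()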
| 33.801653
| 99
| 0.67987
|
44c8e43072a0de4809d496ec7e65bbe9fb1cfae0
| 5,098
|
py
|
Python
|
doc/conf.py
|
Infinidat/mitba
|
f4a38d1dbf5241b53251d6fffa0aab4257147d29
|
[
"BSD-3-Clause"
] | null | null | null |
doc/conf.py
|
Infinidat/mitba
|
f4a38d1dbf5241b53251d6fffa0aab4257147d29
|
[
"BSD-3-Clause"
] | 2
|
2019-08-08T06:49:10.000Z
|
2019-08-13T12:04:16.000Z
|
doc/conf.py
|
Infinidat/mitba
|
f4a38d1dbf5241b53251d6fffa0aab4257147d29
|
[
"BSD-3-Clause"
] | 1
|
2016-05-22T15:11:00.000Z
|
2016-05-22T15:11:00.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import pkg_resources
sys.path.insert(0, os.path.dirname(__file__))
# -- Project information -----------------------------------------------------
project = 'mitba'
copyright = '2019, Infinidat Ltd.' # pylint: disable=redefined-builtin
author = 'Infinidat Ltd.'
# The short X.Y version
# The full version, including alpha/beta/rc tags
version = release = pkg_resources.get_distribution('mitba').version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'releases',
'alabaster',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_user': 'Infinidat',
'github_repo': 'mitba',
'github_button': True,
'github_banner': True,
'travis_button': 'Infinidat/mitba',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mitbadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mitba.tex', 'mitba Documentation',
'Infinidat Ltd.', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mitba', 'mitba Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mitba', 'mitba Documentation',
author, 'mitba', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
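# The original file leaves this section empty. As an illustrative,
# non-authoritative sketch, per-extension settings would normally go here,
# for example default options for sphinx.ext.autodoc:
#
# autodoc_default_options = {
#     'members': True,
#     'undoc-members': True,
#     'show-inheritance': True,
# }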
| 30.526946
| 79
| 0.650843
|
f5379887eee97f5f3469bef9f5b624293b9f63a4
| 691
|
py
|
Python
|
examples/shared.py
|
ucbrise/jarvis
|
f40ccb1708a44e4392f26ed217b69df44abca824
|
[
"Apache-2.0"
] | 14
|
2017-12-13T01:52:46.000Z
|
2022-01-12T11:31:20.000Z
|
examples/shared.py
|
ucbrise/jarvis
|
f40ccb1708a44e4392f26ed217b69df44abca824
|
[
"Apache-2.0"
] | 9
|
2018-01-18T02:53:16.000Z
|
2018-04-07T18:36:12.000Z
|
examples/shared.py
|
ucbrise/jarvis
|
f40ccb1708a44e4392f26ed217b69df44abca824
|
[
"Apache-2.0"
] | 8
|
2018-01-12T21:49:39.000Z
|
2021-09-03T06:27:05.000Z
|
#!/usr/bin/env python3
import numpy as np
# Define the names of each column in the tweets file
attribute_names = []
attribute_names.append('id')
attribute_names.append('tweet')
attribute_names.append('place')
attribute_names.append('city')
attribute_names.append('country')
attribute_names.append('code')
# Define the data type of every element in a column
attribute_types = {
'id': np.int32,
'tweet': str,
'place': str,
'city': str,
'country': str,
'code': str
}
# Parameters for reading the twitter data into a pandas dataframe (see the sketch after this module)
params = dict(header=None, names=attribute_names, dtype=attribute_types)
# Select a relevant subset of features
relevant_attributes = ["tweet", "code"]
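# --- Editor's illustrative sketch (assumptions flagged below) ----------------
# The module above only defines shared parameters; reading the file and
# selecting the relevant columns would look roughly like this. The file name
# 'train_tweets.csv' is a placeholder assumption, not taken from the repo.
if __name__ == '__main__':
    import pandas as pd

    tweets = pd.read_csv('train_tweets.csv', **params)
    training_data = tweets[relevant_attributes]
    print(training_data.head())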
| 25.592593
| 72
| 0.725036
|
6f47a4758f775526a78c5878d10d9d94772bafae
| 3,638
|
py
|
Python
|
pyntel4004/src/hardware/suboperations/ram.py
|
alshapton/Pyntel4004
|
865a7fc5264d24f1281bee44c40a51e7e42598a0
|
[
"MIT"
] | 6
|
2021-02-12T21:37:53.000Z
|
2022-02-24T23:09:37.000Z
|
pyntel4004/src/hardware/suboperations/ram.py
|
alshapton/Pyntel4004
|
865a7fc5264d24f1281bee44c40a51e7e42598a0
|
[
"MIT"
] | 43
|
2021-04-23T09:32:24.000Z
|
2022-02-01T15:17:09.000Z
|
pyntel4004/src/hardware/suboperations/ram.py
|
alshapton/Pyntel4004
|
865a7fc5264d24f1281bee44c40a51e7e42598a0
|
[
"MIT"
] | 2
|
2021-06-11T01:12:44.000Z
|
2021-09-14T22:44:11.000Z
|
"""RAM methods."""
# Import system modules
import os
import sys
sys.path.insert(1, '..' + os.sep + 'src')
from hardware.suboperations.other import decode_command_register # noqa
def rdx(self, character) -> int:
"""
Read RAM status character X.
Parameters
----------
self : Processor, mandatory
The instance of the Processor containing the registers, accumulator etc
character:
RAM STATUS CHARACTER to read
Returns
-------
self.ACCUMULATOR
The value read from the specified RAM STATUS CHARACTER
"""
crb = self.read_current_ram_bank()
chip, register, _none = \
decode_command_register(self.COMMAND_REGISTER, 'DATA_RAM_STATUS_CHAR')
self.ACCUMULATOR = self.STATUS_CHARACTERS[crb][chip][register][character]
self.increment_pc(1)
return self.ACCUMULATOR
def read_all_ram(self) -> list:
"""
Return the values of all the locations of RAM.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
RAM
The values of all the locations of RAM
"""
return self.RAM
def read_all_ram_ports(self) -> list:
"""
Return the values of all the RAM ports.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
RAM_PORT
The values of all the RAM ports
"""
return self.RAM_PORT
def read_all_pram(self) -> list:
"""
Return the values of all the locations of PRAM.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
PRAM
The values of all the locations of PRAM
"""
return self.PRAM
def read_all_status_characters(self) -> list:
"""
Return the values of all the RAM status characters.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
STATUS_CHARACTERS
The values of all the RAM status characters
"""
return self.STATUS_CHARACTERS
def read_current_ram_bank(self) -> int:
"""
Return the current RAM bank i.e. the one selected by the SRC.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
CURRENT_RAM_BANK
The current RAM bank value
"""
return self.CURRENT_RAM_BANK
def write_ram_status(self, char: int) -> bool:
"""
Write to a RAM status character.
Parameters
----------
self: Processor, mandatory
The instance of the processor containing the registers, accumulator etc
char: int, mandatory
specified status character
Returns
-------
True
if the value is set successfully
Raises
------
N/A
Notes
-----
No error checking is done in this function.
Parameters cannot be out of range, since the functions that place them
in the various registers etc. all have range checking built in.
Eventually, error checking will be added here.
"""
value = self.read_accumulator()
crb = self.read_current_ram_bank()
chip, register, _none = \
decode_command_register(self.COMMAND_REGISTER,
'DATA_RAM_STATUS_CHAR')
self.STATUS_CHARACTERS[crb][chip][register][char] = value
return True
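# --- Editor's illustrative sketch (not part of Pyntel4004) -------------------
# The functions above index RAM status characters as
#     STATUS_CHARACTERS[ram_bank][chip][register][character]
# A plain nested-list mock shows the shape of that structure; the sizes used
# here (8 banks x 4 chips x 4 registers x 4 characters) are the editor's
# assumption about the 4004 memory layout, not taken from this module.
def _mock_status_characters(banks=8, chips=4, registers=4, characters=4):
    return [[[[0 for _ in range(characters)]
              for _ in range(registers)]
             for _ in range(chips)]
            for _ in range(banks)]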
| 22.182927
| 79
| 0.637438
|
d95464afeb553cf139e32d4310ff38c1355f877c
| 1,940
|
py
|
Python
|
google/appengine/_internal/antlr3/extras.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/google/appengine/_internal/antlr3/extras.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/google/appengine/_internal/antlr3/extras.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
""" @package antlr3.dottreegenerator
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lots of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
from treewizard import TreeWizard
try:
from google.appengine._internal.antlr3.dottreegen import toDOT
except ImportError, exc:
def toDOT(*args, **kwargs):
raise exc
| 40.416667
| 79
| 0.769072
|
12fdeef5d26bbb94352e8d3eab0790bd1137b9c1
| 14,709
|
py
|
Python
|
util.py
|
Koen-Git/ColorSymDetect
|
5d6bb6734063f4a09c9a153527a446ce5c02a5b0
|
[
"MIT"
] | 1
|
2022-03-07T20:08:32.000Z
|
2022-03-07T20:08:32.000Z
|
util.py
|
Koen-Git/ColorSymDetect
|
5d6bb6734063f4a09c9a153527a446ce5c02a5b0
|
[
"MIT"
] | null | null | null |
util.py
|
Koen-Git/ColorSymDetect
|
5d6bb6734063f4a09c9a153527a446ce5c02a5b0
|
[
"MIT"
] | null | null | null |
import os
import cv2
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import preprocess
# Return a list of all image names with a given extension in a given folder
def listImages(dir, extension):
res = []
for img in os.listdir(dir):
if img.endswith(extension):
res.append(img)
return res
# Return a list of all image names in subfolders with a given extension in a given folder
def listImagesSub(dir, extension):
res = []
for subdir in os.listdir(dir):
for img in os.listdir(dir + subdir):
if img.endswith(extension):
res.append(subdir + "/" + img)
return res
# Compute the slope of a given line
# Infinite slopes (vertical lines) are set to the height of the image
def getSlope(line, height):
# line is [x1, y1, x2, y2]
if (line[2] - line[0]) == 0:
slope = height
else:
slope = ((line[3]-line[1]) / (line[2]-line[0]))
return slope
# Return True if two slopes are perpendicular
# Will also return True if the slopes are close to perpendicular (within a tolerance of 1)
def isPerpendicular(slope1, slope2):
if slope1 != 0:
if (abs((-1 * (1 / slope1)) - slope2) < 1):
return True
elif slope2 != 0:
if (abs((-1 * (1 / slope2)) - slope1) < 1):
return True
return False
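# Editor's illustrative check (not part of the original module): slopes 2 and
# -0.5 are exactly perpendicular, while 2 and 0.75 fall outside the tolerance.
def _demo_is_perpendicular():
    assert isPerpendicular(2, -0.5)          # |-1/2 - (-0.5)| = 0 < 1
    assert not isPerpendicular(2, 0.75)      # |-1/2 - 0.75| = 1.25 >= 1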
# Calculate intersection between two lines, return None if no intersection
# from https://rosettacode.org/wiki/Find_the_intersection_of_two_lines#Python
def line_intersect(Ax1, Ay1, Ax2, Ay2, Bx1, By1, Bx2, By2):
""" returns a (x, y) tuple or None if there is no intersection """
d = (By2 - By1) * (Ax2 - Ax1) - (Bx2 - Bx1) * (Ay2 - Ay1)
if d:
uA = ((Bx2 - Bx1) * (Ay1 - By1) - (By2 - By1) * (Ax1 - Bx1)) / d
uB = ((Ax2 - Ax1) * (Ay1 - By1) - (Ay2 - Ay1) * (Ax1 - Bx1)) / d
else:
return
if not(0 <= uA <= 1 and 0 <= uB <= 1):
return
x = Ax1 + uA * (Ax2 - Ax1)
y = Ay1 + uA * (Ay2 - Ay1)
return x, y
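# Editor's illustrative check (not part of the original module): the two
# diagonals of a 2x2 square cross at (1, 1); parallel segments return None.
def _demo_line_intersect():
    assert line_intersect(0, 0, 2, 2, 0, 2, 2, 0) == (1.0, 1.0)
    assert line_intersect(0, 0, 1, 0, 0, 1, 1, 1) is None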
# Calculate the distance of a point to all endpoints of two lines
# returns minimum of these distances
# Used to calculate radius of rotational symmetry
# Minimum distance from intersection to any endpoint = radius
def minDistance(intersect, line1, line2):
dist1 = np.sqrt( (line1[0] - intersect[0])**2 + (line1[1] - intersect[1])**2 )
dist2 = np.sqrt( (line1[2] - intersect[0])**2 + (line1[3] - intersect[1])**2 )
dist3 = np.sqrt( (line2[0] - intersect[0])**2 + (line2[1] - intersect[1])**2 )
dist4 = np.sqrt( (line2[2] - intersect[0])**2 + (line2[3] - intersect[1])**2 )
return (min(dist1, dist2, dist3, dist4))
# Used to reorder symmetries for ease of processing
# from:
# [[[[[line], slope, score, normScore, depth], ...], depth] ... ]
# To:
# [[[line], slope, score, normScore, depth], ...]
def placeInOrder(symmetries):
newSymmetries = []
for syms in symmetries:
newSymmetries += syms[0]
return newSymmetries
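# Editor's illustrative check (not part of the original module): flattening the
# nested [[symmetries, depth], ...] structure described above into a flat list.
def _demo_place_in_order():
    nested = [[[[[0, 0, 1, 1], 0.5, 10, 1.0, 0]], 0],
              [[[[1, 0, 1, 2], 2.0, 8, 0.8, 1]], 1]]
    assert placeInOrder(nested) == [[[0, 0, 1, 1], 0.5, 10, 1.0, 0],
                                    [[1, 0, 1, 2], 2.0, 8, 0.8, 1]]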
# Only required when cuts are made before knowing symThreshold (Ipynb kernel)
# Will also reorder the symmetries as placeInOrder does
# Will remove all smaller parts of an image where the cut was made on a reflection
# symmetry line with a low score. Will also remove all parts based on that recursive loop.
def removeBadCuts(symmetries, symThreshold):
newSymmetries = []
deleteDepth = 99999
for syms in symmetries:
if syms[1] == 0:
newSymmetries += syms[0]
continue
if syms[1] >= deleteDepth:
symmetries.remove(syms)
continue
else:
deleteDepth = 99999
mainSym = syms[0][0]
if mainSym[2] < symThreshold:
deleteDepth = syms[1] + 1
symmetries.remove(syms)
continue
else:
newSymmetries += syms[0]
return newSymmetries
# Remove symmetries if they have a normalized score under normThreshold
# or if they have a normalized score of 1.0 and a score under symThreshold, i.e. are the main symmetry in their recursive loop (sub image)
# If a main symmetry is removed, all other symmetries in that recursive loop are also removed,
# by removing next symmetries until they have a different depth, meaning they belong to a different loop
def removeBadSymmetries(symmetries, symThreshold, normThreshold):
copySym = symmetries[:]
# Start from one, always keep first symmetry
for i in range(1, len(symmetries)):
if symmetries[i] not in copySym:
continue
if symmetries[i][3] < normThreshold:
copySym.remove(symmetries[i])
elif symmetries[i][3] == 1.0:
if symmetries[i][2] < symThreshold:
copySym.remove(symmetries[i])
j = i + 1
if j >= len(symmetries):
break
while (symmetries[i][4] == symmetries[j][4]):
if symmetries[j] not in copySym:
j = j + 1
if j >= len(symmetries):
break
continue
copySym.remove(symmetries[j])
j = j + 1
if j >= len(symmetries):
break
return copySym
# Loop over each line and compare it to every other line
# If the slopes are similar (or both lines are very steep, |slope| > height/3)
# and the distance between the line centre points is below maxDist,
# the line with the lower symmetry score is removed
# maxDistX and maxDistY are set from the image width and height via lineSimilarity
# and averaged into maxDist, the maximum allowed distance between line centres
def removeSimilarLines(symmetries, image, lineSimilarity):
height, width, _ = image.shape
maxDistX = width / lineSimilarity
maxDistY = height / lineSimilarity
maxDist = (maxDistX + maxDistY) / 2
maxSlopeDiff = maxDistY
copySym = symmetries[:]
def lowerScore(sym1, sym2):
if sym1[2] < sym2[2]:
return sym1
return sym2
for i in range(0, len(copySym)):
for j in range(i + 1, len(copySym)):
if copySym[i] not in symmetries:
break
if copySym[j] not in symmetries:
continue
if abs(copySym[i][1] - copySym[j][1]) < maxSlopeDiff or (abs(copySym[i][1]) > height / 3 and abs(copySym[j][1]) > height / 3):
center1 = (((copySym[i][0][0] + copySym[i][0][2]) / 2), ((copySym[i][0][1] + copySym[i][0][3]) / 2))
center2 = (((copySym[j][0][0] + copySym[j][0][2]) / 2), ((copySym[j][0][1] + copySym[j][0][3]) / 2))
dist = np.sqrt( (center1[0] - center2[0])**2 + (center1[1] - center2[1])**2 )
if dist < maxDist:
if i == 0:
symmetries.remove(copySym[j])
else:
symmetries.remove(lowerScore(copySym[i], copySym[j]))
return symmetries
# Remove similar rotational symmetries
# Two rotations count as similar if their centre points are within maxDistX and
# maxDistY of each other and their radii differ by less than max(maxDistX, maxDistY)
# Of a similar pair, removeRot decides which one is dropped: by default the one with
# the smaller radius (alternatively, the one with the lower average reflection score)
def removeSimilarRotational(rotations, image, rotationSimilarity):
height, width, _ = image.shape
maxDistX = width / rotationSimilarity
maxDistY = height / rotationSimilarity
copyRot = rotations[:]
def removeRot(rot1, rot2):
# i=1 -> Remove the rotational symmetry with the smaller radius
# i=2 -> Remove the rotational symmetry with the lower average score of the reflection symmetries that formed it
i = 1
if rot1[i] < rot2[i]:
return rot1
return rot2
for i in range(0, len(copyRot)):
for j in range(i + 1, len(copyRot)):
if copyRot[i] not in rotations:
break
if copyRot[j] not in rotations:
continue
if abs(copyRot[i][0][0] - copyRot[j][0][0]) < maxDistX:
if abs(copyRot[i][0][1] - copyRot[j][0][1]) < maxDistY:
if abs(copyRot[i][1] - copyRot[j][1]) < max(maxDistX, maxDistY):
rotations.remove(removeRot(copyRot[i], copyRot[j]))
# Checks if distance between intersection point and endpoints of reflection lines is similar enough
# Used to calculate rotational symmetries with a non ML approach
def checkDistance(intersect, line1, line2, distDifference):
dist1 = np.sqrt( (line1[0] - intersect[0])**2 + (line1[1] - intersect[1])**2 )
dist2 = np.sqrt( (line1[2] - intersect[0])**2 + (line1[3] - intersect[1])**2 )
dist3 = np.sqrt( (line2[0] - intersect[0])**2 + (line2[1] - intersect[1])**2 )
dist4 = np.sqrt( (line2[2] - intersect[0])**2 + (line2[3] - intersect[1])**2 )
if abs(dist1 - dist2) > distDifference:
return False
elif abs(dist1 - dist3) > distDifference:
return False
elif abs(dist1 - dist4) > distDifference:
return False
elif abs(dist2 - dist3) > distDifference:
return False
elif abs(dist2 - dist4) > distDifference:
return False
elif abs(dist3 - dist4) > distDifference:
return False
return True
# Find rotational symmetries with a given machine learning model
# Will loop over each reflection symmetry in a double loop and check if any have intersections
# Pairs with intersections will be pre-processed and subsequently predicted by the model
# Positive results will create a rotational symmetry in their centerpoint
# The radius is determined by the minDistance function
# Reflection symmetries which create a rotational symmetry are removed afterwards
# Will not be executed in 'fast' mode
def rotationalSymmetriesML(symmetries, model, data):
h, w, _ = data.shape
rotations = []
data = pd.DataFrame()
for i in range(0, len(symmetries)):
for j in range(i + 1, len(symmetries)):
intersect = line_intersect(symmetries[i][0][0], symmetries[i][0][1], symmetries[i][0][2], symmetries[i][0][3], symmetries[j][0][0], symmetries[j][0][1], symmetries[j][0][2], symmetries[j][0][3])
if intersect == None:
continue
s = pd.Series(data={
"line1x1": symmetries[i][0][0],
"line1y1": symmetries[i][0][1],
"line1x2": symmetries[i][0][2],
"line1y2": symmetries[i][0][3],
"line1Score": symmetries[i][2],
"line2x1": symmetries[j][0][0],
"line2y1": symmetries[j][0][1],
"line2x2": symmetries[j][0][2],
"line2y2": symmetries[j][0][3],
"line2Score": symmetries[j][2],
"height": h,
"width": w
}, name="rotation")
data = data.append(s, ignore_index=False)
if len(data) > 0:
cpyData = data.copy()
data = preprocess.preproccesData(data)
pred = model.predict(data)
for i in range(0, len(data)):
if pred[i] == True:
intersect = line_intersect(cpyData["line1x1"][i], cpyData["line1y1"][i], cpyData["line1x2"][i], cpyData["line1y2"][i], cpyData["line2x1"][i], cpyData["line2y1"][i], cpyData["line2x2"][i], cpyData["line2y2"][i])
rad = minDistance(intersect, [cpyData["line1x1"][i], cpyData["line1y1"][i], cpyData["line1x2"][i], cpyData["line1y2"][i]], [cpyData["line2x1"][i], cpyData["line2y1"][i], cpyData["line2x2"][i], cpyData["line2y2"][i]])
meanScore = (cpyData["line1Score"][i] + cpyData["line2Score"][i]) / 2
rot = [intersect, rad, meanScore]
rotations.append(rot)
return rotations
# Find rotational symmetries given reflection symmetries and a threshold
# Will loop over each reflection symmetry in a double loop and check if any pairs:
# - have similar symmetry score, their relative score must be inside the circleSymThreshold
# - have intersections,
# - are (close to) perpendicular
# - have distances from their endpoints to the intersection not too different from one another
# Positive results will create a rotational symmetry in their centerpoint
# The radius is determined by the minDistance function
# Reflection symmetries which create a rotational symmetry are removed afterwards
# Will not be executed in 'slow' mode
def rotationalSymmetries(symmetries, image, circleSymThreshold):
rotations = []
tmp = []
copySym = symmetries[:]
height, width, _ = image.shape
distDifference = min(height / 5, width / 5)
for sym in symmetries:
for subsym in copySym:
# First check if lines have similar symmetry scores
if max(sym[2], subsym[2]) * circleSymThreshold > min(sym[2], subsym[2]):
continue
# Check if lines are perpendicular
if isPerpendicular(sym[1], subsym[1]) == False:
continue
intersect = line_intersect(sym[0][0], sym[0][1], sym[0][2], sym[0][3], subsym[0][0], subsym[0][1], subsym[0][2], subsym[0][3])
if intersect != None:
if checkDistance(intersect, sym[0], subsym[0], distDifference) == False:
continue
rad = minDistance(intersect, sym[0], subsym[0])
meanScore = (sym[2] + subsym[2]) / 2
rot = [intersect, rad, meanScore]
rotations.append(rot)
return rotations
# Plot all given reflection symmetry lines
def plotLines(symmetries):
n = 0
for sym in symmetries:
if sym[4] > n:
n = sym[4]
linewidth = 3
# Colors dictated by the colormap (jet)
colors = plt.cm.jet(np.linspace(0,1,n + 1))
for i, sym in enumerate(symmetries):
color = colors[sym[4]]
x = [sym[0][0], sym[0][2]]
y = [sym[0][1], sym[0][3]]
plt.plot(x, y, color=color, linewidth=linewidth)
# Plot all given rotational symmetries
def plotRotations(rotations):
for rot in rotations:
circleSym = plt.Circle(rot[0], linewidth=2.5, radius=rot[1], color="yellow", fill=False)
fig = plt.gcf()
axs = fig.gca()
axs.add_patch(circleSym)
# Used to resize an image by a given fraction
def resize_image(image, fraction):
h, w, _ = image.shape
desiredW = int(w / fraction)
desiredH = int(h / fraction)
dimensions = (desiredW, desiredH)
resizedImage = cv2.resize(image, dimensions, interpolation = cv2.INTER_AREA)
return resizedImage
| 42.025714
| 232
| 0.607451
|
4635be3de31410935a1facb10a74d414b76dcdca
| 666
|
py
|
Python
|
comportamentais/chain_of_responsibility/descontos.py
|
jgabriellima/design-patterns-python
|
e955d570265c154863fbfc65564dd4781f549042
|
[
"Apache-2.0"
] | 363
|
2018-07-30T18:52:55.000Z
|
2022-03-29T23:04:26.000Z
|
comportamentais/chain_of_responsibility/descontos.py
|
sou-rafael/design-patterns-python
|
e955d570265c154863fbfc65564dd4781f549042
|
[
"Apache-2.0"
] | 7
|
2018-07-14T20:19:23.000Z
|
2020-04-17T00:24:30.000Z
|
comportamentais/chain_of_responsibility/descontos.py
|
sou-rafael/design-patterns-python
|
e955d570265c154863fbfc65564dd4781f549042
|
[
"Apache-2.0"
] | 99
|
2018-09-06T18:11:43.000Z
|
2022-03-27T13:32:45.000Z
|
class DescontoCincoItens:
def __init__(self, proximo_desconto):
self.__proximo_desconto = proximo_desconto
def calcula(self, orcamento):
if orcamento.total_itens > 5:
return orcamento.valor * 0.1
return self.__proximo_desconto.calcula(orcamento)
class DescontoMaisDeQuinhentosReais:
def __init__(self, proximo_desconto):
self.__proximo_desconto = proximo_desconto
def calcula(self, orcamento):
if orcamento.valor > 500.0:
return orcamento.valor * 0.07
return self.__proximo_desconto.calcula(orcamento)
class SemDesconto:
def calcula(self, orcamento):
return 0
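# --- Editor's illustrative usage sketch (not part of the original file) ------
# The real project defines an Orcamento (budget) class elsewhere; a namedtuple
# with the two attributes used above stands in for it here.
if __name__ == '__main__':
    from collections import namedtuple

    Orcamento = namedtuple('Orcamento', ['valor', 'total_itens'])
    cadeia = DescontoCincoItens(DescontoMaisDeQuinhentosReais(SemDesconto()))

    print(cadeia.calcula(Orcamento(valor=500.0, total_itens=6)))   # 50.0 (10%)
    print(cadeia.calcula(Orcamento(valor=600.0, total_itens=3)))   # ~42.0 (7%)
    print(cadeia.calcula(Orcamento(valor=100.0, total_itens=1)))   # 0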
| 27.75
| 57
| 0.695195
|
869c606a1ac8a16958bbcc35f3a62a65f9d833ea
| 1,501
|
py
|
Python
|
other/day21b.py
|
p88h/aoc2021
|
b42a12ce7ca786b2b5061fdcb46bd850ed54b82c
|
[
"Apache-2.0"
] | 15
|
2021-12-01T16:50:40.000Z
|
2022-01-04T02:44:00.000Z
|
other/day21b.py
|
p88h/aoc2021
|
b42a12ce7ca786b2b5061fdcb46bd850ed54b82c
|
[
"Apache-2.0"
] | null | null | null |
other/day21b.py
|
p88h/aoc2021
|
b42a12ce7ca786b2b5061fdcb46bd850ed54b82c
|
[
"Apache-2.0"
] | 6
|
2021-12-08T01:33:56.000Z
|
2021-12-29T11:45:52.000Z
|
from collections import defaultdict
import time
lines = open("input/day21.txt").readlines()
p1 = int(lines[0].split(": ")[1])
p2 = int(lines[1].split(": ")[1])
def run1(p1, p2):
s1 = s2 = ofs = cnt = 0
while True:
p1 = (p1+(ofs % 100)+((ofs+1) % 100)+((ofs+2) % 100)+2) % 10+1
ofs = (ofs+3) % 100
cnt = cnt + 3
s1 = s1 + p1
if s1 >= 1000:
return s2*cnt
(p1, p2, s1, s2) = (p2, p1, s2, s1)
def produce3(multiverse, p, s, c, w):
for (d, f) in [(6, 7), (5, 6), (7, 6), (4, 3), (8, 3), (3, 1), (9, 1)]:
np = ((p + d - 1) % 10) + 1
ns = s + np
if ns < 21:
multiverse[(np, ns)] += c*f
else:
w[-1] += c*f
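# Editor's illustrative sketch (not part of the original solution): the
# hard-coded (sum, frequency) pairs in produce3 are the distribution of the
# sum of three rolls of a 3-sided Dirac die, which can be recomputed directly.
def _dirac_roll_distribution():
    from itertools import product
    counts = defaultdict(int)
    for rolls in product((1, 2, 3), repeat=3):
        counts[sum(rolls)] += 1
    return dict(counts)   # {3: 1, 4: 3, 5: 6, 6: 7, 7: 6, 8: 3, 9: 1}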
def run2(p):
multiverse = defaultdict(int)
multiverse[(p, 0)] = 1
wins = []
while multiverse:
wins.append(0)
temp = defaultdict(int)
for (p, s) in multiverse:
produce3(temp, p, s, multiverse[(p, s)], wins)
multiverse = temp
return wins
def run3(p1, p2):
wins1 = run2(p1)
wins2 = run2(p2)
size1 = size2 = 1
w1 = w2 = 0
for step in range(len(wins1)):
size1 = size1 * 27 - wins1[step]
w1 += wins1[step]*size2
size2 = size2 * 27 - wins2[step]
w2 += wins2[step]*size1
return max(w1, w2)
start = time.time()
ret1 = run1(p1, p2)
end = time.time()
print(ret1, end-start)
start = time.time()
ret2 = run3(p1, p2)
end = time.time()
print(ret2, end-start)
| 23.092308
| 75
| 0.493671
|
3034d54c52c453cb06e1825ddb59a806a4c4a59b
| 59,992
|
py
|
Python
|
pandas/tests/io/parser/test_common.py
|
sayanmondal2098/pandas
|
2f6b90aaaab6814c102eb160c5a9c11bc04a092e
|
[
"BSD-3-Clause"
] | 2
|
2021-04-07T13:56:06.000Z
|
2021-04-12T13:45:23.000Z
|
pandas/tests/io/parser/test_common.py
|
sanjusci/pandas
|
a1fee9199eba7ebf423880243936b9f1501d3d3a
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/io/parser/test_common.py
|
sanjusci/pandas
|
a1fee9199eba7ebf423880243936b9f1501d3d3a
|
[
"BSD-3-Clause"
] | 3
|
2018-01-08T08:40:55.000Z
|
2019-10-07T02:02:40.000Z
|
# -*- coding: utf-8 -*-
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
from collections import OrderedDict
import csv
from datetime import datetime
from io import BytesIO, StringIO
import os
import platform
from tempfile import TemporaryFile
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.compat import lrange
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas.util.testing as tm
from pandas.io.common import URLError
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [
Timestamp("2014-01-01 09:00:00"),
Timestamp("2014-01-02 10:00:00")
]
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ","}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_bytes_io_input(all_parsers):
encoding = "cp1255"
parser = all_parsers
data = BytesIO("שלום:1234\n562:123".encode(encoding))
result = parser.read_csv(data, sep=":", encoding=encoding)
expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame([[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007]],
columns=["A", "B", "C", "D"],
index=Index([datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11)], name="index"))
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
"A": [1, 10],
"B": [2334, 13],
"C": [5, 10.]
})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0,
header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = 'Expected 3 fields in line 6, saw 5'
reader = parser.read_csv(StringIO(data), header=1, comment="#",
iterator=True, chunksize=1, skiprows=[2])
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]],
dtype=np.int64, columns=["A", "B", "C",
"Unnamed: 3",
"Unnamed: 4"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"],
"B": [1, 3, 4],
"C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True,
index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame([[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007]],
columns=["A", "B", "C", "D"],
index=Index([datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11)], name="index"))
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame([[0.980269, 3.685731, -0.364216805298,
-1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307,
0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318,
1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681,
0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093,
0.103469, "foo2"]],
columns=["A", "B", "C", "D", "E"],
index=Index([datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7)]))
tm.assert_frame_equal(result, expected)
def test_read_csv_unicode(all_parsers):
parser = all_parsers
data = BytesIO("\u0141aski, Jan;1".encode("utf-8"))
result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
expected = DataFrame([["\u0141aski, Jan", 1]])
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame([[2, 3, 4, 5], [7, 8, 9, 10],
[12, 13, 14, 15], [12, 13, 14, 15],
[12, 13, 14, 15], [12, 13, 14, 15]],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz",
"qux", "foo", "bar"], name="index"))
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[2, 3, 4, 5], [7, 8, 9, 10],
[12, 13, 14, 15], [12, 13, 14, 15],
[12, 13, 14, 15], [12, 13, 14, 15]],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz",
"qux", "foo", "bar"]))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
("A,B\nTrue,1\nFalse,2\nTrue,3", dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"])),
("A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"],
false_values=["no", "NO", "No"]),
DataFrame([[True, 1], [False, 2], [True, 3],
[False, 3], [True, 3]], columns=["A", "B"])),
("A,B\nTRUE,1\nFALSE,2\nTRUE,3", dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"])),
("A,B\nfoo,bar\nbar,foo", dict(true_values=["foo"],
false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]))
])
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame([["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"])
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame([["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"])
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"],
["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"])
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"])
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize("kwargs", [
dict(iterator=True,
chunksize=1),
dict(iterator=True),
dict(chunksize=1)
])
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize("data,kwargs,expected", [
("""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""", dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame([[2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15],
[12, 13, 14, 15], [12, 13, 14, 15], [12, 13, 14, 15]],
index=Index(["foo", "bar", "baz", "qux",
"foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"])),
("""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""", dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame([[2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15],
[12, 13, 14, 15], [12, 13, 14, 15]],
index=MultiIndex.from_tuples([
("foo", "one"), ("foo", "two"), ("foo", "three"),
("bar", "one"), ("bar", "two")],
names=["index1", "index2"]),
columns=["A", "B", "C", "D"])),
])
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = '\n'.join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(StringIO(headless_data),
index_col=index_col,
header=None, names=names)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame([[2, 3, 4, 5], [7, 8, 9, 10], [12, 13, 14, 15],
[12, 13, 14, 15], [12, 13, 14, 15]],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples([
("foo", "one"), ("foo", "two"), ("foo", "three"),
("bar", "one"), ("bar", "two")]))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,expected,header", [
("a,b", DataFrame(columns=["a", "b"]), [0]),
("a,b\nc,d", DataFrame(columns=MultiIndex.from_tuples(
[("a", "c"), ("b", "d")])), [0, 1]),
])
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame([[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"],
[2, 2, 2, "e", "f"]], columns=["Unnamed: 0", "id",
"c0", "c1", "c2"])
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = dict(sep="\t")
url = ("https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv")
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = dict(sep="\t")
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: a nonexistent path must not segfault
# gh-14086: raise more helpful FileNotFoundError
parser = all_parsers
path = "%s.csv" % tm.rands(10)
msg = ("does not exist" if parser.engine == "c"
else r"\[Errno 2\]")
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
filename = e.value.filename
filename = filename.decode() if isinstance(
filename, bytes) else filename
assert path == filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1, 2, 3, 4], [1, 3, 3, np.nan],
[1, 4, 5, np.nan]], columns=["A", "B", "C", "D"])
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
parser = all_parsers
result = parser.read_csv(StringIO(data), names=lrange(33), header=None,
na_values=["-9999.0"], skipinitialspace=True)
expected = DataFrame([["09-Apr-2012", "01:10:18.300", 2456026.548822908,
12849, 1.00361, 1.12551, 330.65659,
355626618.16711, 73.48821, 314.11625, 1917.09447,
179.71425, 80.0, 240.0, -350, 70.06056, 344.9837,
1, 1, -0.689265, -0.692787, 0.212036, 14.7674,
41.605, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, 0, 12, 128]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sep", [",", "\t"])
@pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])
def test_utf16_bom_skiprows(all_parsers, sep, encoding):
# see gh-2298
parser = all_parsers
data = """skip this
skip this too
A,B,C
1,2,3
4,5,6""".replace(",", sep)
path = "__%s__.csv" % tm.rands(10)
kwargs = dict(sep=sep, skiprows=2)
utf8 = "utf-8"
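# Write the UTF-16-encoded bytes to a file, then compare the parsed
# result against the same text read from an in-memory UTF-8 buffer.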
with tm.ensure_clean(path) as path:
from io import TextIOWrapper
bytes_data = data.encode(encoding)
with open(path, "wb") as f:
f.write(bytes_data)
bytes_buffer = BytesIO(data.encode(utf8))
bytes_buffer = TextIOWrapper(bytes_buffer, encoding=utf8)
result = parser.read_csv(path, encoding=encoding, **kwargs)
expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)
bytes_buffer.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(all_parsers, csv_dir_path):
path = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
result = parser.read_csv(path, encoding="utf-16", sep="\t")
assert len(result) == 50
def test_unicode_encoding(all_parsers, csv_dir_path):
path = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
result = parser.read_csv(path, header=None, encoding="latin-1")
result = result.set_index(0)
got = result[1][1632]
expected = '\xc1 k\xf6ldum klaka (Cold Fever) (1994)'
assert got == expected
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame({"Numbers": [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_frame_equal(result, expected)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
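# Roughly one million rows span several internal low-memory chunks; the two
# floats in the middle should upcast the whole column to float64.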
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
assert df.a.dtype == np.object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = ("Too many columns specified: "
"expected 4 and found 3" if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file")
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(columns=["z"],
index=MultiIndex.from_arrays(
[[]] * 2, names=["x", "y"]))
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(columns=["z"],
index=MultiIndex.from_arrays(
[[]] * 2, names=["y", "x"]))
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(all_parsers):
# see gh-12215
df = DataFrame.from_dict(OrderedDict([("w", ["2e"]), ("x", ["3E"]),
("y", ["42e"]),
("z", ["632E"])]))
data = df.to_csv(index=False)
parser = all_parsers
for precision in parser.float_precision_choices:
df_roundtrip = parser.read_csv(StringIO(data),
float_precision=precision)
tm.assert_frame_equal(df_roundtrip, df)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(["00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166"], columns=["ID"])
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = ("(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)")
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize("val", [
np.iinfo(np.uint64).max,
np.iinfo(np.int64).max,
np.iinfo(np.int64).min
])
def test_int64_uint64_range(all_parsers, val):
# These numbers fall right at the edges of the int64/uint64
# range, so they should still be parsed as integers, not strings.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("val", [
np.iinfo(np.uint64).max + 1,
np.iinfo(np.int64).min - 1
])
def test_outside_int64_uint64_range(all_parsers, val):
# These numbers fall just outside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp_data", [[str(-1), str(2**63)],
[str(2**63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
result = next(iter(parser.read_csv(data, chunksize=nrows)))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected,msg", [
# gh-10728: WHITESPACE_LINE
("a,b,c\n4,5,6\n ", dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None),
# gh-10548: EAT_LINE_COMMENT
("a,b,c\n4,5,6\n#comment", dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None),
# EAT_CRNL_NOP
("a,b,c\n4,5,6\n\r", dict(),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None),
# EAT_COMMENT
("a,b,c\n4,5,6#comment", dict(comment="#"),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None),
# SKIP_LINE
("a,b,c\n4,5,6\nskipme", dict(skiprows=[2]),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None),
# EAT_LINE_COMMENT
("a,b,c\n4,5,6\n#comment", dict(comment="#", skip_blank_lines=False),
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), None),
# IN_FIELD
("a,b,c\n4,5,6\n ", dict(skip_blank_lines=False),
DataFrame([["4", 5, 6], [" ", None, None]],
columns=["a", "b", "c"]), None),
# EAT_CRNL
("a,b,c\n4,5,6\n\r", dict(skip_blank_lines=False),
DataFrame([[4, 5, 6], [None, None, None]],
columns=["a", "b", "c"]), None),
# ESCAPED_CHAR
("a,b,c\n4,5,6\n\\", dict(escapechar="\\"),
None, "(EOF following escape character)|(unexpected end of data)"),
# ESCAPE_IN_QUOTED_FIELD
('a,b,c\n4,5,6\n"\\', dict(escapechar="\\"),
None, "(EOF inside string starting at row 2)|(unexpected end of data)"),
# IN_QUOTED_FIELD
('a,b,c\n4,5,6\n"', dict(escapechar="\\"),
None, "(EOF inside string starting at row 2)|(unexpected end of data)"),
], ids=["whitespace-line", "eat-line-comment", "eat-crnl-nop", "eat-comment",
"skip-line", "eat-line-comment", "in-field", "eat-crnl",
"escaped-char", "escape-in-quoted-field", "in-quoted-field"])
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({
"a": [0, 3, 8],
"b": [1, 4, 9]
})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
# First, check that the parser raises the correct error when no
# columns are provided, with or without usecols.
("", dict(), None),
("", dict(usecols=["X"]), None),
(",,", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"], index=[0], dtype=np.float64)),
("", dict(names=["Dummy", "X", "Dummy_2"], usecols=["X"]),
DataFrame(columns=["X"])),
])
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs,expected", [
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(dict(header=None, delim_whitespace=True, skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True), DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])),
# gh-8983: test skipping a set of rows after a row with trailing spaces.
(dict(delim_whitespace=True, skiprows=[1, 2, 3, 5, 6],
skip_blank_lines=True), DataFrame({"A": [1., 5.1],
"B": [2., np.nan],
"C": [4., 10]})),
])
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(StringIO(data), skipinitialspace=True,
delim_whitespace=delim_whitespace)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sep,skip_blank_lines,exp_data", [
(",", True, [[1., 2., 4.], [5., np.nan, 10.], [-70., .4, 1.]]),
(r"\s+", True, [[1., 2., 4.], [5., np.nan, 10.], [-70., .4, 1.]]),
(",", False, [[1., 2., 4.], [np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan], [5., np.nan, 10.],
[np.nan, np.nan, np.nan], [-70., .4, 1.]]),
])
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep,
skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2., 4.], [5., np.nan, 10.]],
columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,expected", [
(""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""", DataFrame([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"], index=["a", "b", "c"])),
(" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])),
])
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_verbose_read(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
parser.read_csv(StringIO(data), verbose=True)
captured = capsys.readouterr()
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 3 NA values in column a\n"
def test_verbose_read2(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
parser.read_csv(StringIO(data), verbose=True, index_col=0)
captured = capsys.readouterr()
# Engines are verbose in different ways.
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 1 NA values in column a\n"
def test_iteration_open_handle(all_parsers):
parser = all_parsers
kwargs = dict(squeeze=True, header=None)
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path, "r") as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data,thousands,decimal", [
("""A|B|C
1|2,334.01|5
10|13|10.
""", ",", "."),
("""A|B|C
1|2.334,01|5
10|13|10,
""", ".", ","),
])
def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
parser = all_parsers
expected = DataFrame({
"A": [1, 10],
"B": [2334.01, 13],
"C": [5, 10.]
})
result = parser.read_csv(StringIO(data), sep="|",
thousands=thousands,
decimal=decimal)
tm.assert_frame_equal(result, expected)
def test_euro_decimal_format(all_parsers):
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
result = parser.read_csv(StringIO(data), sep=";", decimal=",")
expected = DataFrame([
[1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
[2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
[3, 878.158, 108013.434, "GHI", "rez", 2.735694704]
], columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_inf_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
expected = DataFrame({"A": [float("inf"), float("-inf")] * 5},
index=["a", "b", "c", "d", "e",
"f", "g", "h", "i", "j"])
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
data = "\n" * nrows
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data))
def test_memory_map(all_parsers, csv_dir_path):
mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
parser = all_parsers
expected = DataFrame({
"a": [1, 2, 3],
"b": ["one", "two", "three"],
"c": ["I", "II", "III"]
})
result = parser.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(result, expected)
def test_null_byte_char(all_parsers):
# see gh-2741
data = "\x00,foo"
names = ["a", "b"]
parser = all_parsers
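# The C engine treats an embedded NUL byte as missing data, while the
# Python engine rejects it with a ParserError.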
if parser.engine == "c":
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
@pytest.mark.parametrize("data,kwargs,expected", [
# Basic test
("a\n1", dict(), DataFrame({"a": [1]})),
# "Regular" quoting
('"a"\n1', dict(quotechar='"'), DataFrame({"a": [1]})),
# Test in a data row instead of header
("b\n1", dict(names=["a"]), DataFrame({"a": ["b", "1"]})),
# Test in empty data row with skipping
("\n1", dict(names=["a"], skip_blank_lines=True), DataFrame({"a": [1]})),
# Test in empty data row without skipping
("\n1", dict(names=["a"], skip_blank_lines=False),
DataFrame({"a": [np.nan, 1]})),
])
def test_utf8_bom(all_parsers, data, kwargs, expected):
# see gh-4793
parser = all_parsers
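# "\ufeff" is the Unicode BOM code point; prepending it and encoding as
# UTF-8 yields a UTF-8 byte order mark at the start of the stream.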
bom = "\ufeff"
utf8 = "utf-8"
def _encode_data_with_bom(_data):
bom_data = (bom + _data).encode(utf8)
return BytesIO(bom_data)
result = parser.read_csv(_encode_data_with_bom(data),
encoding=utf8, **kwargs)
tm.assert_frame_equal(result, expected)
def test_temporary_file(all_parsers):
# see gh-13398
parser = all_parsers
data = "0 0"
new_file = TemporaryFile("w+")
new_file.write(data)
new_file.flush()
new_file.seek(0)
result = parser.read_csv(new_file, sep=r"\s+", header=None)
new_file.close()
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("byte", [8, 16])
@pytest.mark.parametrize("fmt", ["utf-{0}", "utf_{0}",
"UTF-{0}", "UTF_{0}"])
def test_read_csv_utf_aliases(all_parsers, byte, fmt):
# see gh-13549
expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]})
parser = all_parsers
encoding = fmt.format(byte)
data = "mb_num,multibyte\n4.8,test".encode(encoding)
result = parser.read_csv(BytesIO(data), encoding=encoding)
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
# see gh-5500
parser = all_parsers
data = "a,b\n1\x1a,2"
expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte_to_file(all_parsers):
# see gh-16559
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]],
columns=["c1", "c2"])
path = "__%s__.csv" % tm.rands(10)
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
f.write(data)
result = parser.read_csv(path)
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
def test_file_handle_string_io(all_parsers):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
data = "a,b\n1,2"
fh = StringIO(data)
parser.read_csv(fh)
assert not fh.closed
def test_file_handles_with_open(all_parsers, csv1):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
with open(csv1, "r") as f:
parser.read_csv(f)
assert not f.closed
def test_invalid_file_buffer_class(all_parsers):
# see gh-15337
class InvalidBuffer(object):
pass
parser = all_parsers
msg = "Invalid file path or buffer object type"
with pytest.raises(ValueError, match=msg):
parser.read_csv(InvalidBuffer())
def test_invalid_file_buffer_mock(all_parsers):
# see gh-15337
parser = all_parsers
msg = "Invalid file path or buffer object type"
class Foo():
pass
with pytest.raises(ValueError, match=msg):
parser.read_csv(Foo())
def test_valid_file_buffer_seems_invalid(all_parsers):
# gh-16135: we want to ensure that "tell" and "seek"
# aren't actually being used when we call `read_csv`
#
# Thus, while the object may look "invalid" (these
# methods are attributes of the `StringIO` class),
# it is still a valid file-object for our purposes.
class NoSeekTellBuffer(StringIO):
def tell(self):
raise AttributeError("No tell method")
def seek(self, pos, whence=0):
raise AttributeError("No seek method")
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(NoSeekTellBuffer(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [
dict(), # Default is True.
dict(error_bad_lines=True), # Explicitly pass in.
])
@pytest.mark.parametrize("warn_kwargs", [
dict(), dict(warn_bad_lines=True),
dict(warn_bad_lines=False)
])
def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
# see gh-15925
parser = all_parsers
kwargs.update(**warn_kwargs)
data = "a\n1\n1,2,3\n4\n5,6,7"
msg = "Expected 1 fields in line 3, saw 3"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_warn_bad_lines(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(StringIO(data),
error_bad_lines=False,
warn_bad_lines=True)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert "Skipping line 3" in captured.err
assert "Skipping line 5" in captured.err
def test_suppress_error_output(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(StringIO(data),
error_bad_lines=False,
warn_bad_lines=False)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert captured.err == ""
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY36,
reason="On Python < 3.6 won't pass on Windows")
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with tm.ensure_clean(filename) as path:
df.to_csv(path, index=False)
result = parser.read_csv(path)
tm.assert_frame_equal(result, df)
def test_read_csv_memory_growth_chunksize(all_parsers):
# see gh-24805
#
# Let's just make sure that we don't crash
# as we iteratively process all chunks.
parser = all_parsers
with tm.ensure_clean() as path:
with open(path, "w") as f:
for i in range(1000):
f.write(str(i) + "\n")
result = parser.read_csv(path, chunksize=20)
for _ in result:
pass
def test_read_table_deprecated(all_parsers):
# see gh-21948
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
| 31.05176
| 194
| 0.594996
|
a4d8388933c1181b096b272546f4684bd8a9b0a6
| 11,709
|
py
|
Python
|
veles/tests/test_workflow.py
|
AkshayJainG/veles
|
21106f41a8e7e7e74453cd16a5059a0e6b1c315e
|
[
"Apache-2.0"
] | 1,007
|
2015-07-20T12:01:41.000Z
|
2022-03-30T23:08:35.000Z
|
veles/tests/test_workflow.py
|
AkshayJainG/veles
|
21106f41a8e7e7e74453cd16a5059a0e6b1c315e
|
[
"Apache-2.0"
] | 52
|
2015-07-21T10:26:24.000Z
|
2019-01-24T05:46:43.000Z
|
veles/tests/test_workflow.py
|
AkshayJainG/veles
|
21106f41a8e7e7e74453cd16a5059a0e6b1c315e
|
[
"Apache-2.0"
] | 235
|
2015-07-20T09:42:42.000Z
|
2021-12-06T18:12:26.000Z
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jun 16, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import gc
import six
import unittest
import weakref
from zope.interface.verify import verifyObject
from veles.snapshotter import SnapshotterBase
from veles.workflow import Workflow
from veles.distributable import IDistributable
from veles.units import TrivialUnit
from veles.tests import DummyLauncher
from veles.workflow import StartPoint
from veles.tests import DummyWorkflow
from veles.pickle2 import pickle
class Test(unittest.TestCase):
def add_units(self, wf):
u1 = TrivialUnit(wf, name="unit1")
u1.tag = 0
u1.link_from(wf.start_point)
u2 = TrivialUnit(wf, name="unit1")
u2.tag = 1
u2.link_from(u1)
u3 = TrivialUnit(wf, name="unit1")
u3.tag = 2
u3.link_from(u2)
u4 = TrivialUnit(wf, name="unit2")
u4.link_from(u3)
u5 = TrivialUnit(wf, name="aaa")
u5.link_from(u4)
wf.end_point.link_from(u5)
def testIterator(self):
wf = Workflow(DummyLauncher())
self.add_units(wf)
self.assertEqual(7, len(wf))
units = list(wf)
self.assertEqual(7, len(units))
self.assertEqual("Start of Workflow", units[0].name)
self.assertEqual("End of Workflow", units[1].name)
self.assertEqual("unit1", units[2].name)
self.assertEqual("unit1", units[3].name)
self.assertEqual("unit1", units[4].name)
self.assertEqual("unit2", units[5].name)
self.assertEqual("aaa", units[6].name)
self.assertEqual(0, units[2].tag)
self.assertEqual(1, units[3].tag)
self.assertEqual(2, units[4].tag)
def testIndex(self):
wf = Workflow(DummyLauncher())
self.add_units(wf)
unit1 = wf["unit1"]
self.assertTrue(isinstance(unit1, list))
self.assertEqual(3, len(unit1))
self.assertEqual(0, unit1[0].tag)
self.assertEqual("unit1", unit1[0].name)
self.assertEqual(1, unit1[1].tag)
self.assertEqual("unit1", unit1[1].name)
self.assertEqual(2, unit1[2].tag)
self.assertEqual("unit1", unit1[2].name)
unit2 = wf["unit2"]
self.assertTrue(isinstance(unit2, TrivialUnit))
self.assertEqual("unit2", unit2.name)
raises = False
try:
wf["fail"]
except KeyError:
raises = True
self.assertTrue(raises)
unit = wf[0]
self.assertEqual("Start of Workflow", unit.name)
unit = wf[1]
self.assertEqual("End of Workflow", unit.name)
unit = wf[2]
self.assertEqual(0, unit.tag)
self.assertEqual("unit1", unit.name)
unit = wf[3]
self.assertEqual(1, unit.tag)
self.assertEqual("unit1", unit.name)
unit = wf[4]
self.assertEqual(2, unit.tag)
self.assertEqual("unit1", unit.name)
unit = wf[5]
self.assertEqual("unit2", unit.name)
unit = wf[6]
self.assertEqual("aaa", unit.name)
raises = False
try:
wf[7]
except IndexError:
raises = True
self.assertTrue(raises)
def testUnits(self):
wf = Workflow(DummyLauncher())
self.add_units(wf)
units = wf.units
self.assertTrue(isinstance(units, list))
self.assertEqual(7, len(units))
self.assertEqual("Start of Workflow", units[0].name)
self.assertEqual("End of Workflow", units[1].name)
self.assertEqual("unit1", units[2].name)
self.assertEqual("unit1", units[3].name)
self.assertEqual("unit1", units[4].name)
self.assertEqual("unit2", units[5].name)
self.assertEqual("aaa", units[6].name)
units = wf.units_in_dependency_order
self.assertTrue(hasattr(units, "__iter__"))
units = list(units)
self.assertEqual(7, len(units))
self.assertEqual("Start of Workflow", units[0].name)
self.assertEqual("unit1", units[1].name)
self.assertEqual("unit1", units[2].name)
self.assertEqual("unit1", units[3].name)
self.assertEqual("unit2", units[4].name)
self.assertEqual("aaa", units[5].name)
self.assertEqual("End of Workflow", units[6].name)
def testGraph(self):
wf = Workflow(DummyLauncher())
self.add_units(wf)
dot, _ = wf.generate_graph(write_on_disk=False)
ids = []
for unit in wf:
ids.append(hex(id(unit)))
ids.append(ids[-1])
ids.append(ids[-1])
# Move EndPoint to the tail
backup = ids[3:6]
ids[3:-3] = ids[6:]
ids[-3:] = backup
ids = ids[1:-1]
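# Each unit id is used for its node definition and for the adjacent edge
# definitions; the start and end nodes border only one edge each, so one
# of their three copies is dropped.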
valid = ('digraph Workflow {\n'
'bgcolor=transparent;\n'
'mindist=0.5;\n'
'outputorder=edgesfirst;\n'
'overlap=false;\n'
'"%s" [fillcolor=lightgrey, gradientangle=90, '
'label=<<b><font point-size="18">Start of Workflow</font>'
'</b><br/><font point-size="14">'
'plumbing.py</font>>, shape=rect, '
'style="rounded,filled"];\n'
'"%s" -> "%s" [penwidth=3, weight=100];\n'
'"%s" [fillcolor=white, gradientangle=90, '
'label=<<b><font point-size="18">unit1</font></b><br/>'
'<font point-size="14">units.py'
'</font>>, shape=rect, style="rounded,filled"];\n'
'"%s" -> "%s" [penwidth=3, weight=100];\n'
'"%s" [fillcolor=white, gradientangle=90, '
'label=<<b><font point-size="18">unit1</font></b><br/>'
'<font point-size="14">units.py'
'</font>>, shape=rect, style="rounded,filled"];\n'
'"%s" -> "%s" [penwidth=3, weight=100];\n'
'"%s" [fillcolor=white, gradientangle=90, '
'label=<<b><font point-size="18">unit1</font></b><br/>'
'<font point-size="14">units.py'
'</font>>, shape=rect, style="rounded,filled"];\n'
'"%s" -> "%s" [penwidth=3, weight=100];\n'
'"%s" [fillcolor=white, gradientangle=90, '
'label=<<b><font point-size="18">unit2</font></b><br/>'
'<font point-size="14">units.py'
'</font>>, shape=rect, style="rounded,filled"];\n'
'"%s" -> "%s" [penwidth=3, weight=100];\n'
'"%s" [fillcolor=white, gradientangle=90, '
'label=<<b><font point-size="18">aaa</font></b><br/>'
'<font point-size="14">units.py'
'</font>>, shape=rect, style="rounded,filled"];\n'
'"%s" -> "%s" [penwidth=3, weight=100];\n'
'"%s" [fillcolor=lightgrey, gradientangle=90, '
'label=<<b><font point-size="18">End of Workflow</font>'
'</b><br/><font point-size="14">plumbing.py'
'</font>>, shape=rect, style="rounded,filled"];\n'
'}') % tuple(ids)
self.maxDiff = None
self.assertEqual(valid, dot)
def testStartPoint(self):
dwf = DummyWorkflow()
sp = StartPoint(dwf)
verifyObject(IDistributable, sp)
sp = pickle.loads(pickle.dumps(sp))
verifyObject(IDistributable, sp)
self.assertEqual(sp.workflow, None)
del dwf
if six.PY3:
def testWithDestruction(self):
flag = [False, False]
class MyUnit(TrivialUnit):
def __del__(self):
flag[0] = True
class MyWorkflow(Workflow):
def __del__(self):
flag[1] = True
with MyWorkflow(DummyLauncher()) as wf:
u = MyUnit(wf)
self.assertEqual(len(wf), 3)
self.assertEqual(u.workflow, wf)
self.assertEqual(len(wf), 2)
self.assertEqual(u.workflow, wf)
self.assertIsInstance(u._workflow_, weakref.ReferenceType)
del wf
gc.collect()
self.assertTrue(flag[1])
del u
gc.collect()
self.assertTrue(flag[0])
def testDestruction(self):
flag = [False, False]
class MyUnit(TrivialUnit):
def __del__(self):
flag[0] = True
class MyWorkflow(Workflow):
def __del__(self):
flag[1] = True
wf = MyWorkflow(DummyLauncher())
u = MyUnit(wf)
self.assertEqual(len(wf), 3)
self.assertEqual(u.workflow, wf)
del u
del wf
gc.collect()
self.assertTrue(flag[0])
self.assertTrue(flag[1])
def testPickling(self):
dl = DummyLauncher()
wf = Workflow(dl)
TrivialUnit(wf)
w2 = pickle.loads(pickle.dumps(wf))
self.assertEqual(len(w2), len(wf))
def testRestoredFromSnapshot(self):
dl = DummyLauncher()
wf = Workflow(dl)
self.assertFalse(wf.restored_from_snapshot)
self.assertFalse(wf.start_point.restored_from_snapshot)
self.assertIsNone(wf._restored_from_snapshot_)
wf._restored_from_snapshot_ = True
self.assertTrue(wf.restored_from_snapshot)
self.assertTrue(wf.start_point.restored_from_snapshot)
wf._restored_from_snapshot_ = False
self.assertFalse(wf.restored_from_snapshot)
self.assertFalse(wf.start_point.restored_from_snapshot)
w2 = SnapshotterBase._import_fobj(six.BytesIO(pickle.dumps(wf)))
self.assertTrue(w2.restored_from_snapshot)
self.assertTrue(w2.start_point.restored_from_snapshot)
self.assertTrue(w2._restored_from_snapshot_)
w2.end_point.link_from(w2.start_point)
w2.workflow = dl
w2.initialize()
self.assertFalse(w2.restored_from_snapshot)
self.assertFalse(w2.start_point.restored_from_snapshot)
self.assertIsNone(w2._restored_from_snapshot_)
w2.link_from(wf)
wf.end_point.link_from(w2)
w2.workflow = wf
self.assertFalse(w2.restored_from_snapshot)
self.assertFalse(wf.restored_from_snapshot)
wf._restored_from_snapshot_ = True
self.assertTrue(w2.restored_from_snapshot)
self.assertTrue(w2.start_point.restored_from_snapshot)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testIterator']
unittest.main()
| 37.408946
| 79
| 0.564352
|
b5d77fd3c49b625eae746b86b2e3a401d1e46618
| 18,949
|
py
|
Python
|
python/pyarrow/tests/test_feather.py
|
prutskov/arrow
|
e570db9c45ca97f77c5633e5525c02f55dbb6c4b
|
[
"Apache-2.0"
] | null | null | null |
python/pyarrow/tests/test_feather.py
|
prutskov/arrow
|
e570db9c45ca97f77c5633e5525c02f55dbb6c4b
|
[
"Apache-2.0"
] | 8
|
2020-04-10T19:03:51.000Z
|
2021-01-21T01:06:28.000Z
|
python/pyarrow/tests/test_feather.py
|
signavio/arrow
|
866e6a82e2794b151235c19b8c5cbf1fcaf780ef
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import sys
import tempfile
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.feather import (read_feather, write_feather, read_table,
FeatherDataset)
try:
from pandas.testing import assert_frame_equal
import pandas as pd
import pyarrow.pandas_compat
except ImportError:
pass
def random_path(prefix='feather_'):
return tempfile.mktemp(prefix=prefix)
@pytest.fixture(scope="module", params=[1, 2])
def version(request):
yield request.param
TEST_FILES = None
def setup_module(module):
global TEST_FILES
TEST_FILES = []
def teardown_module(module):
for path in TEST_FILES:
try:
os.remove(path)
except os.error:
pass
@pytest.mark.pandas
def test_file_not_exist():
with pytest.raises(pa.ArrowIOError):
read_feather('test_invalid_file')
def _check_pandas_roundtrip(df, expected=None, path=None,
columns=None, use_threads=False,
version=None, compression=None,
compression_level=None):
if path is None:
path = random_path()
TEST_FILES.append(path)
write_feather(df, path, compression=compression,
compression_level=compression_level, version=version)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, use_threads=use_threads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
def _assert_error_on_write(df, exc, path=None):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
TEST_FILES.append(path)
def f():
write_feather(df, path)
pytest.raises(exc, f)
@pytest.mark.pandas
def test_dataset(version):
num_values = (100, 100)
num_files = 5
paths = [random_path() for i in range(num_files)]
df = pd.DataFrame(np.random.randn(*num_values),
columns=['col_' + str(i)
for i in range(num_values[1])])
TEST_FILES.extend(paths)
for index, path in enumerate(paths):
rows = (index * (num_values[0] // num_files),
(index + 1) * (num_values[0] // num_files))
write_feather(df.iloc[rows[0]:rows[1]], path, version=version)
data = FeatherDataset(paths).read_pandas()
assert_frame_equal(data, df)
@pytest.mark.pandas
def test_float_no_nulls(version):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_read_table(version):
num_values = (100, 100)
path = random_path()
TEST_FILES.append(path)
values = np.random.randint(0, 100, size=num_values)
df = pd.DataFrame(values, columns=['col_' + str(i)
for i in range(100)])
write_feather(df, path, version=version)
data = pd.DataFrame(values,
columns=['col_' + str(i) for i in range(100)])
table = pa.Table.from_pandas(data)
result = read_table(path)
assert_frame_equal(table.to_pandas(), result.to_pandas())
# Test without memory mapping
result = read_table(path, memory_map=False)
assert_frame_equal(table.to_pandas(), result.to_pandas())
@pytest.mark.pandas
def test_float_nulls(version):
num_values = 100
path = random_path()
TEST_FILES.append(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
arrays = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
arrays.append(pa.array(values, mask=null_mask))
values[null_mask] = np.nan
expected_cols.append(values)
table = pa.table(arrays, names=dtypes)
write_feather(table, path, version=version)
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
@pytest.mark.pandas
def test_integer_no_nulls(version):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_platform_numpy_integers(version):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_integer_with_nulls(version):
# pandas requires upcast to float dtype
path = random_path()
TEST_FILES.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
arrays = []
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arrays.append(pa.array(values, mask=null_mask))
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
table = pa.table(arrays, names=int_dtypes)
write_feather(table, path, version=version)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
@pytest.mark.pandas
def test_boolean_no_nulls(version):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_boolean_nulls(version):
# pandas requires upcast to object dtype
path = random_path()
TEST_FILES.append(path)
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
table = pa.table([pa.array(values, mask=mask)], names=['bools'])
write_feather(table, path, version=version)
expected = values.astype(object)
expected[mask] = None
ex_frame = pd.DataFrame({'bools': expected})
result = read_feather(path)
assert_frame_equal(result, ex_frame)
@pytest.mark.pandas
def test_buffer_bounds_error(version):
# ARROW-1676
path = random_path()
TEST_FILES.append(path)
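# Vary the array length (with a leading null) so the validity and data
# buffers take a range of sizes.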
for i in range(16, 256):
values = pa.array([None] + list(range(i)), type=pa.float64())
write_feather(pa.table([values], names=['arr']), path,
version=version)
result = read_feather(path)
expected = pd.DataFrame({'arr': values.to_pandas()})
assert_frame_equal(result, expected)
_check_pandas_roundtrip(expected, version=version)
@pytest.mark.pandas
def test_boolean_object_nulls(version):
repeats = 100
arr = np.array([False, None, True] * repeats, dtype=object)
df = pd.DataFrame({'bools': arr})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_delete_partial_file_on_error(version):
if sys.platform == 'win32':
pytest.skip('Windows hangs on to file handle for some reason')
class CustomClass:
pass
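# A CustomClass instance cannot be converted to an Arrow type, so the
# write below should fail and the partially written file must be removed.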
# strings will fail
df = pd.DataFrame(
{
'numbers': range(5),
'strings': [b'foo', None, 'bar', CustomClass(), np.nan]},
columns=['numbers', 'strings'])
path = random_path()
try:
write_feather(df, path, version=version)
except Exception:
pass
assert not os.path.exists(path)
@pytest.mark.pandas
def test_strings(version):
repeats = 1000
# Mixed bytes, unicode, strings coerced to binary
values = [b'foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
ex_values = [b'foo', None, b'bar', b'qux', np.nan]
expected = pd.DataFrame({'strings': ex_values * repeats})
_check_pandas_roundtrip(df, expected, version=version)
# embedded nulls are ok
values = ['foo', None, 'bar', 'qux', None]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
_check_pandas_roundtrip(df, expected, version=version)
values = ['foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_empty_strings(version):
df = pd.DataFrame({'strings': [''] * 10})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_all_none(version):
df = pd.DataFrame({'all_none': [None] * 10})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_all_null_category(version):
# ARROW-1188
df = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
df = df.assign(B=df.B.astype("category"))
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_multithreaded_read(version):
data = {'c{}'.format(i): [''] * 10
for i in range(100)}
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, use_threads=True, version=version)
@pytest.mark.pandas
def test_nan_as_null(version):
# Create a nan that is not numpy.nan
values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
df = pd.DataFrame({'strings': values})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_category(version):
repeats = 1000
values = ['foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
df['strings'] = df['strings'].astype('category')
values = ['foo', None, 'bar', 'qux', None]
expected = pd.DataFrame({'strings': pd.Categorical(values * repeats)})
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_timestamp(version):
df = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
df['with_tz'] = (df.naive.dt.tz_localize('utc')
.dt.tz_convert('America/Los_Angeles'))
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_timestamp_with_nulls(version):
df = pd.DataFrame({'test': [pd.Timestamp(2016, 1, 1),
None,
pd.Timestamp(2016, 1, 3)]})
df['with_tz'] = df.test.dt.tz_localize('utc')
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
@pytest.mark.xfail(reason="not supported", raises=TypeError)
def test_timedelta_with_nulls_v1():
df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
None,
pd.Timedelta('3 day')]})
_check_pandas_roundtrip(df, version=1)
@pytest.mark.pandas
def test_timedelta_with_nulls():
df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
None,
pd.Timedelta('3 day')]})
_check_pandas_roundtrip(df, version=2)
@pytest.mark.pandas
def test_out_of_float64_timestamp_with_nulls(version):
df = pd.DataFrame(
{'test': pd.DatetimeIndex([1451606400000000001,
None, 14516064000030405])})
df['with_tz'] = df.test.dt.tz_localize('utc')
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_non_string_columns(version):
df = pd.DataFrame({0: [1, 2, 3, 4],
1: [True, False, True, False]})
expected = df.rename(columns=str)
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
@pytest.mark.skipif(not os.path.supports_unicode_filenames,
reason='unicode filenames not supported')
def test_unicode_filename(version):
# GH #209
name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8')
df = pd.DataFrame({'foo': [1, 2, 3, 4]})
_check_pandas_roundtrip(df, path=random_path(prefix=name),
version=version)
@pytest.mark.pandas
def test_read_columns(version):
df = pd.DataFrame({
'foo': [1, 2, 3, 4],
'boo': [5, 6, 7, 8],
'woo': [1, 3, 5, 7]
})
expected = df[['boo', 'woo']]
_check_pandas_roundtrip(df, expected, version=version,
columns=['boo', 'woo'])
@pytest.mark.pandas
def test_overwritten_file(version):
path = random_path()
TEST_FILES.append(path)
num_values = 100
np.random.seed(0)
values = np.random.randint(0, 10, size=num_values)
write_feather(pd.DataFrame({'ints': values}), path, version=version)
df = pd.DataFrame({'ints': values[0: num_values//2]})
_check_pandas_roundtrip(df, path=path, version=version)
@pytest.mark.pandas
def test_filelike_objects(version):
buf = io.BytesIO()
# the copy makes it non-strided
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=['a', 'b', 'c']).copy()
write_feather(df, buf, version=version)
buf.seek(0)
result = read_feather(buf)
assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
def test_sparse_dataframe(version):
if not pa.pandas_compat._pandas_api.has_sparse:
pytest.skip("version of pandas does not support SparseDataFrame")
# GH #221
data = {'A': [0, 1, 2],
'B': [1, 0, 1]}
df = pd.DataFrame(data).to_sparse(fill_value=1)
expected = df.to_dense()
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_duplicate_columns():
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
_assert_error_on_write(df, ValueError)
@pytest.mark.pandas
def test_unsupported():
# https://github.com/wesm/feather/issues/240
# serializing actual python objects
# custom python objects
class A:
pass
df = pd.DataFrame({'a': [A(), A()]})
_assert_error_on_write(df, ValueError)
# non-strings
df = pd.DataFrame({'a': ['a', 1, 2.0]})
_assert_error_on_write(df, TypeError)
@pytest.mark.pandas
def test_v2_set_chunksize():
df = pd.DataFrame({'A': np.arange(1000)})
table = pa.table(df)
buf = io.BytesIO()
write_feather(table, buf, chunksize=250, version=2)
result = buf.getvalue()
ipc_file = pa.ipc.open_file(pa.BufferReader(result))
assert ipc_file.num_record_batches == 4
assert len(ipc_file.get_batch(0)) == 250
@pytest.mark.pandas
def test_v2_compression_options():
df = pd.DataFrame({'A': np.arange(1000)})
cases = [
# compression, compression_level
('uncompressed', None),
('lz4', None),
('zstd', 1),
('zstd', 10)
]
for compression, compression_level in cases:
_check_pandas_roundtrip(df, compression=compression,
compression_level=compression_level)
buf = io.BytesIO()
# LZ4 doesn't support compression_level
with pytest.raises(pa.ArrowInvalid,
match="doesn't support setting a compression level"):
write_feather(df, buf, compression='lz4', compression_level=10)
# Trying to compress with V1
with pytest.raises(
ValueError,
match="Feather V1 files do not support compression option"):
write_feather(df, buf, compression='lz4', version=1)
# Trying to set chunksize with V1
with pytest.raises(
ValueError,
match="Feather V1 files do not support chunksize option"):
write_feather(df, buf, chunksize=4096, version=1)
# Unsupported compressor
with pytest.raises(ValueError,
match='compression="snappy" not supported'):
write_feather(df, buf, compression='snappy')
def test_v1_unsupported_types():
table = pa.table([pa.array([[1, 2, 3], [], None])], names=['f0'])
buf = io.BytesIO()
with pytest.raises(TypeError,
match=("Unsupported Feather V1 type: "
"list<item: int64>. "
"Use V2 format to serialize all Arrow types.")):
write_feather(table, buf, version=1)
@pytest.mark.slow
@pytest.mark.pandas
def test_large_dataframe(version):
df = pd.DataFrame({'A': np.arange(400000000)})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.large_memory
@pytest.mark.pandas
def test_chunked_binary_error_message():
# ARROW-3058: As Feather does not yet support chunked columns, we at least
# make sure it's clear to the user what is going on
# 2^31 + 1 bytes
values = [b'x'] + [
b'x' * (1 << 20)
] * 2 * (1 << 10)
df = pd.DataFrame({'byte_col': values})
# Works fine with version 2
buf = io.BytesIO()
write_feather(df, buf, version=2)
result = read_feather(pa.BufferReader(buf.getvalue()))
assert_frame_equal(result, df)
with pytest.raises(ValueError, match="'byte_col' exceeds 2GB maximum "
"capacity of a Feather binary column. This restriction "
"may be lifted in the future"):
write_feather(df, io.BytesIO(), version=1)
def test_feather_without_pandas(tempdir, version):
# ARROW-8345
table = pa.table([pa.array([1, 2, 3])], names=['f0'])
write_feather(table, str(tempdir / "data.feather"), version=version)
result = read_table(str(tempdir / "data.feather"))
assert result.equals(table)
| 28.494737
| 79
| 0.641723
|
10a43d048b73ac7bf087e644cd52843ffbfa7c51
| 6,807
|
py
|
Python
|
alipay/aop/api/domain/AlipayEcoCplifeCommunityModifyModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayEcoCplifeCommunityModifyModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayEcoCplifeCommunityModifyModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoCplifeCommunityModifyModel(object):
def __init__(self):
self._associated_pois = None
self._city_code = None
self._community_address = None
self._community_id = None
self._community_locations = None
self._community_name = None
self._district_code = None
self._hotline = None
self._out_community_id = None
self._province_code = None
@property
def associated_pois(self):
return self._associated_pois
@associated_pois.setter
def associated_pois(self, value):
if isinstance(value, list):
self._associated_pois = list()
for i in value:
self._associated_pois.append(i)
@property
def city_code(self):
return self._city_code
@city_code.setter
def city_code(self, value):
self._city_code = value
@property
def community_address(self):
return self._community_address
@community_address.setter
def community_address(self, value):
self._community_address = value
@property
def community_id(self):
return self._community_id
@community_id.setter
def community_id(self, value):
self._community_id = value
@property
def community_locations(self):
return self._community_locations
@community_locations.setter
def community_locations(self, value):
if isinstance(value, list):
self._community_locations = list()
for i in value:
self._community_locations.append(i)
@property
def community_name(self):
return self._community_name
@community_name.setter
def community_name(self, value):
self._community_name = value
@property
def district_code(self):
return self._district_code
@district_code.setter
def district_code(self, value):
self._district_code = value
@property
def hotline(self):
return self._hotline
@hotline.setter
def hotline(self, value):
self._hotline = value
@property
def out_community_id(self):
return self._out_community_id
@out_community_id.setter
def out_community_id(self, value):
self._out_community_id = value
@property
def province_code(self):
return self._province_code
@province_code.setter
def province_code(self, value):
self._province_code = value
def to_alipay_dict(self):
params = dict()
if self.associated_pois:
if isinstance(self.associated_pois, list):
for i in range(0, len(self.associated_pois)):
element = self.associated_pois[i]
if hasattr(element, 'to_alipay_dict'):
self.associated_pois[i] = element.to_alipay_dict()
if hasattr(self.associated_pois, 'to_alipay_dict'):
params['associated_pois'] = self.associated_pois.to_alipay_dict()
else:
params['associated_pois'] = self.associated_pois
if self.city_code:
if hasattr(self.city_code, 'to_alipay_dict'):
params['city_code'] = self.city_code.to_alipay_dict()
else:
params['city_code'] = self.city_code
if self.community_address:
if hasattr(self.community_address, 'to_alipay_dict'):
params['community_address'] = self.community_address.to_alipay_dict()
else:
params['community_address'] = self.community_address
if self.community_id:
if hasattr(self.community_id, 'to_alipay_dict'):
params['community_id'] = self.community_id.to_alipay_dict()
else:
params['community_id'] = self.community_id
if self.community_locations:
if isinstance(self.community_locations, list):
for i in range(0, len(self.community_locations)):
element = self.community_locations[i]
if hasattr(element, 'to_alipay_dict'):
self.community_locations[i] = element.to_alipay_dict()
if hasattr(self.community_locations, 'to_alipay_dict'):
params['community_locations'] = self.community_locations.to_alipay_dict()
else:
params['community_locations'] = self.community_locations
if self.community_name:
if hasattr(self.community_name, 'to_alipay_dict'):
params['community_name'] = self.community_name.to_alipay_dict()
else:
params['community_name'] = self.community_name
if self.district_code:
if hasattr(self.district_code, 'to_alipay_dict'):
params['district_code'] = self.district_code.to_alipay_dict()
else:
params['district_code'] = self.district_code
if self.hotline:
if hasattr(self.hotline, 'to_alipay_dict'):
params['hotline'] = self.hotline.to_alipay_dict()
else:
params['hotline'] = self.hotline
if self.out_community_id:
if hasattr(self.out_community_id, 'to_alipay_dict'):
params['out_community_id'] = self.out_community_id.to_alipay_dict()
else:
params['out_community_id'] = self.out_community_id
if self.province_code:
if hasattr(self.province_code, 'to_alipay_dict'):
params['province_code'] = self.province_code.to_alipay_dict()
else:
params['province_code'] = self.province_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoCplifeCommunityModifyModel()
if 'associated_pois' in d:
o.associated_pois = d['associated_pois']
if 'city_code' in d:
o.city_code = d['city_code']
if 'community_address' in d:
o.community_address = d['community_address']
if 'community_id' in d:
o.community_id = d['community_id']
if 'community_locations' in d:
o.community_locations = d['community_locations']
if 'community_name' in d:
o.community_name = d['community_name']
if 'district_code' in d:
o.district_code = d['district_code']
if 'hotline' in d:
o.hotline = d['hotline']
if 'out_community_id' in d:
o.out_community_id = d['out_community_id']
if 'province_code' in d:
o.province_code = d['province_code']
return o
| 35.453125
| 89
| 0.613927
|
110758d9046ea080af14a37ef3f557a653821683
| 8,406
|
py
|
Python
|
scripts/vs2012.py
|
liak-git/oneos
|
e28482c0bbcea1fcff4b815533486db9fa51d57f
|
[
"Apache-2.0"
] | 2
|
2021-10-05T02:34:18.000Z
|
2022-01-18T15:22:41.000Z
|
scripts/vs2012.py
|
liak-git/oneos
|
e28482c0bbcea1fcff4b815533486db9fa51d57f
|
[
"Apache-2.0"
] | null | null | null |
scripts/vs2012.py
|
liak-git/oneos
|
e28482c0bbcea1fcff4b815533486db9fa51d57f
|
[
"Apache-2.0"
] | 2
|
2021-10-05T02:28:50.000Z
|
2022-03-23T06:39:39.000Z
|
#
# File : vs2012.py
# This file is part of CMCC IOT OS
# COPYRIGHT (C) 2012-2020, CMCC IOT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
import os
import sys
import string
import build_tools
import uuid
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
import utils
fs_encoding = sys.getfilesystemencoding()
#reference
# http://woodpecker.org.cn/diveintopython3/xml.html
# https://pycoders-weekly-chinese.readthedocs.org/en/latest/issue6/processing-xml-in-python-with-element-tree.html
# http://www.cnblogs.com/ifantastic/archive/2013/04/12/3017110.html
filter_project = etree.Element('Project', attrib={'ToolsVersion':'4.0'})
def get_uuid():
id = uuid.uuid1() # UUID('3e5526c0-2841-11e3-a376-20cf3048bcb3')
idstr = id.get_urn()[9:] #'urn:uuid:3e5526c0-2841-11e3-a376-20cf3048bcb3'[9:]
return '{'+idstr+'}'
def VS2012_AddGroup(parent, group_name, files, project_path):
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
ClCompile = SubElement(parent, 'ClCompile')
ClCompile.set('Include', path.decode(fs_encoding))
Filter = SubElement(ClCompile, 'Filter')
Filter.text='Source Files\\'+group_name
def VS2012_CreateFilter(script, project_path):
c_ItemGroup = SubElement(filter_project, 'ItemGroup')
filter_ItemGroup = SubElement(filter_project, 'ItemGroup')
Filter = SubElement(filter_ItemGroup, 'Filter')
Filter.set('Include', 'Source Files')
UniqueIdentifier = SubElement(Filter, 'UniqueIdentifier')
UniqueIdentifier.text = get_uuid()
Extensions = SubElement(Filter, 'Extensions')
Extensions.text = 'cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx'
Filter = SubElement(filter_ItemGroup, 'Filter')
Filter.set('Include', 'Header Files')
UniqueIdentifier = SubElement(Filter, 'UniqueIdentifier')
UniqueIdentifier.text = get_uuid()
Extensions = SubElement(Filter, 'Extensions')
Extensions.text = 'h;hpp;hxx;hm;inl;inc;xsd'
for group in script:
VS2012_AddGroup(c_ItemGroup, group['name'], group['src'], project_path)
Filter = SubElement(filter_ItemGroup, 'Filter')
Filter.set('Include', 'Source Files\\'+group['name'])
UniqueIdentifier = SubElement(Filter, 'UniqueIdentifier')
UniqueIdentifier.text = get_uuid()
#program: object from scons
# parent: xml node
# file_type: C or H
# files: c/h list
# project_path
def VS_add_ItemGroup(parent, file_type, files, project_path):
from build_tools import Os_Root
OS_ROOT = os.path.normpath(Os_Root)
file_dict = {'C':"ClCompile", 'H':'ClInclude'}
item_tag = file_dict[file_type]
ItemGroup = SubElement(parent, 'ItemGroup')
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
objpath = path.lower()
if len(project_path) >= len(OS_ROOT) :
if objpath.startswith(project_path.lower()) :
objpath = ''.join('bsp'+objpath[len(project_path):])
else :
objpath = ''.join('kernel'+objpath[len(OS_ROOT):])
else :
if objpath.startswith(OS_ROOT.lower()) :
objpath = ''.join('kernel'+objpath[len(OS_ROOT):])
else :
objpath = ''.join('bsp'+objpath[len(project_path):])
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
File = SubElement(ItemGroup, item_tag)
File.set('Include', path.decode(fs_encoding))
if file_type == 'C' :
ObjName = SubElement(File, 'ObjectFileName')
ObjName.text = ''.join('$(IntDir)'+objpath+'\\')
def VS_add_HeadFiles(program, elem, project_path):
utils.source_ext = []
utils.source_ext = ["h"]
for item in program:
utils.walk_children(item)
utils.source_list.sort()
# print utils.source_list
ItemGroup = SubElement(elem, 'ItemGroup')
filter_h_ItemGroup = SubElement(filter_project, 'ItemGroup')
for f in utils.source_list:
path = _make_path_relative(project_path, f)
File = SubElement(ItemGroup, 'ClInclude')
File.set('Include', path.decode(fs_encoding))
# add project.vcxproj.filter
ClInclude = SubElement(filter_h_ItemGroup, 'ClInclude')
ClInclude.set('Include', path.decode(fs_encoding))
Filter = SubElement(ClInclude, 'Filter')
Filter.text='Header Files'
def VS2012Project(target, script, program):
project_path = os.path.dirname(os.path.abspath(target))
tree = etree.parse('template_vs2012.vcxproj')
root = tree.getroot()
elem = root
out = file(target, 'wb')
out.write('<?xml version="1.0" encoding="UTF-8"?>\r\n')
ProjectFiles = []
# add "*.c or *.h" files
VS2012_CreateFilter(script, project_path)
# add "*.c" files
for group in script:
VS_add_ItemGroup(elem, 'C', group['src'], project_path)
# add "*.h" files
VS_add_HeadFiles(program, elem, project_path)
# write head include path
if 'CPPPATH' in build_tools.Env:
cpp_path = build_tools.Env['CPPPATH']
paths = set()
for path in cpp_path:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
paths.sort()
cpp_path = ';'.join(paths) + ';%(AdditionalIncludeDirectories)'
# write include path
for elem in tree.iter(tag='AdditionalIncludeDirectories'):
elem.text = cpp_path
break
# write cppdefinitons flags
if 'CPPDEFINES' in build_tools.Env:
for elem in tree.iter(tag='PreprocessorDefinitions'):
definitions = ';'.join(build_tools.Env['CPPDEFINES']) + ';%(PreprocessorDefinitions)'
elem.text = definitions
break
# write link flags
# write lib dependence (Link)
if 'LIBS' in build_tools.Env:
for elem in tree.iter(tag='AdditionalDependencies'):
libs_with_extention = [i+'.lib' for i in build_tools.Env['LIBS']]
libs = ';'.join(libs_with_extention) + ';%(AdditionalDependencies)'
elem.text = libs
break
# write lib include path
if 'LIBPATH' in build_tools.Env:
lib_path = build_tools.Env['LIBPATH']
paths = set()
for path in lib_path:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc)
paths = [i for i in paths]
paths.sort()
lib_paths = ';'.join(paths) + ';%(AdditionalLibraryDirectories)'
for elem in tree.iter(tag='AdditionalLibraryDirectories'):
elem.text = lib_paths
break
xml_indent(root)
vcxproj_string = etree.tostring(root, encoding='utf-8')
root_node=r'<Project DefaultTargets="Build" ToolsVersion="4.0">'
out.write(r'<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">')
out.write(vcxproj_string[len(root_node):])
out.close()
xml_indent(filter_project)
filter_string = etree.tostring(filter_project, encoding='utf-8')
out = file('project.vcxproj.filters', 'wb')
out.write('<?xml version="1.0" encoding="UTF-8"?>\r\n')
root_node=r'<Project ToolsVersion="4.0">'
out.write(r'<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">')
out.write(filter_string[len(root_node):])
out.close()
| 36.38961
| 129
| 0.657269
|
fb3c7fe4e7b761e25613198575d94a94557139a3
| 2,859
|
py
|
Python
|
src/python/WMCore/WMBS/MySQL/Jobs/GetNumberOfJobsPerSite.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/WMBS/MySQL/Jobs/GetNumberOfJobsPerSite.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/WMBS/MySQL/Jobs/GetNumberOfJobsPerSite.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_GetNumberOfJobsPerSite_
MySQL implementation of Jobs.GetNumberOfJobsPerSite
"""
__all__ = []
import logging
from WMCore.Database.DBFormatter import DBFormatter
class GetNumberOfJobsPerSite(DBFormatter):
"""
    _GetNumberOfJobsPerSite_
    Count the jobs assigned to a given site, optionally restricted to a set
    of job states and a subscription type.
"""
def buildSQL(self, states, type):
"""
_buildSQL_
builds the sql statements; necessary for lists
"""
baseSQL = """SELECT count(*) FROM wmbs_job
WHERE location = (SELECT ID FROM wmbs_location WHERE site_name = :location)
AND state IN (SELECT ID FROM wmbs_job_state js WHERE js.name IN (
"""
typeSQL = """SELECT count(*) FROM wmbs_job
INNER JOIN wmbs_jobgroup ON wmbs_job.jobgroup = wmbs_jobgroup.id
INNER JOIN wmbs_subscription ON wmbs_jobgroup.subscription = wmbs_subscription.id
INNER JOIN wmbs_job_state ON wmbs_job.state = wmbs_job_state.id
INNER JOIN wmbs_location ON wmbs_job.location = wmbs_location.id
INNER JOIN wmbs_sub_types ON wmbs_subscription.subtype = wmbs_sub_types.id
WHERE wmbs_location.site_name = :location
AND wmbs_sub_types.name = :type
AND wmbs_job_state.name IN ("""
if type:
sql = typeSQL
else:
sql = baseSQL
states = list(states)
count = 0
for state in states:
if not count == 0:
sql += ", "
sql += ":state%i" %(count)
count += 1
sql += ")"
return sql
def format(self, results):
"""
_format_
"""
if len(results) == 0:
return False
else:
return results[0].fetchall()[0]
def buildBinds(self, location, states, type):
"""
_buildBinds_
Build a list of binds
"""
binds = {}
binds['location'] = location
count = 0
for state in states:
binds["state%i" %(count)] = state
count += 1
if type:
binds['type'] = type
return binds
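    # Illustrative binds for location='T1_US_FNAL' and states=['Created',
    # 'Executing'] (values invented for the example, not from the original):
    #   {'location': 'T1_US_FNAL', 'state0': 'Created', 'state1': 'Executing'}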
def execute(self, location, states, type = None, conn = None, transaction = False):
"""
_execute_
Execute the SQL for the given job ID and then format and return
the result.
"""
sql = self.buildSQL(states, type)
binds = self.buildBinds(location, states, type)
#print "In Jobs.GetNumberOfJobsPerSite"
#print sql
#print binds
result = self.dbi.processData(sql, binds, conn = conn, transaction = transaction)
return self.format(result)
| 23.628099
| 104
| 0.55369
|
5a0b832993835c9c8c192c54e9188426c47cf2a0
| 1,338
|
py
|
Python
|
examples/rmsd.py
|
carbonscott/pyrotein
|
4c41eade0d014e70aadf9f9c475cbc4255a0a32e
|
[
"MIT"
] | 1
|
2021-11-05T21:09:23.000Z
|
2021-11-05T21:09:23.000Z
|
examples/rmsd.py
|
carbonscott/pyrotein
|
4c41eade0d014e70aadf9f9c475cbc4255a0a32e
|
[
"MIT"
] | null | null | null |
examples/rmsd.py
|
carbonscott/pyrotein
|
4c41eade0d014e70aadf9f9c475cbc4255a0a32e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## import sys
## sys.path.insert(0, "/home/scott/Dropbox/codes/pyrotein")
## sys.path.insert(0, "/Users/scott/Dropbox/codes/pyrotein")
import os
import numpy as np
import pyrotein as pr
from loaddata import load_xlsx, label_TMs
from display import plot_dmat
# Specify chains to process...
fl_chain = "chains.comp.xlsx"
lines = load_xlsx(fl_chain)
drc = "pdb"
# Define atoms used for distance matrix analysis...
peptide = ["N", "CA", "C", "O"]
# Specify the range of residues to analyze...
nterm = 1
cterm = 322
len_peptide = (cterm - nterm + 1) * len(peptide)
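# Every residue contributes the four backbone atoms above, so each distance
# matrix is len_peptide x len_peptide; one matrix is stored per chain listed
# in the spreadsheet.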
dmats = np.zeros((len(lines), len_peptide, len_peptide))
for i_fl, line in enumerate(lines):
# Unpack parameters
_, pdb, chain, species = line[:4]
# Read coordinates from a PDB file...
fl_pdb = f"{pdb}.pdb"
pdb_path = os.path.join(drc, fl_pdb)
atoms_pdb = pr.atom.read(pdb_path)
# Create a lookup table for this pdb...
atom_dict = pr.atom.create_lookup_table(atoms_pdb)
# Obtain coordinates...
xyzs = pr.atom.extract_xyz_by_atom(peptide, atom_dict, chain, nterm, cterm)
# Calculate distance matrix...
dmat = pr.distance.calc_dmat(xyzs, xyzs)
dmats[i_fl, :, :] = dmat[:, :]
rmsd_dmat = pr.distance.calc_rmsd_mats(dmats)
np.save("rmsd_dmat.npy", rmsd_dmat)
| 26.76
| 79
| 0.679372
|
7ea6915d932610686f690851dfb2d5cbdbb56e75
| 507
|
py
|
Python
|
thresholding.py
|
kigane/opencv-tutorial
|
90f21e0a82a6db00b1c3b3982891eeac477beb3c
|
[
"MIT"
] | null | null | null |
thresholding.py
|
kigane/opencv-tutorial
|
90f21e0a82a6db00b1c3b3982891eeac477beb3c
|
[
"MIT"
] | null | null | null |
thresholding.py
|
kigane/opencv-tutorial
|
90f21e0a82a6db00b1c3b3982891eeac477beb3c
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
img = cv.imread('images/cat2.jpg')
resized = cv.resize(img, (800, 600), interpolation=cv.INTER_AREA)
cv.imshow('Cat', resized)
gray = cv.cvtColor(resized, cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray, 125, 255, type=cv.THRESH_BINARY)
cv.imshow('Thresh', thresh)
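# Sweep the constant C (the value subtracted from the neighbourhood mean,
# i.e. the last argument of adaptiveThreshold) from 1 to 10 to compare its effect.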
for i in range(10):
adp_thresh = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, i+1)
cv.imshow(f'AdaptiveThreshold C={i+1}', adp_thresh)
cv.waitKey(0)
| 26.684211
| 102
| 0.721893
|
50f821ead11ed4fb1c70de916b85aaad0f4f5af8
| 538
|
py
|
Python
|
Algorithms/Hard/60. Permutation Sequence/answer.py
|
KenWoo/Algorithm
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Hard/60. Permutation Sequence/answer.py
|
KenWoo/Algorithm
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Hard/60. Permutation Sequence/answer.py
|
KenWoo/Algorithm
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
class Solution:
def getPermutation(self, n: int, k: int) -> str:
res = ''
fact = [1] * n
num = [str(i) for i in range(1, 10)]
for i in range(1, n):
fact[i] = fact[i - 1] * i
k -= 1
for i in range(n, 0, -1):
first = k // fact[i - 1]
k %= fact[i - 1]
res += num[first]
num.pop(first)
return res
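# The method treats k-1 as a number in the factorial number system: at each
# step the quotient k // (i-1)! selects which of the remaining digits comes
# next, and the remainder is carried forward. Worked example (n=3, k=3):
#   k-1 = 2; 2 // 2! = 1 -> '2' (remainder 0); 0 // 1! = 0 -> '1'; then '3',
#   giving "213", which the __main__ block below prints.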
if __name__ == "__main__":
s = Solution()
result = s.getPermutation(3, 3)
print(result)
| 25.619048
| 52
| 0.459108
|
3f4bdebf7e29b388d3beac3d4e878081295d727f
| 90
|
py
|
Python
|
adlibre_tms/apps/reporting/context_processors.py
|
adlibre/Adlibre-TMS
|
4c8de1e4448203fb267d38ec0f4ec9e64d58a21d
|
[
"BSD-3-Clause"
] | 26
|
2015-01-06T11:09:18.000Z
|
2022-03-16T06:20:53.000Z
|
adlibre_tms/apps/reporting/context_processors.py
|
adlibre/Adlibre-TMS
|
4c8de1e4448203fb267d38ec0f4ec9e64d58a21d
|
[
"BSD-3-Clause"
] | 4
|
2015-02-26T11:00:35.000Z
|
2020-06-05T18:02:02.000Z
|
adlibre_tms/apps/reporting/context_processors.py
|
adlibre/Adlibre-TMS
|
4c8de1e4448203fb267d38ec0f4ec9e64d58a21d
|
[
"BSD-3-Clause"
] | 16
|
2015-02-08T05:24:38.000Z
|
2021-06-13T14:45:30.000Z
|
import reporting
def reports(request):
return {'reports': reporting.all_reports(), }
| 18
| 49
| 0.722222
|
79a21b048cebb396d3a5629d64220496b6c399f1
| 824
|
py
|
Python
|
FNet/Model/Layers/Inceptions/ThirdInception/secondBlock.py
|
ren-dishen/fnet
|
460c3177ee1c8b7aa37d0dc0c8deae577dc79222
|
[
"MIT"
] | null | null | null |
FNet/Model/Layers/Inceptions/ThirdInception/secondBlock.py
|
ren-dishen/fnet
|
460c3177ee1c8b7aa37d0dc0c8deae577dc79222
|
[
"MIT"
] | null | null | null |
FNet/Model/Layers/Inceptions/ThirdInception/secondBlock.py
|
ren-dishen/fnet
|
460c3177ee1c8b7aa37d0dc0c8deae577dc79222
|
[
"MIT"
] | null | null | null |
import Model.blockFactory as factory
def block1x1(input):
tensor = factory.convolutionBlock(input, 'inception_5b_1x1_', '', 256, (1,1))
return tensor
def block3x3(input):
tensor = factory.convolutionBlock(input, 'inception_5b_3x3_', '1', 96, (1,1))
tensor = factory.zeroPadding(tensor)
tensor = factory.convolutionBlock(tensor, 'inception_5b_3x3_', '2', 384, (3,3))
return tensor
def blockPool(input):
tensor = factory.maxPooling(input, (3,3), (2,2))
tensor = factory.convolutionBlock(tensor, 'inception_5b_pool_', '', 96, (1,1))
tensor = factory.zeroPadding(tensor, (1,1))
return tensor
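# inceptionConstructor assembles inception block 5b by building the 3x3,
# pooling and 1x1 branches above and combining them with factory.merge.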
def inceptionConstructor(input):
_3x3 = block3x3(input)
_pool = blockPool(input)
_1x1 = block1x1(input)
tensor = factory.merge([_3x3, _pool, _1x1])
return tensor
| 26.580645
| 83
| 0.682039
|
ea04b55f878b7a7c15e3ad1532f001987f64640f
| 2,774
|
py
|
Python
|
UTILS/image_read.py
|
emersonrafaels/ocr_tables
|
11e696422f6fd8508fdc92ffe9a7d14be319e51f
|
[
"MIT"
] | null | null | null |
UTILS/image_read.py
|
emersonrafaels/ocr_tables
|
11e696422f6fd8508fdc92ffe9a7d14be319e51f
|
[
"MIT"
] | null | null | null |
UTILS/image_read.py
|
emersonrafaels/ocr_tables
|
11e696422f6fd8508fdc92ffe9a7d14be319e51f
|
[
"MIT"
] | null | null | null |
"""
FUNÇÕES PARA LEITURA DA IMAGEM.
# Arguments
object - Required : Imagem para leitura/visualização (String | Object)
# Returns
"""
__version__ = "1.0"
__author__ = """Emerson V. Rafael (EMERVIN)"""
__data_atualizacao__ = "15/10/2021"
from inspect import stack
import cv2
from PIL import Image
def read_image_gray(image_path):
"""
FUNÇÃO PARA LEITURA DE UMA IMAGEM.
# Arguments
caminho_imagem - Required : Caminho da imagem a ser lida (String)
# Returns
img - Required : Imagem após leitura (Array)
"""
# INICIANDO O OBJETO DA IMAGEM
img = None
try:
# A LEITURA É FEITA EM FORMATO RGB
image = cv2.imread(str(image_path))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
except Exception as ex:
print("ERRO NA FUNÇÃO: {} - {}".format(stack()[0][3], ex))
return image
def read_image_rgb(image_path):
"""
FUNÇÃO PARA LEITURA DE UMA IMAGEM.
# Arguments
caminho_imagem - Required : Caminho da imagem a ser lida (String)
# Returns
img - Required : Imagem após leitura (Array)
"""
# INICIANDO O OBJETO DA IMAGEM
img = None
try:
# A LEITURA É FEITA EM FORMATO RGB
image = cv2.imread(str(image_path))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
except Exception as ex:
print("ERRO NA FUNÇÃO: {} - {}".format(stack()[0][3], ex))
return image
def realiza_leitura_imagem(caminho_imagem):
"""
FUNÇÃO PARA LEITURA DE UMA IMAGEM.
# Arguments
caminho_imagem - Required : Caminho da imagem a ser lida (String)
# Returns
img - Required : Imagem após leitura (Object)
"""
# INICIANDO O OBJETO DA IMAGEM
img = None
try:
# UTILIZANDO O OPENCV PARA LEITURA DA IMAGEM
# A LEITURA É FEITA EM FORMATO BGR
img = cv2.imread(caminho_imagem)
except Exception as ex:
print(ex)
return img
def realiza_leitura_imagem_pillow(caminho_imagem):
"""
FUNÇÃO PARA LEITURA DE UMA IMAGEM.
UTILIZA PIL - IMAGE
# Arguments
caminho_imagem - Required : Caminho da imagem a ser lida (String)
# Returns
img - Required : Imagem após leitura (Object)
"""
# INICIANDO O OBJETO DA IMAGEM
img = None
try:
# UTILIZANDO O PILLOW PARA LEITURA DA IMAGEM
# A LEITURA É FEITA EM FORMATO RGB
img = Image.open(caminho_imagem)
except Exception as ex:
print("ERRO NA FUNÇÃO: {} - {}".format(stack()[0][3], ex))
return img
| 22.370968
| 95
| 0.576424
|
c5f84d6c6c787a83da1986df21908308c934a159
| 12,784
|
py
|
Python
|
cryptoapis/model/list_transactions_by_block_hash_ribsb_script_pub_key.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
cryptoapis/model/list_transactions_by_block_hash_ribsb_script_pub_key.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
cryptoapis/model/list_transactions_by_block_hash_ribsb_script_pub_key.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
class ListTransactionsByBlockHashRIBSBScriptPubKey(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'addresses': ([str],), # noqa: E501
'asm': (str,), # noqa: E501
'hex': (str,), # noqa: E501
'req_sigs': (int,), # noqa: E501
'type': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'addresses': 'addresses', # noqa: E501
'asm': 'asm', # noqa: E501
'hex': 'hex', # noqa: E501
'req_sigs': 'reqSigs', # noqa: E501
'type': 'type', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, addresses, asm, hex, req_sigs, type, *args, **kwargs): # noqa: E501
"""ListTransactionsByBlockHashRIBSBScriptPubKey - a model defined in OpenAPI
Args:
addresses ([str]):
asm (str): Represents the assembly of the script public key of the address.
hex (str): Represents the hex of the script public key of the address.
req_sigs (int): Represents the required signatures.
type (str): Represents the script type.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.addresses = addresses
self.asm = asm
self.hex = hex
self.req_sigs = req_sigs
self.type = type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, addresses, asm, hex, req_sigs, type, *args, **kwargs): # noqa: E501
"""ListTransactionsByBlockHashRIBSBScriptPubKey - a model defined in OpenAPI
Args:
addresses ([str]):
asm (str): Represents the assembly of the script public key of the address.
hex (str): Represents the hex of the script public key of the address.
req_sigs (int): Represents the required signatures.
type (str): Represents the script type.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.addresses = addresses
self.asm = asm
self.hex = hex
self.req_sigs = req_sigs
self.type = type
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.543554
| 484
| 0.579005
|
c8dd2f7012dea69fc678d6c69f0b4cf5203fe3b9
| 1,787
|
py
|
Python
|
meshnet/routing.py
|
vladzaharia/meshnet
|
87a48c678094870cc273f4940f122e899eb1f5e1
|
[
"MIT"
] | 1
|
2021-02-16T05:29:03.000Z
|
2021-02-16T05:29:03.000Z
|
meshnet/routing.py
|
vladzaharia/meshnet
|
87a48c678094870cc273f4940f122e899eb1f5e1
|
[
"MIT"
] | 1
|
2021-02-16T06:23:08.000Z
|
2021-02-16T06:23:08.000Z
|
meshnet/routing.py
|
vladzaharia/meshnet
|
87a48c678094870cc273f4940f122e899eb1f5e1
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from constants.nodetype import TYPE_PROVISIONING
from util.nodetype import NodeType
TIMEOUT_NODE = 5
TIMEOUT_GATEWAY = 15
class RoutingEntry:
node_id: bytes
node_type: NodeType
expiry: datetime
def __init__(self, node_id: bytes, node_type: NodeType) -> None:
self.node_id = node_id
self.node_type = node_type
if (node_type.is_non_routed()):
raise Exception("Cannot create RoutingEntry for non-routed node")
elif (self.node_type.is_node()):
self.expiry = datetime.now() + timedelta(minutes = TIMEOUT_NODE)
elif (self.node_type.is_gateway()):
self.expiry = datetime.now() + timedelta(minutes = TIMEOUT_GATEWAY)
else:
self.expiry = datetime.now() + timedelta(seconds = 5)
class Routing:
# Singleton instance
_instance = None
    neighbors: list[RoutingEntry] = []  # replaced with a fresh list per singleton in __new__
def __new__(cls):
if cls._instance is None:
cls._instance = super(Routing, cls).__new__(cls)
cls.neighbors = list()
return cls._instance
def add(self, entry: RoutingEntry):
self.neighbors.append(entry)
def node_neighbors(self):
return list(filter(lambda n: n.node_type.is_node(), self.neighbors))
def gateway_neighbors(self):
return list(filter(lambda n: n.node_type.is_gateway(), self.neighbors))
def clean(self):
neighbors_to_clean = list()
for neighbor in self.neighbors:
if (neighbor.expiry < datetime.now()):
neighbors_to_clean.append(neighbor)
for neighbor in neighbors_to_clean:
self.neighbors.remove(neighbor)
def reset(self):
self.neighbors = list()
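# Minimal usage sketch (illustrative only, not part of the original module):
#   table = Routing()                      # Routing is a singleton
#   table.add(RoutingEntry(b'\x01', some_node_type))
#   table.clean()                          # drops entries whose expiry has passed
#   table.gateway_neighbors()              # entries whose type is a gateway
# 'some_node_type' stands for a NodeType instance; its exact construction
# depends on util.nodetype, which is not shown here.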
| 30.288136
| 79
| 0.647454
|
122053e28aa01cb297db8271c994bab8f8cf39c1
| 426
|
py
|
Python
|
rest/migrations/0022_course_level.py
|
narcotis/Welbot-V2
|
7525216b61036f62d0be0b5ebb6d3476b73323c8
|
[
"MIT"
] | 1
|
2021-06-04T03:28:06.000Z
|
2021-06-04T03:28:06.000Z
|
rest/migrations/0022_course_level.py
|
narcotis/Welbot-V2
|
7525216b61036f62d0be0b5ebb6d3476b73323c8
|
[
"MIT"
] | 2
|
2020-09-09T14:19:10.000Z
|
2020-09-09T14:20:21.000Z
|
rest/migrations/0022_course_level.py
|
narcotis/Welbot-V2
|
7525216b61036f62d0be0b5ebb6d3476b73323c8
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-08-19 06:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rest', '0021_auto_20200819_1513'),
]
operations = [
migrations.AddField(
model_name='course',
name='level',
field=models.CharField(default=0, max_length=5),
preserve_default=False,
),
]
| 21.3
| 60
| 0.593897
|
22b76ca8813f7b272af7d96dea2d68d17ee88025
| 7,351
|
py
|
Python
|
xml_models/xpath_twister.py
|
macropin/xml-models-redux
|
6297f89b0c17ab1858df12fadc4415dd1696620b
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2016-05-05T08:36:24.000Z
|
2016-05-05T08:36:24.000Z
|
xml_models/xpath_twister.py
|
macropin/xml-models-redux
|
6297f89b0c17ab1858df12fadc4415dd1696620b
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xml_models/xpath_twister.py
|
macropin/xml-models-redux
|
6297f89b0c17ab1858df12fadc4415dd1696620b
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
"""
Copyright 2009 Chris Tarttelin and Point2 Technologies
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the FreeBSD Project.
"""
import unittest
from xml.dom import minidom
import xpath
class MultipleNodesReturnedException(Exception):
pass
lxml_available = False
try:
from lxml import etree, objectify
lxml_available = True
except:
pass
def find_unique(xml, expression, namespace=None):
if lxml_available:
return _lxml_xpath(xml, expression, namespace)
else:
return _pydom_xpath(xml, expression, namespace)
def find_all(xml, expression, namespace=None):
if lxml_available:
return _lxml_xpath_all(xml, expression, namespace)
else:
return _pydom_xpath_all(xml, expression, namespace)
def _lxml_xpath(xml_doc, expression, namespace):
if namespace:
find = etree.XPath(get_xpath(expression, namespace), namespaces={'x': namespace})
else:
find = etree.XPath(get_xpath(expression, namespace))
matches = find(xml_doc)
if len(matches) == 1:
matched = matches[0]
if type(matched) == type(''):
return unicode(matched).strip()
if isinstance(matched, etree._ElementStringResult):
return str(matched)
if isinstance(matched, etree._ElementUnicodeResult):
return unicode(matched)
if matched is None or matched == False:
return unicode(matched.text).strip()
if isinstance(matched, etree._Element):
if matched.text is not None:
return unicode(matched.text)
if len(matches) > 1:
raise MultipleNodesReturnedException
def _lxml_xpath_all(xml, expression, namespace):
if namespace:
find = etree.XPath(get_xpath(expression, namespace), namespaces={'x': namespace})
else:
find = etree.XPath(get_xpath(expression,namespace))
matches = find(xml)
return [etree.tostring(match) for match in matches]
def domify(xml):
if lxml_available:
return objectify.fromstring(xml)
else:
return minidom.parseString(xml)
def _pydom_xpath_all(xml, expression, namespace):
nodelist = xpath.find(expression, xml, default_namespace=namespace)
return [fragment.toxml() for fragment in nodelist]
def _pydom_xpath(xml, expression, namespace):
nodelist = xpath.find(expression, xml, default_namespace=namespace)
if len(nodelist) > 1:
raise MultipleNodesReturnedException
if len(nodelist) == 0:
return None
if nodelist[0].nodeType == minidom.Node.DOCUMENT_NODE:
node = nodelist[0].firstChild.firstChild
else:
node = nodelist[0].firstChild
if node == None:
return None
if node.nodeType == minidom.Node.TEXT_NODE:
return node.nodeValue
else:
return None
def get_xpath(xpath, namespace):
if namespace:
xpath_list = xpath.split('/')
xpath_with_ns = ""
for element in xpath_list:
if not element.startswith('@') and not element == '' :
xpath_with_ns += "/x:" + element
elif element == '':
pass
else:
xpath_with_ns += "/" + element
return xpath_with_ns
else:
return xpath
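# For example, get_xpath("/foo/bar/@name", "urn:example") rewrites element
# steps with the 'x' prefix and leaves attribute steps untouched, yielding
# "/x:foo/x:bar/@name"; with no namespace the expression is returned as-is.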
class XPathTest(unittest.TestCase):
def test_xpath_returns_expected_element_value(self):
#setup
xml = minidom.parseString("<foo><baz>dcba</baz><bar>abcd</bar></foo>")
#execute
val = _pydom_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals("abcd", val)
def test_xpath_returns_expected_element_value_from_unicode_xml_fragment(self):
#setup
xml = minidom.parseString(u"<foo><baz>dcba</baz><bar>abcd\xe9</bar></foo>".encode('utf-8'))
#execute
val = _pydom_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals(u"abcd\xe9", val)
def test_xpath_returns_expected_attribute_value(self):
#setup
xml = minidom.parseString('<foo><baz name="Arthur">dcba</baz><bar>abcd</bar></foo>')
#execute
val = _pydom_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals("Arthur", val)
def test_xpath_returns_expected_attribute_value_from_unicode_xml_fragment(self):
#setup
xml = minidom.parseString(u'<foo><baz name="Arthur\xe9">dcba</baz><bar>abcd</bar></foo>'.encode('utf-8'))
#execute
val = _pydom_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals(u"Arthur\xe9", val)
def test_lxml_returns_expected_element_value(self):
#setup
xml = objectify.fromstring('<foo><baz name="Arthur">dcba</baz><bar>abcd</bar></foo>')
#execute
val = _lxml_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals("abcd", val)
def test_lxml_returns_expected_element_value_from_unicode_xml_fragment(self):
#setup
xml = objectify.fromstring(u'<foo><baz name="Arthur">dcba</baz><bar>abcd\xe9</bar></foo>'.encode('utf-8'))
#execute
val = _lxml_xpath(xml, "/foo/bar", None)
#assert
self.assertEquals(u"abcd\xe9", val)
def test_lxml_returns_expected_attribute_value(self):
#setup
xml = objectify.fromstring('<foo><baz name="Arthur">dcba</baz><bar>abcd</bar></foo>')
#execute
val = _lxml_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals("Arthur", val)
def test_lxml_returns_expected_attribute_value_from_unicode_xml_fragment(self):
#setup
xml = objectify.fromstring(u'<foo><baz name="Arthur\xe9">dcba</baz><bar>abcd</bar></foo>'.encode('utf-8'))
#execute
val = _lxml_xpath(xml, "/foo/baz/@name", None)
#assert
self.assertEquals(u"Arthur\xe9", val)
if __name__=='__main__':
unittest.main()
| 37.891753
| 114
| 0.664127
|
c013b377711a0e125b8b5830a4f6b2ca9431e839
| 1,520
|
py
|
Python
|
setup.py
|
Peking-Epoch/pyfmt
|
34513c1de219d16aa5f3de68b0f908aa10869890
|
[
"MIT"
] | 1
|
2020-07-02T08:23:53.000Z
|
2020-07-02T08:23:53.000Z
|
setup.py
|
Peking-Epoch/pyfmt
|
34513c1de219d16aa5f3de68b0f908aa10869890
|
[
"MIT"
] | null | null | null |
setup.py
|
Peking-Epoch/pyfmt
|
34513c1de219d16aa5f3de68b0f908aa10869890
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', 'black', 'isort>=4.0', 'autoflake']
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="svtter",
author_email='svtter@qq.com',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Format code use black, autoflake, isort",
entry_points={
'console_scripts': [
'pyfmt=pyfmt.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='pyfmt',
name='pyfmt-svtter',
packages=find_packages(include=['pyfmt', 'pyfmt.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/svtter/pyfmt',
version='0.1.0',
zip_safe=False,
)
| 28.148148
| 65
| 0.626316
|
e2791ce5e7ec888a75b54c1be4af3318b8640674
| 1,859
|
py
|
Python
|
patterns/table_data_gateway/gateways/person_gateway.py
|
xstrengthofonex/enterprise-application-patterns
|
5c9054ec5fc5e05ce53d3fc4190c040d2fedb59b
|
[
"MIT"
] | null | null | null |
patterns/table_data_gateway/gateways/person_gateway.py
|
xstrengthofonex/enterprise-application-patterns
|
5c9054ec5fc5e05ce53d3fc4190c040d2fedb59b
|
[
"MIT"
] | null | null | null |
patterns/table_data_gateway/gateways/person_gateway.py
|
xstrengthofonex/enterprise-application-patterns
|
5c9054ec5fc5e05ce53d3fc4190c040d2fedb59b
|
[
"MIT"
] | null | null | null |
from patterns.table_data_gateway.db import db
class PersonGateway(object):
@staticmethod
def find_all():
sql = "SELECT * FROM person"
with db.connect() as conn:
return conn.execute(sql).fetchall()
@staticmethod
def find_with_lastname(lastname):
sql = "SELECT * FROM person WHERE lastname=:lastname"
with db.connect() as conn:
return conn.execute(sql, {'lastname':lastname}).fetchall()
@staticmethod
def find_where(condition, **params):
sql = "SELECT * FROM person WHERE {}".format(condition)
with db.connect() as conn:
return conn.execute(sql, params).fetchall()
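    # e.g. find_where("lastname=:lastname AND numberOfDependents > :deps",
    #                 lastname="Fowler", deps=2) -- illustrative values; the
    #     condition text is formatted into the WHERE clause and the keyword
    #     arguments are passed through as bind parameters.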
@staticmethod
def find_row(id):
sql = "SELECT 1 FROM person WHERE id=:id"
with db.connect() as conn:
return conn.execute(sql, {'id':id}).fetchone()
@staticmethod
def update(id, firstname, lastname, numberOfDependents):
sql = "UPDATE person "\
"SET firstname=:firstname, lastname=:lastname, " \
"numberOfDependents=:numberOfDependents "\
"WHERE id=:id"
with db.connect() as conn:
conn.execute(sql, {'firstname':firstname, 'lastname':lastname,
'numberOfDependents':numberOfDependents, 'id':id})
@staticmethod
def insert(id, firstname, lastname, numberOfDependents):
sql = "INSERT INTO person "\
"VALUES (:id, :firstname, :lastname, :numberOfDependents)"
with db.connect() as conn:
conn.execute(sql, {'id':id, 'firstname':firstname, 'lastname':lastname,
'numberOfDependents':numberOfDependents})
@staticmethod
def delete(id):
sql = "DELETE FROM person WHERE id=:id"
with db.connect() as conn:
conn.execute(sql, {'id': id})
| 37.18
| 83
| 0.597633
|
99d9edacaff23812ddebbce5e37d899b33ae8988
| 4,895
|
py
|
Python
|
sdk/python/pulumi_azure/mixedreality/spatial_anchors_account.py
|
davidobrien1985/pulumi-azure
|
811beeea473bd798d77354521266a87a2fac5888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/mixedreality/spatial_anchors_account.py
|
davidobrien1985/pulumi-azure
|
811beeea473bd798d77354521266a87a2fac5888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/mixedreality/spatial_anchors_account.py
|
davidobrien1985/pulumi-azure
|
811beeea473bd798d77354521266a87a2fac5888
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class SpatialAnchorsAccount(pulumi.CustomResource):
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
name: pulumi.Output[str]
"""
Specifies the name of the Spatial Anchors Account. Changing this forces a new resource to be created. Must be globally unique.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to create the Spatial Anchors Account.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, location=None, name=None, resource_group_name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Manages an Azure Spatial Anchors Account.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Spatial Anchors Account. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Spatial Anchors Account.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['location'] = location
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
super(SpatialAnchorsAccount, __self__).__init__(
'azure:mixedreality/spatialAnchorsAccount:SpatialAnchorsAccount',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, location=None, name=None, resource_group_name=None, tags=None):
"""
Get an existing SpatialAnchorsAccount resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Spatial Anchors Account. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Spatial Anchors Account.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["location"] = location
__props__["name"] = name
__props__["resource_group_name"] = resource_group_name
__props__["tags"] = tags
return SpatialAnchorsAccount(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 48.95
| 165
| 0.692135
|
74faaf8e12618b202f7f24feee5e18d168a47271
| 11,343
|
py
|
Python
|
redash/query_runner/mongodb.py
|
mozilla/redash
|
3e76946a4182d30ddb8a6f80fdfef8d550a2359e
|
[
"BSD-2-Clause"
] | 24
|
2017-05-10T20:36:55.000Z
|
2020-11-26T14:20:01.000Z
|
redash/query_runner/mongodb.py
|
MaxMood96/redash
|
3e76946a4182d30ddb8a6f80fdfef8d550a2359e
|
[
"BSD-2-Clause"
] | 742
|
2016-09-28T00:38:58.000Z
|
2021-07-29T17:48:45.000Z
|
redash/query_runner/mongodb.py
|
MaxMood96/redash
|
3e76946a4182d30ddb8a6f80fdfef8d550a2359e
|
[
"BSD-2-Clause"
] | 21
|
2016-10-19T18:29:24.000Z
|
2021-07-24T11:47:36.000Z
|
import datetime
import logging
import re
from dateutil.parser import parse
from redash.query_runner import *
from redash.utils import JSONEncoder, json_dumps, json_loads, parse_human_time
logger = logging.getLogger(__name__)
try:
import pymongo
from bson.objectid import ObjectId
from bson.timestamp import Timestamp
from bson.decimal128 import Decimal128
from bson.son import SON
from bson.json_util import object_hook as bson_object_hook
enabled = True
except ImportError:
enabled = False
TYPES_MAP = {
str: TYPE_STRING,
bytes: TYPE_STRING,
int: TYPE_INTEGER,
float: TYPE_FLOAT,
bool: TYPE_BOOLEAN,
datetime.datetime: TYPE_DATETIME,
}
class MongoDBJSONEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
elif isinstance(o, Timestamp):
return super(MongoDBJSONEncoder, self).default(o.as_datetime())
elif isinstance(o, Decimal128):
return o.to_decimal()
return super(MongoDBJSONEncoder, self).default(o)
date_regex = re.compile('ISODate\("(.*)"\)', re.IGNORECASE)
def parse_oids(oids):
if not isinstance(oids, list):
raise Exception("$oids takes an array as input.")
return [bson_object_hook({"$oid": oid}) for oid in oids]
def datetime_parser(dct):
for k, v in dct.items():
if isinstance(v, str):
m = date_regex.findall(v)
if len(m) > 0:
dct[k] = parse(m[0], yearfirst=True)
if "$humanTime" in dct:
return parse_human_time(dct["$humanTime"])
if "$oids" in dct:
return parse_oids(dct["$oids"])
return bson_object_hook(dct)
def parse_query_json(query):
query_data = json_loads(query, object_hook=datetime_parser)
return query_data
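# Illustrative example (not from the original source): the object hook above
# turns string values such as 'ISODate("2020-01-01T00:00:00")' into datetime
# objects, {"$humanTime": "3 days ago"} into a concrete datetime via
# parse_human_time, and {"$oids": [...]} into a list of ObjectId instances
# while the query JSON is being deserialized.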
def _get_column_by_name(columns, column_name):
for c in columns:
if "name" in c and c["name"] == column_name:
return c
return None
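# parse_results flattens one level of nesting: a value like
# {"user": {"name": "x"}} becomes a "user.name" column, and each column's type
# is inferred from the first value seen via TYPES_MAP.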
def parse_results(results):
rows = []
columns = []
for row in results:
parsed_row = {}
for key in row:
if isinstance(row[key], dict):
for inner_key in row[key]:
column_name = "{}.{}".format(key, inner_key)
if _get_column_by_name(columns, column_name) is None:
columns.append(
{
"name": column_name,
"friendly_name": column_name,
"type": TYPES_MAP.get(
type(row[key][inner_key]), TYPE_STRING
),
}
)
parsed_row[column_name] = row[key][inner_key]
else:
if _get_column_by_name(columns, key) is None:
columns.append(
{
"name": key,
"friendly_name": key,
"type": TYPES_MAP.get(type(row[key]), TYPE_STRING),
}
)
parsed_row[key] = row[key]
rows.append(parsed_row)
return rows, columns
class MongoDB(BaseQueryRunner):
should_annotate_query = False
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"connectionString": {"type": "string", "title": "Connection String"},
"dbName": {"type": "string", "title": "Database Name"},
"replicaSetName": {"type": "string", "title": "Replica Set Name"},
"readPreference": {
"type": "string",
"extendedEnum": [
{"value": "primaryPreferred", "name": "Primary Preferred"},
{"value": "primary", "name": "Primary"},
{"value": "secondary", "name": "Secondary"},
{"value": "secondaryPreferred", "name": "Secondary Preferred"},
{"value": "nearest", "name": "Nearest"},
],
"title": "Replica Set Read Preference",
},
"toggle_table_string": {
"type": "string",
"title": "Toggle Table String",
"default": "_v",
"info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight.",
},
},
"required": ["connectionString", "dbName"],
}
@classmethod
def enabled(cls):
return enabled
def __init__(self, configuration):
super(MongoDB, self).__init__(configuration)
self.syntax = "json"
self.db_name = self.configuration["dbName"]
self.is_replica_set = (
True
if "replicaSetName" in self.configuration
and self.configuration["replicaSetName"]
else False
)
def _get_db(self):
kwargs = {}
if self.is_replica_set:
kwargs["replicaSet"] = self.configuration["replicaSetName"]
readPreference = self.configuration.get("readPreference")
if readPreference:
kwargs["readPreference"] = readPreference
db_connection = pymongo.MongoClient(
self.configuration["connectionString"], **kwargs
)
return db_connection[self.db_name]
def test_connection(self):
db = self._get_db()
if not db.command("connectionStatus")["ok"]:
raise Exception("MongoDB connection error")
return db
def _merge_property_names(self, columns, document):
for property in document:
if property not in columns:
columns.append(property)
def _is_collection_a_view(self, db, collection_name):
if "viewOn" in db[collection_name].options():
return True
else:
return False
def _get_collection_fields(self, db, collection_name):
        # Since MongoDB is a document-based database and each document doesn't
        # have to have the same fields as another document in the collection,
        # it's a bit hard to show these attributes as fields in the schema.
        #
        # For now, the logic is to take the first and last documents (the last
        # is determined by the natural order, see
        # http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order), as we
        # don't know the correct order. In most single-server installations this
        # is fine. In a replica set, when reading from a non-primary member, it
        # might not return the most recently written document.
collection_is_a_view = self._is_collection_a_view(db, collection_name)
documents_sample = []
if collection_is_a_view:
for d in db[collection_name].find().limit(2):
documents_sample.append(d)
else:
for d in db[collection_name].find().sort([("$natural", 1)]).limit(1):
documents_sample.append(d)
for d in db[collection_name].find().sort([("$natural", -1)]).limit(1):
documents_sample.append(d)
columns = []
for d in documents_sample:
self._merge_property_names(columns, d)
return columns
def get_schema(self, get_stats=False):
schema = {}
db = self._get_db()
for collection_name in db.collection_names():
if collection_name.startswith("system."):
continue
columns = self._get_collection_fields(db, collection_name)
schema[collection_name] = {
"name": collection_name,
"columns": sorted(columns),
}
return list(schema.values())
def run_query(self, query, user):
db = self._get_db()
logger.debug(
"mongodb connection string: %s", self.configuration["connectionString"]
)
logger.debug("mongodb got query: %s", query)
try:
query_data = parse_query_json(query)
except ValueError:
return None, "Invalid query format. The query is not a valid JSON."
if "collection" not in query_data:
return None, "'collection' must have a value to run a query"
else:
collection = query_data["collection"]
q = query_data.get("query", None)
f = None
aggregate = query_data.get("aggregate", None)
if aggregate:
for step in aggregate:
if "$sort" in step:
sort_list = []
for sort_item in step["$sort"]:
sort_list.append((sort_item["name"], sort_item["direction"]))
step["$sort"] = SON(sort_list)
if "fields" in query_data:
f = query_data["fields"]
s = None
if "sort" in query_data and query_data["sort"]:
s = []
for field_data in query_data["sort"]:
s.append((field_data["name"], field_data["direction"]))
columns = []
rows = []
cursor = None
if q or (not q and not aggregate):
if s:
cursor = db[collection].find(q, f).sort(s)
else:
cursor = db[collection].find(q, f)
if "skip" in query_data:
cursor = cursor.skip(query_data["skip"])
if "limit" in query_data:
cursor = cursor.limit(query_data["limit"])
if "count" in query_data:
cursor = cursor.count()
elif aggregate:
allow_disk_use = query_data.get("allowDiskUse", False)
r = db[collection].aggregate(aggregate, allowDiskUse=allow_disk_use)
# Backwards compatibility with older pymongo versions.
#
# Older pymongo version would return a dictionary from an aggregate command.
# The dict would contain a "result" key which would hold the cursor.
# Newer ones return pymongo.command_cursor.CommandCursor.
if isinstance(r, dict):
cursor = r["result"]
else:
cursor = r
if "count" in query_data:
columns.append(
{"name": "count", "friendly_name": "count", "type": TYPE_INTEGER}
)
rows.append({"count": cursor})
else:
rows, columns = parse_results(cursor)
if f:
ordered_columns = []
for k in sorted(f, key=f.get):
column = _get_column_by_name(columns, k)
if column:
ordered_columns.append(column)
columns = ordered_columns
if query_data.get("sortColumns"):
reverse = query_data["sortColumns"] == "desc"
columns = sorted(columns, key=lambda col: col["name"], reverse=reverse)
data = {"columns": columns, "rows": rows}
error = None
json_data = json_dumps(data, cls=MongoDBJSONEncoder)
return json_data, error
register(MongoDB)
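# A hedged, offline sketch (not part of the Redash runner): it runs only when
# this module is executed directly and exercises the pure helpers above
# without a live MongoDB. The sample query and documents are invented for
# illustration only.
if __name__ == "__main__":
    sample_query = """
    {
        "collection": "events",
        "query": {"created_at": {"$gte": "ISODate(\\"2020-01-01T00:00:00\\")"}},
        "limit": 10
    }
    """
    query_data = parse_query_json(sample_query)
    # datetime_parser turned the ISODate(...) string into a datetime object.
    print(type(query_data["query"]["created_at"]["$gte"]))
    # parse_results flattens one level of nesting into dotted column names.
    rows, columns = parse_results([
        {"name": "alice", "address": {"city": "Lisbon"}},
        {"name": "bob", "visits": 3},
    ])
    print([c["name"] for c in columns])  # ['name', 'address.city', 'visits']
    print(rows)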
| 32.224432
| 178
| 0.550736
|
b8fc6dd2b5384140a2ba92a3a9ba6af348b0b35b
| 8,597
|
py
|
Python
|
tensorflow/python/keras/layers/preprocessing/category_crossing.py
|
ghaiyur-musubi/tensorflow
|
821bd0d4ea30b91dda6f24238aa49de55de20527
|
[
"Apache-2.0"
] | 11
|
2018-01-03T15:11:09.000Z
|
2021-04-13T05:47:27.000Z
|
tensorflow/python/keras/layers/preprocessing/category_crossing.py
|
ghaiyur-musubi/tensorflow
|
821bd0d4ea30b91dda6f24238aa49de55de20527
|
[
"Apache-2.0"
] | 2
|
2020-08-20T18:08:19.000Z
|
2020-10-02T18:35:21.000Z
|
tensorflow/python/keras/layers/preprocessing/category_crossing.py
|
ghaiyur-musubi/tensorflow
|
821bd0d4ea30b91dda6f24238aa49de55de20527
|
[
"Apache-2.0"
] | 10
|
2018-07-31T10:56:21.000Z
|
2019-10-07T08:05:21.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras category crossing preprocessing layers."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.experimental.preprocessing.CategoryCrossing')
class CategoryCrossing(base_preprocessing_layer.PreprocessingLayer):
"""Category crossing layer.
This layer concatenates multiple categorical inputs into a single categorical
output (similar to Cartesian product). The output dtype is string.
Usage:
>>> inp_1 = ['a', 'b', 'c']
>>> inp_2 = ['d', 'e', 'f']
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing()
>>> layer([inp_1, inp_2])
<tf.Tensor: shape=(3, 1), dtype=string, numpy=
array([[b'a_X_d'],
[b'b_X_e'],
[b'c_X_f']], dtype=object)>
>>> inp_1 = ['a', 'b', 'c']
>>> inp_2 = ['d', 'e', 'f']
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing(
... separator='-')
>>> layer([inp_1, inp_2])
<tf.Tensor: shape=(3, 1), dtype=string, numpy=
array([[b'a-d'],
[b'b-e'],
[b'c-f']], dtype=object)>
Arguments:
depth: depth of input crossing. By default None, all inputs are crossed into
one output. It can also be an int or tuple/list of ints. Passing an
integer will create combinations of crossed outputs with depth up to that
integer, i.e., [1, 2, ..., `depth`), and passing a tuple of integers will
create crossed outputs with depth for the specified values in the tuple,
i.e., `depth`=(N1, N2) will create all possible crossed outputs with depth
equal to N1 or N2. Passing `None` means a single crossed output with all
inputs. For example, with inputs `a`, `b` and `c`, `depth=2` means the
      output will be [a; b; c; cross(a, b); cross(b, c); cross(c, a)].
separator: A string added between each input being joined. Defaults to
'_X_'.
name: Name to give to the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape: a list of string or int tensors or sparse tensors of shape
`[batch_size, d1, ..., dm]`
Output shape: a single string or int tensor or sparse tensor of shape
`[batch_size, d1, ..., dm]`
Returns:
If any input is `RaggedTensor`, the output is `RaggedTensor`.
Else, if any input is `SparseTensor`, the output is `SparseTensor`.
Otherwise, the output is `Tensor`.
Example: (`depth`=None)
If the layer receives three inputs:
`a=[[1], [4]]`, `b=[[2], [5]]`, `c=[[3], [6]]`
the output will be a string tensor:
`[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
Example: (`depth` is an integer)
With the same input above, and if `depth`=2,
the output will be a list of 6 string tensors:
`[[b'1'], [b'4']]`
`[[b'2'], [b'5']]`
`[[b'3'], [b'6']]`
`[[b'1_X_2'], [b'4_X_5']]`,
`[[b'2_X_3'], [b'5_X_6']]`,
`[[b'3_X_1'], [b'6_X_4']]`
Example: (`depth` is a tuple/list of integers)
With the same input above, and if `depth`=(2, 3)
the output will be a list of 4 string tensors:
`[[b'1_X_2'], [b'4_X_5']]`,
`[[b'2_X_3'], [b'5_X_6']]`,
`[[b'3_X_1'], [b'6_X_4']]`,
`[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
"""
def __init__(self, depth=None, name=None, separator=None, **kwargs):
super(CategoryCrossing, self).__init__(name=name, **kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell(
'CategoryCrossing').set(True)
self.depth = depth
if separator is None:
separator = '_X_'
self.separator = separator
if isinstance(depth, (tuple, list)):
self._depth_tuple = depth
elif depth is not None:
self._depth_tuple = tuple([i for i in range(1, depth + 1)])
def partial_crossing(self, partial_inputs, ragged_out, sparse_out):
"""Gets the crossed output from a partial list/tuple of inputs."""
# If ragged_out=True, convert output from sparse to ragged.
if ragged_out:
# TODO(momernick): Support separator with ragged_cross.
if self.separator != '_X_':
raise ValueError('Non-default separator with ragged input is not '
'supported yet, given {}'.format(self.separator))
return ragged_array_ops.cross(partial_inputs)
elif sparse_out:
return sparse_ops.sparse_cross(partial_inputs, separator=self.separator)
else:
return sparse_ops.sparse_tensor_to_dense(
sparse_ops.sparse_cross(partial_inputs, separator=self.separator))
def _preprocess_input(self, inp):
if isinstance(inp, (list, tuple, np.ndarray)):
inp = ops.convert_to_tensor_v2_with_dispatch(inp)
if inp.shape.rank == 1:
inp = array_ops.expand_dims(inp, axis=-1)
return inp
def call(self, inputs):
inputs = [self._preprocess_input(inp) for inp in inputs]
depth_tuple = self._depth_tuple if self.depth else (len(inputs),)
ragged_out = sparse_out = False
if any(tf_utils.is_ragged(inp) for inp in inputs):
ragged_out = True
elif any(isinstance(inp, sparse_tensor.SparseTensor) for inp in inputs):
sparse_out = True
outputs = []
for depth in depth_tuple:
if len(inputs) < depth:
raise ValueError(
'Number of inputs cannot be less than depth, got {} input tensors, '
'and depth {}'.format(len(inputs), depth))
for partial_inps in itertools.combinations(inputs, depth):
partial_out = self.partial_crossing(
partial_inps, ragged_out, sparse_out)
outputs.append(partial_out)
if sparse_out:
return sparse_ops.sparse_concat_v2(axis=1, sp_inputs=outputs)
return array_ops.concat(outputs, axis=1)
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, (tuple, list)):
raise ValueError('A `CategoryCrossing` layer should be called '
'on a list of inputs.')
input_shapes = input_shape
batch_size = None
for inp_shape in input_shapes:
inp_tensor_shape = tensor_shape.TensorShape(inp_shape).as_list()
if len(inp_tensor_shape) != 2:
        raise ValueError('Inputs must be rank 2, got {}'.format(input_shapes))
if batch_size is None:
batch_size = inp_tensor_shape[0]
# The second dimension is dynamic based on inputs.
output_shape = [batch_size, None]
return tensor_shape.TensorShape(output_shape)
def compute_output_signature(self, input_spec):
input_shapes = [x.shape for x in input_spec]
output_shape = self.compute_output_shape(input_shapes)
if any(
isinstance(inp_spec, ragged_tensor.RaggedTensorSpec)
for inp_spec in input_spec):
return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.string)
elif any(
isinstance(inp_spec, sparse_tensor.SparseTensorSpec)
for inp_spec in input_spec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=dtypes.string)
return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.string)
def get_config(self):
config = {
'depth': self.depth,
'separator': self.separator,
}
base_config = super(CategoryCrossing, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
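# A minimal, hedged usage sketch (not part of the TensorFlow module): it runs
# only when this file is executed directly and exercises the `depth` argument
# described in the class docstring above; the toy inputs are invented.
if __name__ == '__main__':
  layer = CategoryCrossing(depth=2)
  # Three inputs of two examples each. Per the docstring, depth=2 keeps each
  # single input and adds every pairwise cross, concatenated along axis 1,
  # giving a (2, 6) string tensor such as b'1', ..., b'3_X_1'.
  crossed = layer([['1', '4'], ['2', '5'], ['3', '6']])
  print(crossed)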
| 40.551887
| 80
| 0.680354
|
dd3a18427dccc4f5ce39a65a1740982d6f6d0c3e
| 422
|
py
|
Python
|
URI Online Judge/Mathematics/1161 - Factorial Sum.py
|
wgarcia1309/competitive-programming
|
a1788c8a7cbddaa753c2f468859581c1bac9e322
|
[
"MIT"
] | null | null | null |
URI Online Judge/Mathematics/1161 - Factorial Sum.py
|
wgarcia1309/competitive-programming
|
a1788c8a7cbddaa753c2f468859581c1bac9e322
|
[
"MIT"
] | null | null | null |
URI Online Judge/Mathematics/1161 - Factorial Sum.py
|
wgarcia1309/competitive-programming
|
a1788c8a7cbddaa753c2f468859581c1bac9e322
|
[
"MIT"
] | null | null | null |
# 1161 - Factorial Sum
while True:
    try:
        tokens = input().split(" ")
        v1 = int(tokens[0])
        v2 = int(tokens[1])
        # Build both factorials in one pass up to the larger operand.
        mx = max(v1, v2) + 1
        r1 = r2 = p = 1
        for x in range(2, mx):
            p *= x
            if x == v1:
                r1 = p
            if x == v2:
                r2 = p
        print(r1 + r2)
    except Exception:
        break
| 18.347826
| 30
| 0.343602
|
e7267d09cd4f212b15e39e433ea5a7a1423859f1
| 241
|
py
|
Python
|
test_botnet_dataset.py
|
ailabteam/clone_test_botnet-detection
|
95f02d4967186440f60d83db04f139c197e178b7
|
[
"MIT"
] | 112
|
2020-03-12T17:59:13.000Z
|
2022-03-17T00:26:58.000Z
|
test_botnet_dataset.py
|
ailabteam/clone_test_botnet-detection
|
95f02d4967186440f60d83db04f139c197e178b7
|
[
"MIT"
] | 14
|
2020-05-19T07:14:10.000Z
|
2022-02-23T17:11:43.000Z
|
test_botnet_dataset.py
|
ailabteam/clone_test_botnet-detection
|
95f02d4967186440f60d83db04f139c197e178b7
|
[
"MIT"
] | 35
|
2020-03-12T15:14:23.000Z
|
2022-03-10T05:56:03.000Z
|
from botdet.data.dataset_botnet import BotnetDataset
if __name__ == '__main__':
dataset = BotnetDataset(name='chord', split='train', graph_format='pyg')
print(dataset)
print(len(dataset))
print(dataset[0])
breakpoint()
| 24.1
| 76
| 0.701245
|
0074628ed951e6dcb0df11ee6d7a73b152d6844a
| 21,745
|
py
|
Python
|
server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/clangd/signature_help_test.py
|
hkdb/sysconf
|
99d334f7309657647059c4b37f25e33dffc81fc3
|
[
"MIT"
] | 10
|
2020-07-21T21:59:54.000Z
|
2021-07-19T11:01:47.000Z
|
server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/clangd/signature_help_test.py
|
hkdb/sysconf
|
99d334f7309657647059c4b37f25e33dffc81fc3
|
[
"MIT"
] | null | null | null |
server/.vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/tests/clangd/signature_help_test.py
|
hkdb/sysconf
|
99d334f7309657647059c4b37f25e33dffc81fc3
|
[
"MIT"
] | 1
|
2021-01-30T18:17:01.000Z
|
2021-01-30T18:17:01.000Z
|
# Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import json
import requests
from unittest.mock import patch
from hamcrest import assert_that, contains_exactly, empty, equal_to, has_entries
from ycmd import handlers
from ycmd.tests.clangd import PathToTestFile, SharedYcmd, IsolatedYcmd
from ycmd.tests.test_utils import ( EMPTY_SIGNATURE_HELP,
BuildRequest,
CombineRequest,
ParameterMatcher,
SignatureMatcher,
SignatureAvailableMatcher,
WaitUntilCompleterServerReady )
from ycmd.utils import ReadFile
def RunTest( app, test ):
"""
Method to run a simple completion test and verify the result
Note: Compile commands are extracted from a compile_flags.txt file by clangd
by iteratively looking at the directory containing the source file and its
ancestors.
test is a dictionary containing:
'request': kwargs for BuildRequest
'expect': {
'response': server response code (e.g. requests.codes.ok)
'data': matcher for the server response json
}
"""
request = test[ 'request' ]
filetype = request.get( 'filetype', 'cpp' )
if 'contents' not in request:
contents = ReadFile( request[ 'filepath' ] )
request[ 'contents' ] = contents
request[ 'filetype' ] = filetype
# Because we aren't testing this command, we *always* ignore errors. This
# is mainly because we (may) want to test scenarios where the completer
# throws an exception and the easiest way to do that is to throw from
# within the Settings function.
app.post_json( '/event_notification',
CombineRequest( request, {
'event_name': 'FileReadyToParse',
'filetype': filetype
} ),
expect_errors = True )
WaitUntilCompleterServerReady( app, filetype )
  # We also ignore errors here, but then we check the response code ourselves.
# This is to allow testing of requests returning errors.
response = app.post_json( '/signature_help',
BuildRequest( **request ),
expect_errors = True )
assert_that( response.status_code,
equal_to( test[ 'expect' ][ 'response' ] ) )
print( 'Completer response: {}'.format( json.dumps(
response.json, indent = 2 ) ) )
assert_that( response.json, test[ 'expect' ][ 'data' ] )
@SharedYcmd
def Signature_Help_Trigger_test( app ):
RunTest( app, {
'description': 'trigger after (',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 14,
'signature_help_state': 'INACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 0,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] ),
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] ),
)
} ),
} )
},
} )
@IsolatedYcmd( { 'disable_signature_help': 1 } )
def Signature_Help_Disabled_test( app ):
RunTest( app, {
'description': 'trigger after (',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 14,
'signature_help_state': 'INACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP,
} )
},
} )
@SharedYcmd
def Signature_Help_NoTrigger_test( app ):
RunTest( app, {
'description': 'do not trigger before (',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 13,
'signature_help_state': 'INACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP,
} ),
},
} )
@SharedYcmd
def Signature_Help_NoTrigger_After_Trigger_test( app ):
RunTest( app, {
'description': 'do not trigger too far after (',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 15,
'signature_help_state': 'INACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP,
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_After_Trigger_test( app ):
RunTest( app, {
'description': 'Auto trigger due to state of existing request',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 15,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 0,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] ),
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] ),
)
} ),
} ),
},
} )
@IsolatedYcmd( { 'disable_signature_help': 1 } )
def Signature_Help_Trigger_After_Trigger_Disabled_test( app ):
RunTest( app, {
'description': 'Auto trigger due to state of existing request',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 15,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP,
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_After_Trigger_PlusText_test( app ):
RunTest( app, {
'description': 'Triggering after additional text beyond (',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 17,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 0,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] ),
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] ),
)
} ),
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_After_Trigger_PlusCompletion_test( app ):
RunTest( app, {
'description': 'Triggering after semantic trigger after (',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 28,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 0,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] ),
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] ),
)
} ),
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_After_OtherTrigger_test( app ):
RunTest( app, {
'description': 'Triggering after ,',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 35,
'signature_help_state': 'INACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 1,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] ),
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] ),
)
} ),
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_After_Arguments_Narrow_test( app ):
RunTest( app, {
'description': 'After resolution of overload',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 41,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 2,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] )
)
} ),
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_After_Arguments_Narrow2_test( app ):
RunTest( app, {
'description': 'After resolution of overload not the first one',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 8,
'column_num': 53,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 2,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] )
)
} ),
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_After_OtherTrigger_ReTrigger_test( app ):
RunTest( app, {
'description': 'Triggering after , but already ACTIVE',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 35,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 1,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] ),
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] ),
)
} ),
} ),
},
} )
@SharedYcmd
def Signature_Help_Trigger_JustBeforeClose_test( app ):
RunTest( app, {
'description': 'Last argument, before )',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 8,
'column_num': 33,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': has_entries( {
'activeSignature': 0,
'activeParameter': 0,
'signatures': contains_exactly(
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'Temperature temp, '
'int sugargs) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 58 ),
] ),
SignatureMatcher( 'make_drink(TypeOfDrink type, '
'double fizziness, '
'Flavour Flavour) -> Drink &', [
ParameterMatcher( 11, 27 ),
ParameterMatcher( 29, 45 ),
ParameterMatcher( 47, 62 ),
] ),
)
} ),
} ),
},
} )
@SharedYcmd
def Signature_Help_Clears_After_EndFunction_test( app ):
RunTest( app, {
'description': 'Empty response on )',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 70,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP,
} ),
},
} )
@SharedYcmd
def Signature_Help_Clears_After_Function_Call_test( app ):
RunTest( app, {
'description': 'Empty response after )',
'request': {
'filetype' : 'cpp',
'filepath' : PathToTestFile( 'general_fallback',
'make_drink.cc' ),
'line_num' : 7,
'column_num': 71,
'signature_help_state': 'ACTIVE',
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP,
} ),
},
} )
@patch( 'ycmd.completers.completer.Completer.ShouldUseSignatureHelpNow',
return_value = True )
@patch( 'ycmd.completers.language_server.language_server_completer.'
'LanguageServerCompleter._ServerIsInitialized', return_value = False )
@IsolatedYcmd()
def Signature_Help_Server_Not_Initialized_test( should_use_sig,
server_init,
app ):
filepath = PathToTestFile( 'general_fallback', 'make_drink.cc' )
request = {
'filetype' : 'cpp',
'filepath' : filepath,
'line_num' : 7,
'column_num': 71,
'signature_help_state': 'INACTIVE',
'contents': ReadFile( filepath )
}
response = app.post_json( '/signature_help',
BuildRequest( **request ),
expect_errors = True )
assert_that( response.json, has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP,
} ) )
def Signature_Help_Available_Server_Not_Initialized_test():
completer = handlers._server_state.GetFiletypeCompleter( [ 'cpp' ] )
@SharedYcmd
@patch.object( completer, '_ServerIsInitialized', return_value = False )
def Test( app ):
response = app.get( '/signature_help_available',
{ 'subserver': 'cpp' } ).json
assert_that( response, SignatureAvailableMatcher( 'PENDING' ) )
@SharedYcmd
def Signature_Help_Supported_test( app ):
request = { 'filepath' : PathToTestFile( 'goto.cc' ) }
app.post_json( '/event_notification',
CombineRequest( request, {
'event_name': 'FileReadyToParse',
'filetype': 'cpp'
} ),
expect_errors = True )
WaitUntilCompleterServerReady( app, 'cpp' )
response = app.get( '/signature_help_available',
{ 'subserver': 'cpp' } ).json
assert_that( response, SignatureAvailableMatcher( 'YES' ) )
@IsolatedYcmd( { 'disable_signature_help': 1 } )
def Signature_Help_Available_Disabled_By_User_test( app, *args ):
request = { 'filepath' : PathToTestFile( 'goto.cc' ) }
app.post_json( '/event_notification',
CombineRequest( request, {
'event_name': 'FileReadyToParse',
'filetype': 'cpp'
} ),
expect_errors = True )
WaitUntilCompleterServerReady( app, 'cpp' )
response = app.get( '/signature_help_available',
{ 'subserver': 'cpp' } ).json
assert_that( response, SignatureAvailableMatcher( 'NO' ) )
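# A hedged illustration (not part of the ycmd test-suite): it runs only when
# this module is executed directly and simply prints the dictionary shape that
# RunTest() documents above; no clangd server is started or contacted.
if __name__ == '__main__':
  example_test = {
    'request': {
      'filetype'  : 'cpp',
      'filepath'  : PathToTestFile( 'general_fallback', 'make_drink.cc' ),
      'line_num'  : 7,
      'column_num': 14,
      'signature_help_state': 'INACTIVE',
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( { 'errors': empty() } ),
    },
  }
  print( json.dumps( example_test, indent = 2, default = repr ) )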
| 34.029734
| 80
| 0.501173
|
fad78eab3bf3ce972584a9e70c8115709ed068fd
| 28,150
|
py
|
Python
|
virt_env/virt1/lib/python2.7/site-packages/WebTest-2.0.17-py2.7.egg/webtest/app.py
|
grepme/CMPUT410Lab01
|
810e565d347718334edd89114647d0264259cc3d
|
[
"Apache-2.0"
] | null | null | null |
virt_env/virt1/lib/python2.7/site-packages/WebTest-2.0.17-py2.7.egg/webtest/app.py
|
grepme/CMPUT410Lab01
|
810e565d347718334edd89114647d0264259cc3d
|
[
"Apache-2.0"
] | 3
|
2015-02-15T18:31:10.000Z
|
2015-02-22T19:56:05.000Z
|
virt_env/virt1/lib/python2.7/site-packages/WebTest-2.0.17-py2.7.egg/webtest/app.py
|
grepme/CMPUT410Lab01
|
810e565d347718334edd89114647d0264259cc3d
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2005 Ian Bicking and contributors; written for Paste
# (http://pythonpaste.org)
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Routines for testing WSGI applications.
Most interesting is TestApp
"""
from __future__ import unicode_literals
import os
import re
import json
import random
import fnmatch
import mimetypes
from base64 import b64encode
from six import StringIO
from six import BytesIO
from six import string_types
from six import binary_type
from six import text_type
from six.moves import http_cookiejar
from webtest.compat import urlparse
from webtest.compat import urlencode
from webtest.compat import to_bytes
from webtest.compat import escape_cookie_value
from webtest.response import TestResponse
from webtest import forms
from webtest import lint
from webtest import utils
import webob
__all__ = ['TestApp', 'TestRequest']
class AppError(Exception):
def __init__(self, message, *args):
if isinstance(message, binary_type):
message = message.decode('utf8')
str_args = ()
for arg in args:
if isinstance(arg, webob.Response):
body = arg.body
if isinstance(body, binary_type):
if arg.charset:
arg = body.decode(arg.charset)
else:
arg = repr(body)
elif isinstance(arg, binary_type):
try:
arg = arg.decode('utf8')
except UnicodeDecodeError:
arg = repr(arg)
str_args += (arg,)
message = message % str_args
Exception.__init__(self, message)
class CookiePolicy(http_cookiejar.DefaultCookiePolicy):
"""A subclass of DefaultCookiePolicy to allow cookie set for
Domain=localhost."""
def return_ok_domain(self, cookie, request):
if cookie.domain == '.localhost':
return True
return http_cookiejar.DefaultCookiePolicy.return_ok_domain(
self, cookie, request)
def set_ok_domain(self, cookie, request):
if cookie.domain == '.localhost':
return True
return http_cookiejar.DefaultCookiePolicy.set_ok_domain(
self, cookie, request)
class TestRequest(webob.BaseRequest):
"""A subclass of webob.Request"""
ResponseClass = TestResponse
class TestApp(object):
"""
Wraps a WSGI application in a more convenient interface for
    testing. It uses an extended version of :class:`webob.BaseRequest`
and :class:`webob.Response`.
:param app:
        May be a WSGI application or Paste Deploy app,
like ``'config:filename.ini#test'``.
.. versionadded:: 2.0
It can also be an actual full URL to an http server and webtest
will proxy requests with `wsgiproxy`.
:type app:
WSGI application
:param extra_environ:
A dictionary of values that should go
into the environment for each request. These can provide a
communication channel with the application.
:type extra_environ:
dict
:param relative_to:
        A directory; file uploads are resolved relative to it, as are
        ``config:`` URIs that aren't absolute.
:type relative_to:
string
:param cookiejar:
        A :class:`cookielib.CookieJar`-like API that keeps cookies
        across requests.
:type cookiejar:
CookieJar instance
.. attribute:: cookies
A convenient shortcut for a dict of all cookies in
``cookiejar``.
:param parser_features:
Passed to BeautifulSoup when parsing responses.
:type parser_features:
string or list
:param json_encoder:
Passed to json.dumps when encoding json
:type json_encoder:
A subclass of json.JSONEncoder
:param lint:
If True (default) then check that the application is WSGI compliant
:type lint:
A boolean
"""
RequestClass = TestRequest
def __init__(self, app, extra_environ=None, relative_to=None,
use_unicode=True, cookiejar=None, parser_features=None,
json_encoder=None, lint=True):
if 'WEBTEST_TARGET_URL' in os.environ:
app = os.environ['WEBTEST_TARGET_URL']
if isinstance(app, string_types):
if app.startswith('http'):
try:
from wsgiproxy import HostProxy
except ImportError: # pragma: no cover
raise ImportError((
'Using webtest with a real url requires WSGIProxy2. '
'Please install it with: '
'pip install WSGIProxy2'))
if '#' not in app:
app += '#httplib'
url, client = app.split('#', 1)
app = HostProxy(url, client=client)
else:
from paste.deploy import loadapp
# @@: Should pick up relative_to from calling module's
# __file__
app = loadapp(app, relative_to=relative_to)
self.app = app
self.lint = lint
self.relative_to = relative_to
if extra_environ is None:
extra_environ = {}
self.extra_environ = extra_environ
self.use_unicode = use_unicode
if cookiejar is None:
cookiejar = http_cookiejar.CookieJar(policy=CookiePolicy())
self.cookiejar = cookiejar
if parser_features is None:
parser_features = 'html.parser'
self.RequestClass.ResponseClass.parser_features = parser_features
if json_encoder is None:
json_encoder = json.JSONEncoder
self.JSONEncoder = json_encoder
def get_authorization(self):
"""Allow to set the HTTP_AUTHORIZATION environ key. Value should looks
like ``('Basic', ('user', 'password'))``
If value is None the the HTTP_AUTHORIZATION is removed
"""
return self.authorization_value
def set_authorization(self, value):
self.authorization_value = value
if value is not None:
invalid_value = (
"You should use a value like ('Basic', ('user', 'password'))"
)
if isinstance(value, (list, tuple)) and len(value) == 2:
authtype, val = value
if authtype == 'Basic' and val and \
isinstance(val, (list, tuple)):
val = ':'.join(list(val))
val = b64encode(to_bytes(val)).strip()
val = val.decode('latin1')
else:
raise ValueError(invalid_value)
value = str('%s %s' % (authtype, val))
else:
raise ValueError(invalid_value)
self.extra_environ.update({
'HTTP_AUTHORIZATION': value,
})
else:
if 'HTTP_AUTHORIZATION' in self.extra_environ:
del self.extra_environ['HTTP_AUTHORIZATION']
authorization = property(get_authorization, set_authorization)
@property
def cookies(self):
return dict([(cookie.name, cookie.value) for cookie in self.cookiejar])
def set_cookie(self, name, value):
"""
Sets a cookie to be passed through with requests.
"""
value = escape_cookie_value(value)
cookie = http_cookiejar.Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain='.localhost',
domain_specified=True,
domain_initial_dot=False,
path='/',
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest=None
)
self.cookiejar.set_cookie(cookie)
def reset(self):
"""
Resets the state of the application; currently just clears
saved cookies.
"""
self.cookiejar.clear()
def set_parser_features(self, parser_features):
"""
Changes the parser used by BeautifulSoup. See its documentation to
know the supported parsers.
"""
self.RequestClass.ResponseClass.parser_features = parser_features
def get(self, url, params=None, headers=None, extra_environ=None,
status=None, expect_errors=False, xhr=False):
"""
Do a GET request given the url path.
:param params:
A query string, or a dictionary that will be encoded
into a query string. You may also include a URL query
string on the ``url``.
:param headers:
Extra headers to send.
:type headers:
dictionary
:param extra_environ:
Environmental variables that should be added to the request.
:type extra_environ:
dictionary
:param status:
The HTTP status code you expect in response (if not 200 or 3xx).
You can also use a wildcard, like ``'3*'`` or ``'*'``.
:type status:
integer or string
:param expect_errors:
If this is False, then if anything is written to
environ ``wsgi.errors`` it will be an error.
If it is True, then non-200/3xx responses are also okay.
:type expect_errors:
boolean
:param xhr:
If this is true, then marks response as ajax. The same as
headers={'X-REQUESTED-WITH': 'XMLHttpRequest', }
:type xhr:
boolean
:returns: :class:`webtest.TestResponse` instance.
"""
environ = self._make_environ(extra_environ)
url = str(url)
url = self._remove_fragment(url)
if params:
if not isinstance(params, string_types):
params = urlencode(params, doseq=True)
if str('?') in url:
url += str('&')
else:
url += str('?')
url += params
if str('?') in url:
url, environ['QUERY_STRING'] = url.split(str('?'), 1)
else:
environ['QUERY_STRING'] = str('')
req = self.RequestClass.blank(url, environ)
if xhr:
headers = self._add_xhr_header(headers)
if headers:
req.headers.update(headers)
return self.do_request(req, status=status,
expect_errors=expect_errors)
def post(self, url, params='', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a POST request. Similar to :meth:`~webtest.TestApp.get`.
:param params:
            Are put in the body of the request. If params is an
            iterator, it will be urlencoded; if it is a string, it will not
            be encoded, but placed in the body directly.
Can be a collections.OrderedDict with
:class:`webtest.forms.Upload` fields included::
app.post('/myurl', collections.OrderedDict([
('textfield1', 'value1'),
                    ('uploadfield', webtest.Upload('filename.txt', 'contents')),
                    ('textfield2', 'value2')]))
:param upload_files:
It should be a list of ``(fieldname, filename, file_content)``.
You can also use just ``(fieldname, filename)`` and the file
contents will be read from disk.
:type upload_files:
list
:param content_type:
HTTP content type, for example `application/json`.
:type content_type:
string
:param xhr:
If this is true, then marks response as ajax. The same as
headers={'X-REQUESTED-WITH': 'XMLHttpRequest', }
:type xhr:
boolean
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('POST', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=upload_files,
expect_errors=expect_errors,
content_type=content_type)
def put(self, url, params='', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a PUT request. Similar to :meth:`~webtest.TestApp.post`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('PUT', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=upload_files,
expect_errors=expect_errors,
content_type=content_type,
)
def patch(self, url, params='', headers=None, extra_environ=None,
status=None, upload_files=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a PATCH request. Similar to :meth:`~webtest.TestApp.post`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('PATCH', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=upload_files,
expect_errors=expect_errors,
content_type=content_type)
def delete(self, url, params='', headers=None,
extra_environ=None, status=None, expect_errors=False,
content_type=None, xhr=False):
"""
Do a DELETE request. Similar to :meth:`~webtest.TestApp.get`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('DELETE', url, params=params, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=None,
expect_errors=expect_errors,
content_type=content_type)
def options(self, url, headers=None, extra_environ=None,
status=None, expect_errors=False, xhr=False):
"""
Do a OPTIONS request. Similar to :meth:`~webtest.TestApp.get`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('OPTIONS', url, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=None,
expect_errors=expect_errors)
def head(self, url, headers=None, extra_environ=None,
status=None, expect_errors=False, xhr=False):
"""
Do a HEAD request. Similar to :meth:`~webtest.TestApp.get`.
:returns: :class:`webtest.TestResponse` instance.
"""
if xhr:
headers = self._add_xhr_header(headers)
return self._gen_request('HEAD', url, headers=headers,
extra_environ=extra_environ, status=status,
upload_files=None,
expect_errors=expect_errors)
post_json = utils.json_method('POST')
put_json = utils.json_method('PUT')
patch_json = utils.json_method('PATCH')
delete_json = utils.json_method('DELETE')
def encode_multipart(self, params, files):
"""
Encodes a set of parameters (typically a name/value list) and
a set of files (a list of (name, filename, file_body, mimetype)) into a
typical POST body, returning the (content_type, body).
"""
boundary = to_bytes(str(random.random()))[2:]
boundary = b'----------a_BoUnDaRy' + boundary + b'$'
lines = []
def _append_file(file_info):
key, filename, value, fcontent = self._get_file_info(file_info)
if isinstance(key, text_type):
try:
key = key.encode('ascii')
except: # pragma: no cover
                    raise  # field name must be ascii
if isinstance(filename, text_type):
try:
filename = filename.encode('utf8')
except: # pragma: no cover
raise # file name must be ascii or utf8
if not fcontent:
fcontent = mimetypes.guess_type(filename.decode('utf8'))[0]
fcontent = to_bytes(fcontent)
fcontent = fcontent or b'application/octet-stream'
lines.extend([
b'--' + boundary,
b'Content-Disposition: form-data; ' +
b'name="' + key + b'"; filename="' + filename + b'"',
b'Content-Type: ' + fcontent, b'', value])
for key, value in params:
if isinstance(key, text_type):
try:
key = key.encode('ascii')
except: # pragma: no cover
                    raise  # field names are always ascii
if isinstance(value, forms.File):
if value.value:
_append_file([key] + list(value.value))
elif isinstance(value, forms.Upload):
file_info = [key, value.filename]
if value.content is not None:
file_info.append(value.content)
if value.content_type is not None:
file_info.append(value.content_type)
_append_file(file_info)
else:
if isinstance(value, text_type):
value = value.encode('utf8')
lines.extend([
b'--' + boundary,
b'Content-Disposition: form-data; name="' + key + b'"',
b'', value])
for file_info in files:
_append_file(file_info)
lines.extend([b'--' + boundary + b'--', b''])
body = b'\r\n'.join(lines)
boundary = boundary.decode('ascii')
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body
def request(self, url_or_req, status=None, expect_errors=False,
**req_params):
"""
Creates and executes a request. You may either pass in an
instantiated :class:`TestRequest` object, or you may pass in a
URL and keyword arguments to be passed to
:meth:`TestRequest.blank`.
You can use this to run a request without the intermediary
functioning of :meth:`TestApp.get` etc. For instance, to
test a WebDAV method::
resp = app.request('/new-col', method='MKCOL')
Note that the request won't have a body unless you specify it,
like::
resp = app.request('/test.txt', method='PUT', body='test')
You can use :class:`webtest.TestRequest`::
req = webtest.TestRequest.blank('/url/', method='GET')
resp = app.do_request(req)
"""
if isinstance(url_or_req, text_type):
url_or_req = str(url_or_req)
for (k, v) in req_params.items():
if isinstance(v, text_type):
req_params[k] = str(v)
if isinstance(url_or_req, string_types):
req = self.RequestClass.blank(url_or_req, **req_params)
else:
req = url_or_req.copy()
for name, value in req_params.items():
setattr(req, name, value)
req.environ['paste.throw_errors'] = True
for name, value in self.extra_environ.items():
req.environ.setdefault(name, value)
return self.do_request(req,
status=status,
expect_errors=expect_errors,
)
def do_request(self, req, status=None, expect_errors=None):
"""
Executes the given webob Request (``req``), with the expected
``status``. Generally :meth:`~webtest.TestApp.get` and
:meth:`~webtest.TestApp.post` are used instead.
To use this::
req = webtest.TestRequest.blank('url', ...args...)
resp = app.do_request(req)
.. note::
You can pass any keyword arguments to
``TestRequest.blank()``, which will be set on the request.
These can be arguments like ``content_type``, ``accept``, etc.
"""
errors = StringIO()
req.environ['wsgi.errors'] = errors
script_name = req.environ.get('SCRIPT_NAME', '')
if script_name and req.path_info.startswith(script_name):
req.path_info = req.path_info[len(script_name):]
# set framework hooks
req.environ['paste.testing'] = True
req.environ['paste.testing_variables'] = {}
# set request cookies
self.cookiejar.add_cookie_header(utils._RequestCookieAdapter(req))
# verify wsgi compatibility
app = lint.middleware(self.app) if self.lint else self.app
## FIXME: should it be an option to not catch exc_info?
res = req.get_response(app, catch_exc_info=True)
# be sure to decode the content
res.decode_content()
# set a few handy attributes
res._use_unicode = self.use_unicode
res.request = req
res.app = app
res.test_app = self
        # We do this to make sure the app_iter is exhausted:
try:
res.body
except TypeError: # pragma: no cover
pass
res.errors = errors.getvalue()
for name, value in req.environ['paste.testing_variables'].items():
if hasattr(res, name):
raise ValueError(
"paste.testing_variables contains the variable %r, but "
"the response object already has an attribute by that "
"name" % name)
setattr(res, name, value)
if not expect_errors:
self._check_status(status, res)
self._check_errors(res)
# merge cookies back in
self.cookiejar.extract_cookies(utils._ResponseCookieAdapter(res),
utils._RequestCookieAdapter(req))
return res
def _check_status(self, status, res):
if status == '*':
return
res_status = res.status
if (isinstance(status, string_types) and '*' in status):
if re.match(fnmatch.translate(status), res_status, re.I):
return
if isinstance(status, string_types):
if status == res_status:
return
if isinstance(status, (list, tuple)):
if res.status_int not in status:
raise AppError(
"Bad response: %s (not one of %s for %s)\n%s",
res_status, ', '.join(map(str, status)),
res.request.url, res)
return
if status is None:
if res.status_int >= 200 and res.status_int < 400:
return
raise AppError(
"Bad response: %s (not 200 OK or 3xx redirect for %s)\n%s",
res_status, res.request.url,
res)
if status != res.status_int:
raise AppError(
"Bad response: %s (not %s)", res_status, status)
def _check_errors(self, res):
errors = res.errors
if errors:
raise AppError(
"Application had errors logged:\n%s", errors)
def _make_environ(self, extra_environ=None):
environ = self.extra_environ.copy()
environ['paste.throw_errors'] = True
if extra_environ:
environ.update(extra_environ)
return environ
def _remove_fragment(self, url):
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
return urlparse.urlunsplit((scheme, netloc, path, query, ""))
def _gen_request(self, method, url, params=utils.NoDefault,
headers=None, extra_environ=None, status=None,
upload_files=None, expect_errors=False,
content_type=None):
"""
Do a generic request.
"""
environ = self._make_environ(extra_environ)
inline_uploads = []
# this supports OrderedDict
if isinstance(params, dict) or hasattr(params, 'items'):
params = list(params.items())
if isinstance(params, (list, tuple)):
inline_uploads = [v for (k, v) in params
if isinstance(v, (forms.File, forms.Upload))]
if len(inline_uploads) > 0:
content_type, params = self.encode_multipart(
params, upload_files or ())
environ['CONTENT_TYPE'] = content_type
else:
params = utils.encode_params(params, content_type)
if upload_files or \
(content_type and
to_bytes(content_type).startswith(b'multipart')):
params = urlparse.parse_qsl(params, keep_blank_values=True)
content_type, params = self.encode_multipart(
params, upload_files or ())
environ['CONTENT_TYPE'] = content_type
elif params:
environ.setdefault('CONTENT_TYPE',
str('application/x-www-form-urlencoded'))
if content_type is not None:
environ['CONTENT_TYPE'] = content_type
environ['REQUEST_METHOD'] = str(method)
url = str(url)
url = self._remove_fragment(url)
req = self.RequestClass.blank(url, environ)
if isinstance(params, text_type):
params = params.encode(req.charset or 'utf8')
req.environ['wsgi.input'] = BytesIO(params)
req.content_length = len(params)
if headers:
req.headers.update(headers)
return self.do_request(req, status=status,
expect_errors=expect_errors)
def _get_file_info(self, file_info):
if len(file_info) == 2:
# It only has a filename
filename = file_info[1]
if self.relative_to:
filename = os.path.join(self.relative_to, filename)
f = open(filename, 'rb')
content = f.read()
f.close()
return (file_info[0], filename, content, None)
elif 3 <= len(file_info) <= 4:
content = file_info[2]
if not isinstance(content, binary_type):
raise ValueError('File content must be %s not %s'
% (binary_type, type(content)))
if len(file_info) == 3:
return tuple(file_info) + (None,)
else:
return file_info
else:
raise ValueError(
"upload_files need to be a list of tuples of (fieldname, "
"filename, filecontent, mimetype) or (fieldname, "
"filename, filecontent) or (fieldname, filename); "
"you gave: %r"
% repr(file_info)[:100])
@staticmethod
def _add_xhr_header(headers):
headers = headers or {}
        # str() is required here; removing it causes an error in lint.middleware
headers.update({'X-REQUESTED-WITH': str('XMLHttpRequest')})
return headers
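# A minimal, hedged sketch (not part of webtest): it runs only when this
# module is executed directly. It wraps a trivial WSGI callable in TestApp to
# illustrate the GET, cookie, authorization and multipart helpers documented
# above; the demo application and its field names are invented.
if __name__ == '__main__':
    def demo_app(environ, start_response):
        # A deliberately tiny, WSGI-compliant application.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']
    demo = TestApp(demo_app)
    demo.set_cookie('session', 'abc123')
    demo.authorization = ('Basic', ('user', 'secret'))
    resp = demo.get('/', params={'q': 'test'})
    print(resp.status, resp.body)
    # encode_multipart() builds a multipart body from (name, value) pairs plus
    # (fieldname, filename, content, mimetype) tuples.
    content_type, body = demo.encode_multipart(
        [('field', 'value')], [('upload', 'hello.txt', b'hi', 'text/plain')])
    print(content_type)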
| 36.558442
| 79
| 0.561456
|
a756296c512c1fc7bcb7c81df835739631884656
| 6,082
|
py
|
Python
|
tests/python/gpu/test_kvstore_gpu.py
|
NathanYyc/incubator-mxnet
|
5eeb25995eaf702b3bdaa84c4f0e378f27d5839c
|
[
"Apache-2.0"
] | 1
|
2019-12-20T11:25:06.000Z
|
2019-12-20T11:25:06.000Z
|
tests/python/gpu/test_kvstore_gpu.py
|
NathanYyc/incubator-mxnet
|
5eeb25995eaf702b3bdaa84c4f0e378f27d5839c
|
[
"Apache-2.0"
] | 4
|
2021-03-30T13:16:50.000Z
|
2021-10-04T08:49:33.000Z
|
tests/python/gpu/test_kvstore_gpu.py
|
NathanYyc/incubator-mxnet
|
5eeb25995eaf702b3bdaa84c4f0e378f27d5839c
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
import os
import mxnet as mx
import numpy as np
import pytest
from mxnet.test_utils import assert_almost_equal, default_context, environment
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
shape = (4, 4)
keys = [5, 7, 11]
str_keys = ['b', 'c', 'd']
def init_kv_with_str(stype='default', kv_type='local'):
"""init kv """
kv = mx.kv.create(kv_type)
# single
kv.init('a', mx.nd.zeros(shape, stype=stype))
# list
kv.init(str_keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
return kv
# 1. Test seed 89411477 (module seed 1829754103) resulted in a py3-gpu CI runner core dump.
# 2. Test seed 1155716252 (module seed 1032824746) resulted in py3-mkldnn-gpu have error
# src/operator/nn/mkldnn/mkldnn_base.cc:567: Check failed: similar
# Both of them are not reproducible, so this test is back on random seeds.
@pytest.mark.skipif(mx.context.num_gpus() < 2, reason="test_rsp_push_pull needs more than 1 GPU")
@pytest.mark.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14189")
@pytest.mark.serial
def test_rsp_push_pull():
def check_rsp_push_pull(kv_type, sparse_pull, is_push_cpu=True):
kv = init_kv_with_str('row_sparse', kv_type)
kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))
push_ctxs = [mx.cpu(i) if is_push_cpu else mx.gpu(i) for i in range(2)]
kv.push('e', [mx.nd.ones(shape, ctx=context).tostype('row_sparse') for context in push_ctxs])
def check_rsp_pull(kv, ctxs, sparse_pull, is_same_rowid=False, use_slice=False):
count = len(ctxs)
num_rows = shape[0]
row_ids = []
all_row_ids = np.arange(num_rows)
vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
if is_same_rowid:
row_id = np.random.randint(num_rows, size=num_rows)
row_ids = [mx.nd.array(row_id)] * count
elif use_slice:
total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
else:
for _ in range(count):
row_id = np.random.randint(num_rows, size=num_rows)
row_ids.append(mx.nd.array(row_id))
row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
vals_to_pull = vals[0] if len(vals) == 1 else vals
kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
for val, row_id in zip(vals, row_ids):
retained = val.asnumpy()
excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
for row in range(num_rows):
expected_val = np.zeros_like(retained[row])
expected_val += 0 if row in excluded_row_ids else 2
assert_almost_equal(retained[row], expected_val)
if sparse_pull is True:
kv.pull('e', out=vals_to_pull, ignore_sparse=False)
for val in vals:
retained = val.asnumpy()
expected_val = np.zeros_like(retained)
expected_val[:] = 2
assert_almost_equal(retained, expected_val)
check_rsp_pull(kv, [mx.gpu(0)], sparse_pull)
check_rsp_pull(kv, [mx.cpu(0)], sparse_pull)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull, is_same_rowid=True)
check_rsp_pull(kv, [mx.gpu(i//2) for i in range(4)], sparse_pull, use_slice=True)
check_rsp_pull(kv, [mx.cpu(i) for i in range(4)], sparse_pull, use_slice=True)
envs = [None, '1']
key = 'MXNET_KVSTORE_USETREE'
for val in envs:
with environment(key, val):
            if val == '1':
sparse_pull = False
else:
sparse_pull = True
check_rsp_push_pull('local', sparse_pull)
check_rsp_push_pull('device', sparse_pull)
check_rsp_push_pull('device', sparse_pull, is_push_cpu=False)
def test_row_sparse_pull_single_device():
kvstore = mx.kv.create('device')
copy = mx.nd.random_normal(shape=(4,4), ctx=mx.gpu(0))
grad = copy.tostype("row_sparse")
key = 0
kvstore.init(key, grad)
idx = grad.indices
kvstore.push(key, grad)
kvstore.row_sparse_pull(key, out=grad, row_ids=idx)
assert_almost_equal(grad.asnumpy(), copy.asnumpy())
@pytest.mark.serial
def test_rsp_push_pull_large_rowid():
num_rows = 793470
val = mx.nd.ones((num_rows, 1)).tostype('row_sparse').copyto(mx.gpu())
kv = mx.kv.create('device')
kv.init('a', val)
out = mx.nd.zeros((num_rows,1), stype='row_sparse').copyto(mx.gpu())
kv.push('a', val)
kv.row_sparse_pull('a', out=out, row_ids=mx.nd.arange(0, num_rows, dtype='int64'))
assert(out.indices.shape[0] == num_rows)
| 44.394161
| 107
| 0.649622
|
086bb46cdc0ebfd47f7eea3e66c1e61280174ffb
| 465
|
py
|
Python
|
moonleap/verbs.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
moonleap/verbs.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
moonleap/verbs.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
configured_by = ("configured",)
uses = ("use", "uses")
contains = ("has", "have", "contain", "contains")
stores = ("stores",)
has = contains + uses
is_created_as = "is-created-as"
runs = ("run", "runs", "running")
with_ = ("with",)
wraps = ("wraps",)
connects = ("connects",)
loads = ("loads",)
posts = ("posts",)
returns = ("returns",)
deletes = ("deletes",)
shows = ("shows", "displays")
supports = ("supports",)
provides = ("provides",)
receives = ("receives",)
| 24.473684
| 49
| 0.606452
|
ea45d23053a3c6a7277d5f4726af58285791b93f
| 703
|
py
|
Python
|
talk.py
|
j-potter/ConditionOne
|
2f7e8598f64029fde4000bc4dc3f646aedaf56d3
|
[
"MIT"
] | 4
|
2018-10-31T05:35:36.000Z
|
2021-11-08T10:28:34.000Z
|
talk.py
|
j-potter/ConditionOne
|
2f7e8598f64029fde4000bc4dc3f646aedaf56d3
|
[
"MIT"
] | null | null | null |
talk.py
|
j-potter/ConditionOne
|
2f7e8598f64029fde4000bc4dc3f646aedaf56d3
|
[
"MIT"
] | 1
|
2021-06-24T19:48:41.000Z
|
2021-06-24T19:48:41.000Z
|
class talk:
def __init__(self, GivenDay, GivenTime, GivenTrack, GivenTitle, GivenSpeakers, GivenDescription, GivenIDNum):
self.day = GivenDay
self.time = GivenTime
self.track = GivenTrack
self.title = GivenTitle
self.speakers = GivenSpeakers
self.description = GivenDescription
self.idNum = GivenIDNum
def ShowInfo(self):
print "Talk " + str(self.idNum) + " On " + self.day + " at " + self.time + " in track " + str(self.track) + ": " + self.title
def ShowDescription(self):
print ""
print "Title: " + self.title + "\n"
print "Speaker(s): " + self.speakers + "\n"
print self.description + "\n"
| 39.055556
| 133
| 0.600284
|
2fa6ad8c608b6a2e05661acea9380f2c525ef378
| 214
|
py
|
Python
|
tests/test_ssm.py
|
MITLibraries/alma-scripts
|
c312692a71a83dc0b5e60761bc3e7b37d7d42099
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ssm.py
|
MITLibraries/alma-scripts
|
c312692a71a83dc0b5e60761bc3e7b37d7d42099
|
[
"Apache-2.0"
] | 16
|
2021-07-23T20:46:29.000Z
|
2022-03-10T19:34:10.000Z
|
tests/test_ssm.py
|
MITLibraries/alma-scripts
|
c312692a71a83dc0b5e60761bc3e7b37d7d42099
|
[
"Apache-2.0"
] | null | null | null |
from llama.ssm import SSM
def test_ssm_get_parameter_value(mocked_ssm):
ssm = SSM()
parameter_value = ssm.get_parameter_value("/test/example/ALMA_API_ACQ_READ_KEY")
assert parameter_value == "abc123"
| 26.75
| 84
| 0.766355
|
815511bdef274be6d1e1e47a34a646719dd2e0b8
| 814
|
py
|
Python
|
SimPEG/potential_fields/magnetics/sources.py
|
jcapriot/simpeg
|
e88e653673c6b818592b6c075f76ee9215fe82b7
|
[
"MIT"
] | 1
|
2020-06-04T21:57:47.000Z
|
2020-06-04T21:57:47.000Z
|
SimPEG/potential_fields/magnetics/sources.py
|
jcapriot/simpeg
|
e88e653673c6b818592b6c075f76ee9215fe82b7
|
[
"MIT"
] | null | null | null |
SimPEG/potential_fields/magnetics/sources.py
|
jcapriot/simpeg
|
e88e653673c6b818592b6c075f76ee9215fe82b7
|
[
"MIT"
] | 1
|
2021-01-05T18:16:54.000Z
|
2021-01-05T18:16:54.000Z
|
import properties
import numpy as np
from scipy.constants import mu_0
from ...survey import BaseSrc
from .analytics import IDTtoxyz
from ...utils.code_utils import deprecate_class
class SourceField(BaseSrc):
""" Define the inducing field """
def __init__(self, receiver_list=None, parameters=[50000, 90, 0], **kwargs):
assert (
len(parameters) == 3
), "Inducing field 'parameters' must be a list or tuple of length 3 (amplitude, inclination, declination"
self.parameters = parameters
self.b0 = IDTtoxyz(-parameters[1], parameters[2], parameters[0])
super(SourceField, self).__init__(
receiver_list=receiver_list, parameters=parameters, **kwargs
)
@deprecate_class(removal_version="0.15.0")
class SrcField(SourceField):
pass
| 30.148148
| 113
| 0.692875
|
675032299b2c9826ea3e048e7012c901b05d25f1
| 17,113
|
py
|
Python
|
Python/update_installed_firmware_with_dup.py
|
jzcmyz/OpenManage-Enterprise
|
8c8724d71b62992c2bb94ecf385edf7467990344
|
[
"Apache-2.0"
] | 61
|
2019-01-28T17:47:38.000Z
|
2022-03-07T21:04:44.000Z
|
Python/update_installed_firmware_with_dup.py
|
jzcmyz/OpenManage-Enterprise
|
8c8724d71b62992c2bb94ecf385edf7467990344
|
[
"Apache-2.0"
] | 126
|
2019-07-31T15:46:44.000Z
|
2021-10-21T13:22:06.000Z
|
Python/update_installed_firmware_with_dup.py
|
jzcmyz/OpenManage-Enterprise
|
8c8724d71b62992c2bb94ecf385edf7467990344
|
[
"Apache-2.0"
] | 47
|
2019-05-05T23:10:38.000Z
|
2021-10-19T21:29:06.000Z
|
#
# _author_ = Raajeev Kalyanaraman <Raajeev.Kalyanaraman@Dell.com>
#
# Copyright (c) 2021 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
#### Synopsis
Script to update firmware for a device or applicable devices
within a group using a DUP
#### Description
This script uses the OME REST API to allow updating a device
or a group of devices by using a single DUP file.
Note that the credentials entered are not stored to disk.
#### Python Example
python update_installed_firmware_with_dup.py --ip <ip addr> --user admin
--password <passwd> --groupid 25315
--dupfile iDRAC-with-Lifecycle-Controller_Firmware_387FW_WN64_3.21.21.21_A00.EXE
#### API workflow
1. POST on SessionService/Sessions
2. If a new session is created (201), parse the headers
   for the x-auth token and update the request headers with the token
3. All subsequent requests use the X-auth token rather than the
   user name and password entered by the user
4. Upload the DUP file to OME and retrieve a file token to use in
   subsequent requests via a POST on UpdateService.UploadFile
5. Determine the device or group that the DUP file applies to
   using a POST on UpdateService.GetSingleDupReport
6. Create a firmware update task with the required targets
   using a POST on /api/JobService/Jobs
7. Parse the returned job id and monitor it to completion
8. If the job fails, GET the Job Execution History Details
   and print the info to screen
"""
import argparse
import copy
import json
import os
import sys
import time
from argparse import RawTextHelpFormatter
from getpass import getpass
import requests
import urllib3
def authenticate_with_ome(ip_address, user_name, password):
""" X-auth session creation """
auth_success = False
session_url = "https://%s/api/SessionService/Sessions" % ip_address
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
headers = {'content-type': 'application/json'}
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=headers)
if session_info.status_code == 201:
headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
auth_success = True
else:
error_msg = "Failed create of session with {0} - Status code = {1}"
print(error_msg.format(ip_address, session_info.status_code))
return auth_success, headers
def get_group_list(ip_address, headers):
""" Get list of groups from OME """
group_list = None
group_url = 'https://%s/api/GroupService/Groups' % ip_address
response = requests.get(group_url, headers=headers, verify=False)
if response.status_code == 200:
group_response = response.json()
if group_response['@odata.count'] > 0:
group_list = [x['Id'] for x in group_response['value']]
else:
print("No groups found at ", ip_address)
else:
print("No groups found at ", ip_address)
return group_list
def get_device_list(ip_address, headers):
""" Get list of devices from OME """
ome_device_list = []
next_link_url = 'https://%s/api/DeviceService/Devices' % ip_address
while next_link_url is not None:
device_response = requests.get(next_link_url, headers=headers, verify=False)
next_link_url = None
if device_response.status_code == 200:
dev_json_response = device_response.json()
if dev_json_response['@odata.count'] <= 0:
print("No devices found at ", ip_address)
return
if '@odata.nextLink' in dev_json_response:
next_link_url = 'https://%s/' % ip_address + dev_json_response['@odata.nextLink']
if dev_json_response['@odata.count'] > 0:
ome_device_list = ome_device_list + [x['Id'] for x in dev_json_response['value']]
else:
print("No devices found at ", ip_address)
return ome_device_list
def upload_dup_file(ip_address, headers, file_path):
""" Upload DUP file to OME and get a file token in return """
token = None
upload_success = False
url = 'https://%s/api/UpdateService/Actions/UpdateService.UploadFile' % ip_address
curr_headers = copy.deepcopy(headers)
curr_headers['content-type'] = 'application/octet-stream'
if os.path.isfile(file_path):
if os.path.getsize(file_path) > 0:
with open(file_path, 'rb') as payload:
print("Uploading %s .. This may take a while" % file_path)
response = requests.post(url, data=payload, verify=False,
headers=curr_headers)
if response.status_code == 200:
upload_success = True
token = str(response.text)
print("Successfully uploaded ", file_path)
else:
print("Unable to upload %s to %s" % (file_path, ip_address))
print("Request Status Code = %s" % response.status_code)
else:
print("File %s seems to be empty ... Exiting" % file_path)
else:
print("File not found ... Retry")
return upload_success, token
def get_dup_applicability_payload(file_token, param_map):
""" Returns the DUP applicability JSON payload """
dup_applicability_payload = {'SingleUpdateReportBaseline': [],
'SingleUpdateReportGroup': [],
'SingleUpdateReportTargets': [],
'SingleUpdateReportFileToken': file_token
}
if param_map['group_id']:
dup_applicability_payload['SingleUpdateReportGroup'].append(param_map['group_id'])
elif param_map['device_id']:
dup_applicability_payload['SingleUpdateReportTargets'].append(param_map['device_id'])
else:
pass
return dup_applicability_payload
def get_applicable_components(ip_address, headers, dup_payload):
""" Get the target array to be used in spawning jobs for update """
# Parse the single dup update report and print out versions needing
# an update. In addition add them to the target_data as needed for
# the job payload
target_data = []
dup_url = 'https://%s/api/UpdateService/Actions/UpdateService.GetSingleDupReport' % ip_address
dup_resp = requests.post(dup_url, headers=headers,
data=json.dumps(dup_payload), verify=False)
if dup_resp.status_code == 200:
dup_data = dup_resp.json()
file_token = str(dup_payload['SingleUpdateReportFileToken'])
for device in dup_data:
device_name = str(device['DeviceReport']['DeviceServiceTag'])
device_ip = str(device['DeviceReport']['DeviceIPAddress'])
for component in device['DeviceReport']['Components']:
curr_ver = str(component['ComponentCurrentVersion'])
avail_ver = str(component['ComponentVersion'])
upd_action = str(component['ComponentUpdateAction'])
update_crit = str(component['ComponentCriticality'])
reboot_req = str(component['ComponentRebootRequired'])
comp_name = str(component['ComponentName'])
print("\n---------------------------------------------------")
print("Device =", device_name)
print("IPAddress =", device_ip)
print("Current Ver =", curr_ver)
print("Avail Ver =", avail_ver)
print("Action =", upd_action)
print("Criticality =", update_crit)
print("Reboot Req =", reboot_req)
print("Component Name =", comp_name)
if avail_ver > curr_ver:
temp_map = {'Id': device['DeviceId'],
'Data': str(component['ComponentSourceName']) + "=" + file_token, 'TargetType': {}}
temp_map['TargetType']['Id'] = int(device['DeviceReport']['DeviceTypeId'])
temp_map['TargetType']['Name'] = str(device['DeviceReport']['DeviceTypeName'])
target_data.append(temp_map)
else:
print("Unable to get components DUP applies to .. Exiting")
return target_data
def form_job_payload_for_update(target_data):
""" Formulate the payload to initiate a firmware update job """
payload = {
"Id": 0,
"JobName": "Firmware Update Task",
"JobDescription": "dup test",
"Schedule": "startnow",
"State": "Enabled",
"CreatedBy": "admin",
"JobType": {
"Id": 5,
"Name": "Update_Task"
},
"Targets": target_data,
"Params": [
{
"JobId": 0,
"Key": "operationName",
"Value": "INSTALL_FIRMWARE"
},
{
"JobId": 0,
"Key": "complianceUpdate",
"Value": "false"
},
{
"JobId": 0,
"Key": "stagingValue",
"Value": "false"
},
{
"JobId": 0,
"Key": "signVerify",
"Value": "true"
}
]
}
return payload
def spawn_update_job(ip_address, headers, job_payload):
""" Spawns an update job and tracks it to completion """
job_id = -1
job_url = 'https://%s/api/JobService/Jobs' % ip_address
job_resp = requests.post(job_url, headers=headers,
json=job_payload,
verify=False)
if job_resp.status_code == 201:
job_id = (job_resp.json())['Id']
print("Successfully spawned update job", job_id)
else:
print("Unable to spawn update job .. Exiting")
return job_id
def track_job_to_completion(ip_address, headers, job_id):
""" Tracks the update job to completion / error """
job_status_map = {
"2020": "Scheduled",
"2030": "Queued",
"2040": "Starting",
"2050": "Running",
"2060": "Completed",
"2070": "Failed",
"2090": "Warning",
"2080": "New",
"2100": "Aborted",
"2101": "Paused",
"2102": "Stopped",
"2103": "Canceled"
}
max_retries = 20
sleep_interval = 60
failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
job_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, job_id)
loop_ctr = 0
job_incomplete = True
print("Polling %s to completion ..." % job_id)
while loop_ctr < max_retries:
loop_ctr += 1
time.sleep(sleep_interval)
job_resp = requests.get(job_url, headers=headers, verify=False)
if job_resp.status_code == 200:
job_status = str((job_resp.json())['LastRunStatus']['Id'])
print("Iteration %s: Status of %s is %s" % (loop_ctr, job_id, job_status_map[job_status]))
if int(job_status) == 2060:
job_incomplete = False
print("Completed updating firmware successfully ... Exiting")
break
elif int(job_status) in failed_job_status:
job_incomplete = False
print("Update job failed ... ")
job_hist_url = str(job_url) + "/ExecutionHistories"
job_hist_resp = requests.get(job_hist_url, headers=headers, verify=False)
if job_hist_resp.status_code == 200:
job_history_id = str((job_hist_resp.json())['value'][0]['Id'])
job_hist_det_url = str(job_hist_url) + "(" + job_history_id + ")/ExecutionHistoryDetails"
job_hist_det_resp = requests.get(job_hist_det_url,
headers=headers,
verify=False)
if job_hist_det_resp.status_code == 200:
print(job_hist_det_resp.text)
else:
print("Unable to parse job execution history .. Exiting")
break
else:
print("Unable to poll status of %s - Iteration %s " % (job_id, loop_ctr))
if job_incomplete:
print("Job %s incomplete after polling %s times...Check status" % (job_id, max_retries))
if __name__ == '__main__':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", required=True, help="OME Appliance IP")
parser.add_argument("--user", required=False,
help="Username for OME Appliance",
default="admin")
parser.add_argument("--password", required=False,
help="Password for OME Appliance")
parser.add_argument("--dupfile", required=True,
help="Path to DUP file that will be flashed")
mutex_group = parser.add_mutually_exclusive_group(required=True)
mutex_group.add_argument("--groupid", type=int,
help="Id of the group to update")
mutex_group.add_argument("--deviceid", type=int,
help="Id of the device to update")
args = parser.parse_args()
ip_address = args.ip
user_name = args.user
if not args.password:
if not sys.stdin.isatty():
# notify user that they have a bad terminal
            # perhaps if os.name == 'nt', prompt them to use winpty?
print("Your terminal is not compatible with Python's getpass module. You will need to provide the"
" --password argument instead. See https://stackoverflow.com/a/58277159/4427375")
sys.exit(0)
else:
password = getpass()
else:
password = args.password
dup_file = args.dupfile
param_map = {}
target_data = []
try:
auth_success, headers = authenticate_with_ome(ip_address, user_name,
password)
if auth_success:
if args.groupid:
group_id = args.groupid
param_map['group_id'] = group_id
param_map['device_id'] = None
group_list = get_group_list(ip_address, headers)
if group_list:
if group_id in group_list:
pass
else:
raise ValueError("Group %s not found on %s ... Exiting" % (group_id, ip_address))
else:
device_id = args.deviceid
param_map['device_id'] = device_id
param_map['group_id'] = None
device_list = get_device_list(ip_address, headers)
if device_list:
if device_id in device_list:
pass
else:
raise ValueError("Device %s not found on %s ... Exiting" % (device_id, ip_address))
upload_success, file_token = upload_dup_file(ip_address, headers,
dup_file)
if upload_success:
report_payload = get_dup_applicability_payload(file_token, param_map)
if report_payload:
print("Determining which components the DUP file applies to ... ")
target_data = get_applicable_components(ip_address,
headers,
report_payload)
if target_data:
print("Forming job payload for update ... ")
job_payload = form_job_payload_for_update(target_data)
job_id = spawn_update_job(ip_address,
headers,
job_payload)
if job_id != -1:
track_job_to_completion(ip_address, headers,
job_id)
else:
print("No components available for update ... Exiting")
else:
print("Unable to authenticate with OME .. Check IP/Username/Pwd")
except Exception as error:
print("Unexpected error:", str(error))
| 41.536408
| 115
| 0.576988
|
a1e63556e813d011a2b71b116a345f8427cb53c3
| 106,746
|
py
|
Python
|
src/opserver/opserver.py
|
sagarc-contrail/contrail-controller
|
834302367f3ff81f1ce93f4036b6b3788dfd6994
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/opserver.py
|
sagarc-contrail/contrail-controller
|
834302367f3ff81f1ce93f4036b6b3788dfd6994
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/opserver.py
|
sagarc-contrail/contrail-controller
|
834302367f3ff81f1ce93f4036b6b3788dfd6994
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Opserver
#
# Operational State Server for VNC
#
from gevent import monkey
monkey.patch_all()
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from collections import namedtuple
TableSchema = namedtuple("TableSchema", ("name", "datatype", "index", "suffixes"))
from uveserver import UVEServer
import gevent
import math
import os
import sys
import ConfigParser
import bottle
import json
import uuid
import argparse
import time
import redis
import base64
import socket
import struct
import signal
import random
import hashlib
import errno
import copy
import datetime
import pycassa
import platform
from analytics_db import AnalyticsDb
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pysandesh.util import UTCTimestampUsec
from pysandesh.sandesh_base import *
from pysandesh.sandesh_session import SandeshWriter
from pysandesh.gen_py.sandesh_trace.ttypes import SandeshTraceRequest
from pysandesh.connection_info import ConnectionState
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, CategoryNames,\
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT, \
ANALYTICS_API_SERVER_DISCOVERY_SERVICE_NAME, ALARM_GENERATOR_SERVICE_NAME, \
OpServerAdminPort, CLOUD_ADMIN_ROLE, APIAAAModes, \
AAA_MODE_CLOUD_ADMIN, AAA_MODE_NO_AUTH, \
ServicesDefaultConfigurationFiles, SERVICE_OPSERVER
from sandesh.viz.constants import _TABLES, _OBJECT_TABLES,\
_OBJECT_TABLE_SCHEMA, _OBJECT_TABLE_COLUMN_VALUES, \
_STAT_TABLES, STAT_OBJECTID_FIELD, STAT_VT_PREFIX, \
STAT_TIME_FIELD, STAT_TIMEBIN_FIELD, STAT_UUID_FIELD, \
STAT_SOURCE_FIELD, SOURCE, MODULE
from sandesh.viz.constants import *
from sandesh.analytics.ttypes import *
from sandesh.nodeinfo.ttypes import NodeStatusUVE, NodeStatus
from sandesh.nodeinfo.cpuinfo.ttypes import *
from sandesh.nodeinfo.process_info.ttypes import *
from opserver_util import OpServerUtils
from opserver_util import AnalyticsDiscovery
from sandesh_req_impl import OpserverSandeshReqImpl
from sandesh.analytics_database.ttypes import *
from sandesh.analytics_database.constants import PurgeStatusString
from sandesh.analytics.ttypes import DbInfoSetRequest, \
DbInfoGetRequest, DbInfoResponse
from overlay_to_underlay_mapper import OverlayToUnderlayMapper, \
OverlayToUnderlayMapperError
from generator_introspect_util import GeneratorIntrospectUtil
from stevedore import hook, extension
from partition_handler import PartInfo, UveStreamer, UveCacheProcessor
from functools import wraps
from vnc_cfg_api_client import VncCfgApiClient
from opserver_local import LocalApp
_ERRORS = {
errno.EBADMSG: 400,
errno.ENOBUFS: 403,
errno.EINVAL: 404,
errno.ENOENT: 410,
errno.EIO: 500,
errno.EBUSY: 503
}
@bottle.error(400)
@bottle.error(403)
@bottle.error(404)
@bottle.error(410)
@bottle.error(500)
@bottle.error(503)
def opserver_error(err):
return err.body
#end opserver_error
class LinkObject(object):
def __init__(self, name, href):
self.name = name
self.href = href
# end __init__
# end class LinkObject
class ContrailGeventServer(bottle.GeventServer):
def run(self, handler):
from gevent import wsgi as wsgi_fast, pywsgi, monkey, local
if self.options.get('monkey', True):
import threading
if not threading.local is local.local: monkey.patch_all()
wsgi = wsgi_fast if self.options.get('fast') else pywsgi
self.srv = wsgi.WSGIServer((self.host, self.port), handler)
self.srv.serve_forever()
def stop(self):
if hasattr(self, 'srv'):
self.srv.stop()
gevent.sleep(0)
def obj_to_dict(obj):
# Non-null fields in object get converted to json fields
return dict((k, v) for k, v in obj.__dict__.iteritems())
# end obj_to_dict
def redis_query_start(host, port, redis_password, qid, inp, columns):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
for key, value in inp.items():
redish.hset("QUERY:" + qid, key, json.dumps(value))
col_list = []
if columns is not None:
for col in columns:
m = TableSchema(name = col.name, datatype = col.datatype, index = col.index, suffixes = col.suffixes)
col_list.append(m._asdict())
query_metadata = {}
query_metadata['enqueue_time'] = OpServerUtils.utc_timestamp_usec()
redish.hset("QUERY:" + qid, 'query_metadata', json.dumps(query_metadata))
redish.hset("QUERY:" + qid, 'enqueue_time',
OpServerUtils.utc_timestamp_usec())
redish.hset("QUERY:" + qid, 'table_schema', json.dumps(col_list))
redish.lpush("QUERYQ", qid)
res = redish.blpop("REPLY:" + qid, 10)
if res is None:
return None
# Put the status back on the queue for the use of the status URI
redish.lpush("REPLY:" + qid, res[1])
resp = json.loads(res[1])
return int(resp["progress"])
# end redis_query_start
def redis_query_status(host, port, redis_password, qid):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
resp = {"progress": 0}
chunks = []
    # For now, the number of chunks will always be 1
res = redish.lrange("REPLY:" + qid, -1, -1)
if not res:
return None
chunk_resp = json.loads(res[0])
ttl = redish.ttl("REPLY:" + qid)
if int(ttl) != -1:
chunk_resp["ttl"] = int(ttl)
query_time = redish.hmget("QUERY:" + qid, ["start_time", "end_time"])
chunk_resp["start_time"] = query_time[0]
chunk_resp["end_time"] = query_time[1]
if chunk_resp["progress"] == 100:
chunk_resp["href"] = "/analytics/query/%s/chunk-final/%d" % (qid, 0)
chunks.append(chunk_resp)
resp["progress"] = chunk_resp["progress"]
resp["chunks"] = chunks
return resp
# end redis_query_status
def redis_query_chunk_iter(host, port, redis_password, qid, chunk_id):
redish = redis.StrictRedis(db=0, host=host, port=port,
password=redis_password)
iters = 0
fin = False
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
redish.persist("RESULT:" + qid + ":" + str(iters))
elems = redish.lrange("RESULT:" + qid + ":" + str(iters), 0, -1)
yield elems
if elems == []:
fin = True
else:
redish.delete("RESULT:" + qid + ":" + str(iters), 0, -1)
iters += 1
return
# end redis_query_chunk_iter
def redis_query_chunk(host, port, redis_password, qid, chunk_id):
res_iter = redis_query_chunk_iter(host, port, redis_password, qid, chunk_id)
dli = u''
starter = True
fin = False
yield u'{"value": ['
outcount = 0
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
elems = res_iter.next()
fin = True
for elem in elems:
fin = False
outcount += 1
if starter:
dli += '\n' + elem
starter = False
else:
dli += ', ' + elem
if not fin:
yield dli + '\n'
dli = u''
if outcount == 0:
yield '\n' + u']}'
else:
yield u']}'
return
# end redis_query_chunk
def redis_query_result(host, port, redis_password, qid):
try:
status = redis_query_status(host, port, redis_password, qid)
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
    except Exception as e:
        # No module-level logger is available here; just surface the error.
        yield bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if status is None:
yield bottle.HTTPError(_ERRORS[errno.ENOENT],
'Invalid query id (or) query result purged from DB')
if status['progress'] == 100:
for chunk in status['chunks']:
chunk_id = int(chunk['href'].rsplit('/', 1)[1])
for gen in redis_query_chunk(host, port, redis_password, qid,
chunk_id):
yield gen
else:
yield {}
return
# end redis_query_result
def redis_query_result_dict(host, port, redis_password, qid):
stat = redis_query_status(host, port, redis_password, qid)
prg = int(stat["progress"])
res = []
if (prg < 0) or (prg == 100):
done = False
gen = redis_query_result(host, port, redis_password, qid)
result = u''
while not done:
try:
result += gen.next()
#import pdb; pdb.set_trace()
except StopIteration:
done = True
res = (json.loads(result))['value']
return prg, res
# end redis_query_result_dict
def redis_query_info(redish, qid):
query_data = {}
query_dict = redish.hgetall('QUERY:' + qid)
query_metadata = json.loads(query_dict['query_metadata'])
del query_dict['query_metadata']
query_data['query_id'] = qid
query_data['query'] = str(query_dict)
query_data['enqueue_time'] = query_metadata['enqueue_time']
return query_data
# end redis_query_info
class OpStateServer(object):
def __init__(self, logger, redis_password=None):
self._logger = logger
self._redis_list = []
self._redis_password= redis_password
# end __init__
def update_redis_list(self, redis_list):
self._redis_list = redis_list
# end update_redis_list
def redis_publish(self, msg_type, destination, msg):
# Get the sandesh encoded in XML format
sandesh = SandeshWriter.encode_sandesh(msg)
msg_encode = base64.b64encode(sandesh)
redis_msg = '{"type":"%s","destination":"%s","message":"%s"}' \
% (msg_type, destination, msg_encode)
# Publish message in the Redis bus
for redis_server in self._redis_list:
redis_inst = redis.StrictRedis(redis_server[0],
redis_server[1], db=0,
password=self._redis_password)
try:
redis_inst.publish('analytics', redis_msg)
except redis.exceptions.ConnectionError:
self._logger.error('No Connection to Redis [%s:%d].'
'Failed to publish message.' \
% (redis_server[0], redis_server[1]))
return True
# end redis_publish
# end class OpStateServer
class AnalyticsApiStatistics(object):
def __init__(self, sandesh, obj_type):
self.obj_type = obj_type
self.time_start = UTCTimestampUsec()
self.api_stats = None
self.sandesh = sandesh
def collect(self, resp_size, resp_size_bytes):
time_finish = UTCTimestampUsec()
useragent = bottle.request.headers.get('X-Contrail-Useragent')
if not useragent:
useragent = bottle.request.headers.get('User-Agent')
# Create api stats object
self.api_stats = AnalyticsApiSample(
operation_type=bottle.request.method,
remote_ip=bottle.request.environ.get('REMOTE_ADDR'),
request_url=bottle.request.url,
object_type=self.obj_type,
response_time_in_usec=(time_finish - self.time_start),
response_size_objects=resp_size,
response_size_bytes=resp_size_bytes,
resp_code='200',
useragent=useragent,
node=self.sandesh.source_id())
def sendwith(self):
stats_log = AnalyticsApiStats(api_stats=self.api_stats,
sandesh=self.sandesh)
stats_log.send(sandesh=self.sandesh)
class OpServer(object):
"""
This class provides ReST API to get operational state of
Contrail VNS system.
The supported **GET** APIs are:
* ``/analytics/virtual-network/<name>``
* ``/analytics/virtual-machine/<name>``
* ``/analytics/vrouter/<name>``:
* ``/analytics/bgp-router/<name>``
* ``/analytics/bgp-peer/<name>``
* ``/analytics/xmpp-peer/<name>``
* ``/analytics/collector/<name>``
* ``/analytics/tables``:
* ``/analytics/table/<table>``:
* ``/analytics/table/<table>/schema``:
* ``/analytics/table/<table>/column-values``:
* ``/analytics/table/<table>/column-values/<column>``:
* ``/analytics/query/<queryId>``
* ``/analytics/query/<queryId>/chunk-final/<chunkId>``
* ``/analytics/send-tracebuffer/<source>/<module>/<name>``
* ``/analytics/operation/analytics-data-start-time``
The supported **POST** APIs are:
* ``/analytics/query``:
* ``/analytics/operation/database-purge``:
"""
def validate_user_token(func):
@wraps(func)
def _impl(self, *f_args, **f_kwargs):
if self._args.auth_conf_info.get('cloud_admin_access_only') and \
bottle.request.app == bottle.app():
user_token = bottle.request.headers.get('X-Auth-Token')
if not user_token or not \
self._vnc_api_client.is_role_cloud_admin(user_token):
raise bottle.HTTPResponse(status = 401,
body = 'Authentication required',
headers = self._reject_auth_headers())
return func(self, *f_args, **f_kwargs)
return _impl
# end validate_user_token
def _reject_auth_headers(self):
header_val = 'Keystone uri=\'%s\'' % \
self._args.auth_conf_info.get('auth_uri')
return { "WWW-Authenticate" : header_val }
def __init__(self, args_str=' '.join(sys.argv[1:])):
self.gevs = []
self._args = None
self._parse_args(args_str)
print args_str
self._homepage_links = []
self._homepage_links.append(
LinkObject('documentation', '/documentation/index.html'))
self._homepage_links.append(
LinkObject('Message documentation', '/documentation/messages/index.html'))
self._homepage_links.append(LinkObject('analytics', '/analytics'))
super(OpServer, self).__init__()
self._webserver = None
module = Module.OPSERVER
self._moduleid = ModuleNames[module]
node_type = Module2NodeType[module]
self._node_type_name = NodeTypeNames[node_type]
if self._args.worker_id:
self._instance_id = self._args.worker_id
else:
self._instance_id = INSTANCE_ID_DEFAULT
self.table = "ObjectCollectorInfo"
self._hostname = socket.gethostname()
if self._args.dup:
self._hostname += 'dup'
self._sandesh = Sandesh()
self.disk_usage_percentage = 0
self.pending_compaction_tasks = 0
opserver_sandesh_req_impl = OpserverSandeshReqImpl(self)
# Reset the sandesh send rate limit value
if self._args.sandesh_send_rate_limit is not None:
SandeshSystem.set_sandesh_send_rate_limit( \
self._args.sandesh_send_rate_limit)
self.random_collectors = self._args.collectors
if self._args.collectors:
self._chksum = hashlib.md5("".join(self._args.collectors)).hexdigest()
self.random_collectors = random.sample(self._args.collectors, \
len(self._args.collectors))
self._sandesh.init_generator(
self._moduleid, self._hostname, self._node_type_name,
self._instance_id, self.random_collectors, 'opserver_context',
int(self._args.http_server_port), ['opserver.sandesh'],
logger_class=self._args.logger_class,
logger_config_file=self._args.logging_conf,
config=self._args.sandesh_config)
self._sandesh.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file,
enable_syslog=self._args.use_syslog,
syslog_facility=self._args.syslog_facility)
ConnectionState.init(self._sandesh, self._hostname, self._moduleid,
self._instance_id,
staticmethod(ConnectionState.get_process_state_cb),
NodeStatusUVE, NodeStatus, self.table)
self._uvepartitions_state = None
# Trace buffer list
self.trace_buf = [
{'name':'DiscoveryMsg', 'size':1000}
]
# Create trace buffers
for buf in self.trace_buf:
self._sandesh.trace_buffer_create(name=buf['name'], size=buf['size'])
self._logger = self._sandesh._logger
self._get_common = self._http_get_common
self._put_common = self._http_put_common
self._delete_common = self._http_delete_common
self._post_common = self._http_post_common
self._collector_pool = None
self._state_server = OpStateServer(self._logger, self._args.redis_password)
body = gevent.queue.Queue()
self._vnc_api_client = None
if self._args.auth_conf_info.get('cloud_admin_access_only'):
self._vnc_api_client = VncCfgApiClient(self._args.auth_conf_info,
self._sandesh, self._logger)
self._uvedbstream = UveStreamer(self._logger, None, None,
self.get_agp, self._args.redis_password)
        # On older versions of linux, kafka cannot be
        # relied upon. DO NOT use it to serve UVEs
self._usecache = True
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
if PLATFORM.lower() == 'ubuntu':
if VERSION.find('12.') == 0:
self._usecache = False
if PLATFORM.lower() == 'centos':
if VERSION.find('6.') == 0:
self._usecache = False
if self._args.partitions == 0:
self._usecache = False
if not self._usecache:
self._logger.error("NOT using UVE Cache")
else:
self._logger.error("Initializing UVE Cache")
self._LEVEL_LIST = []
for k in SandeshLevel._VALUES_TO_NAMES:
if (k < SandeshLevel.UT_START):
d = {}
d[k] = SandeshLevel._VALUES_TO_NAMES[k]
self._LEVEL_LIST.append(d)
self._CATEGORY_MAP =\
dict((ModuleNames[k], [CategoryNames[ce] for ce in v])
for k, v in ModuleCategoryMap.iteritems())
self.agp = {}
if self._usecache:
ConnectionState.update(conn_type = ConnectionType.UVEPARTITIONS,
name = 'UVE-Aggregation', status = ConnectionStatus.INIT)
self._uvepartitions_state = ConnectionStatus.INIT
else:
ConnectionState.update(conn_type = ConnectionType.UVEPARTITIONS,
name = 'UVE-Aggregation', status = ConnectionStatus.UP)
self._uvepartitions_state = ConnectionStatus.UP
self.redis_uve_list = []
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
ad_freq = 10
us_freq = 5
is_local = True
for redis_uve in self._args.redis_uve_list:
redis_ip_port = redis_uve.split(':')
if redis_ip_port[0] != "127.0.0.1":
is_local = False
redis_elem = (redis_ip_port[0], int(redis_ip_port[1]))
self.redis_uve_list.append(redis_elem)
if is_local:
ad_freq = 2
us_freq = 2
if self._args.zk_list:
self._ad = AnalyticsDiscovery(self._logger,
','.join(self._args.zk_list),
ANALYTICS_API_SERVER_DISCOVERY_SERVICE_NAME,
self._hostname + "-" + self._instance_id,
{ALARM_GENERATOR_SERVICE_NAME:self.disc_agp},
self._args.zk_prefix,
ad_freq)
else:
self._ad = None
if self._args.partitions != 0:
# Assume all partitions are on the 1st redis server
# and there is only one redis server
redis_ip_port = self._args.redis_uve_list[0].split(':')
assert(len(self._args.redis_uve_list) == 1)
for part in range(0,self._args.partitions):
pi = PartInfo(ip_address = redis_ip_port[0],
acq_time = UTCTimestampUsec(),
instance_id = "0",
port = int(redis_ip_port[1]))
self.agp[part] = pi
self._uve_server = UVEServer(self.redis_uve_list,
self._logger,
self._args.redis_password,
self._uvedbstream, self._usecache,
freq = us_freq)
self._state_server.update_redis_list(self.redis_uve_list)
self._analytics_links = ['uves', 'uve-types', 'tables',
'queries', 'alarms', 'uve-stream', 'alarm-stream']
self._VIRTUAL_TABLES = copy.deepcopy(_TABLES)
listmgrs = extension.ExtensionManager('contrail.analytics.alarms')
for elem in listmgrs:
self._logger.info('Loaded extensions for %s: %s doc %s' % \
(elem.name , elem.entry_point, elem.plugin.__doc__))
for t in _OBJECT_TABLES:
obj = query_table(
name=t, display_name=_OBJECT_TABLES[t].objtable_display_name,
schema=_OBJECT_TABLE_SCHEMA,
columnvalues=_OBJECT_TABLE_COLUMN_VALUES)
self._VIRTUAL_TABLES.append(obj)
stat_tables = []
# read the stat table schemas from vizd first
for t in _STAT_TABLES:
attributes = []
for attr in t.attributes:
suffixes = []
if attr.suffixes:
for suffix in attr.suffixes:
suffixes.append(suffix)
attributes.append({"name":attr.name,"datatype":attr.datatype,"index":attr.index,"suffixes":suffixes})
new_table = {"stat_type":t.stat_type,
"stat_attr":t.stat_attr,
"display_name":t.display_name,
"obj_table":t.obj_table,
"attributes":attributes}
stat_tables.append(new_table)
# read all the _stats_tables.json files for remaining stat table schema
topdir = os.path.dirname(__file__) + "/stats_schema/"
extn = '_stats_tables.json'
stat_schema_files = []
for dirpath, dirnames, files in os.walk(topdir):
for name in files:
if name.lower().endswith(extn):
stat_schema_files.append(os.path.join(dirpath, name))
for schema_file in stat_schema_files:
with open(schema_file) as data_file:
data = json.load(data_file)
for _, tables in data.iteritems():
for table in tables:
if table not in stat_tables:
stat_tables.append(table)
for table in stat_tables:
stat_id = table["stat_type"] + "." + table["stat_attr"]
scols = []
keyln = stat_query_column(name=STAT_SOURCE_FIELD, datatype='string', index=True)
scols.append(keyln)
tln = stat_query_column(name=STAT_TIME_FIELD, datatype='int', index=False)
scols.append(tln)
tcln = stat_query_column(name="CLASS(" + STAT_TIME_FIELD + ")",
datatype='int', index=False)
scols.append(tcln)
teln = stat_query_column(name=STAT_TIMEBIN_FIELD, datatype='int', index=False)
scols.append(teln)
tecln = stat_query_column(name="CLASS(" + STAT_TIMEBIN_FIELD+ ")",
datatype='int', index=False)
scols.append(tecln)
uln = stat_query_column(name=STAT_UUID_FIELD, datatype='uuid', index=False)
scols.append(uln)
cln = stat_query_column(name="COUNT(" + table["stat_attr"] + ")",
datatype='int', index=False)
scols.append(cln)
isname = False
for aln in table["attributes"]:
if aln["name"]==STAT_OBJECTID_FIELD:
isname = True
if "suffixes" in aln.keys():
aln_col = stat_query_column(name=aln["name"], datatype=aln["datatype"], index=aln["index"], suffixes=aln["suffixes"]);
else:
aln_col = stat_query_column(name=aln["name"], datatype=aln["datatype"], index=aln["index"]);
scols.append(aln_col)
if aln["datatype"] in ['int','double']:
sln = stat_query_column(name= "SUM(" + aln["name"] + ")",
datatype=aln["datatype"], index=False)
scols.append(sln)
scln = stat_query_column(name= "CLASS(" + aln["name"] + ")",
datatype=aln["datatype"], index=False)
scols.append(scln)
sln = stat_query_column(name= "MAX(" + aln["name"] + ")",
datatype=aln["datatype"], index=False)
scols.append(sln)
scln = stat_query_column(name= "MIN(" + aln["name"] + ")",
datatype=aln["datatype"], index=False)
scols.append(scln)
scln = stat_query_column(name= "PERCENTILES(" + aln["name"] + ")",
datatype='percentiles', index=False)
scols.append(scln)
scln = stat_query_column(name= "AVG(" + aln["name"] + ")",
datatype='avg', index=False)
scols.append(scln)
if not isname:
keyln = stat_query_column(name=STAT_OBJECTID_FIELD, datatype='string', index=True)
scols.append(keyln)
sch = query_schema_type(type='STAT', columns=scols)
stt = query_table(
name = STAT_VT_PREFIX + "." + stat_id,
display_name = table["display_name"],
schema = sch,
columnvalues = [STAT_OBJECTID_FIELD, SOURCE])
self._VIRTUAL_TABLES.append(stt)
self._analytics_db = AnalyticsDb(self._logger,
self._args.cassandra_server_list,
self._args.redis_query_port,
self._args.redis_password,
self._args.cassandra_user,
self._args.cassandra_password,
self._args.cluster_id)
bottle.route('/', 'GET', self.homepage_http_get)
bottle.route('/analytics', 'GET', self.analytics_http_get)
bottle.route('/analytics/uves', 'GET', self.uves_http_get)
bottle.route('/analytics/uve-types', 'GET', self.uve_types_http_get)
bottle.route('/analytics/alarms/acknowledge', 'POST',
self.alarms_ack_http_post)
bottle.route('/analytics/query', 'POST', self.query_process)
bottle.route(
'/analytics/query/<queryId>', 'GET', self.query_status_get)
bottle.route('/analytics/query/<queryId>/chunk-final/<chunkId>',
'GET', self.query_chunk_get)
bottle.route('/analytics/queries', 'GET', self.show_queries)
bottle.route('/analytics/tables', 'GET', self.tables_process)
bottle.route('/analytics/operation/database-purge',
'POST', self.process_purge_request)
bottle.route('/analytics/operation/analytics-data-start-time',
'GET', self._get_analytics_data_start_time)
bottle.route('/analytics/table/<table>', 'GET', self.table_process)
bottle.route('/analytics/table/<table>/schema',
'GET', self.table_schema_process)
for i in range(0, len(self._VIRTUAL_TABLES)):
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
bottle.route('/analytics/table/<table>/column-values',
'GET', self.column_values_process)
bottle.route('/analytics/table/<table>/column-values/<column>',
'GET', self.column_process)
bottle.route('/analytics/send-tracebuffer/<source>/<module>/<instance_id>/<name>',
'GET', self.send_trace_buffer)
bottle.route('/doc-style.css', 'GET',
self.documentation_messages_css_get)
bottle.route('/documentation/messages/<module>',
'GET', self.documentation_messages_http_get)
bottle.route('/documentation/messages/<module>/<sfilename>',
'GET', self.documentation_messages_http_get)
bottle.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
bottle.route('/analytics/uve-stream', 'GET', self.uve_stream)
bottle.route('/analytics/alarm-stream', 'GET', self.alarm_stream)
bottle.route('/analytics/uves/<tables>', 'GET', self.dyn_list_http_get)
bottle.route('/analytics/uves/<table>/<name:path>', 'GET', self.dyn_http_get)
bottle.route('/analytics/uves/<tables>', 'POST', self.dyn_http_post)
bottle.route('/analytics/alarms', 'GET', self.alarms_http_get)
# start gevent to monitor disk usage and automatically purge
if (self._args.auto_db_purge):
self.gevs.append(gevent.spawn(self._auto_purge))
# end __init__
def _parse_args(self, args_str=' '.join(sys.argv[1:])):
'''
Eg. python opserver.py --host_ip 127.0.0.1
--redis_query_port 6379
--redis_password
--collectors 127.0.0.1:8086
--cassandra_server_list 127.0.0.1:9160
--http_server_port 8090
--rest_api_port 8081
--rest_api_ip 0.0.0.0
--log_local
--log_level SYS_DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--worker_id 0
--partitions 15
--zk_list 127.0.0.1:2181
--redis_uve_list 127.0.0.1:6379
--auto_db_purge
--zk_list 127.0.0.1:2181
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file", action='append',
help="Specify config file", metavar="FILE",
default=ServicesDefaultConfigurationFiles.get(
SERVICE_OPSERVER, None))
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'host_ip' : "127.0.0.1",
'collectors' : None,
'cassandra_server_list' : ['127.0.0.1:9160'],
'http_server_port' : 8090,
'rest_api_port' : 8081,
'rest_api_ip' : '0.0.0.0',
'log_local' : False,
'log_level' : 'SYS_DEBUG',
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'dup' : False,
'auto_db_purge' : True,
'db_purge_threshold' : 70,
'db_purge_level' : 40,
'analytics_data_ttl' : 48,
'analytics_config_audit_ttl' : -1,
'analytics_statistics_ttl' : -1,
'analytics_flow_ttl' : -1,
'logging_conf': '',
'logger_class': None,
'partitions' : 15,
'zk_list' : None,
'zk_prefix' : '',
'sandesh_send_rate_limit': SandeshSystem. \
get_sandesh_send_rate_limit(),
'aaa_mode' : AAA_MODE_CLOUD_ADMIN,
'api_server' : '127.0.0.1:8082',
'admin_port' : OpServerAdminPort,
'cloud_admin_role' : CLOUD_ADMIN_ROLE,
'api_server_use_ssl': False,
}
redis_opts = {
'redis_query_port' : 6379,
'redis_password' : None,
'redis_uve_list' : ['127.0.0.1:6379'],
}
database_opts = {
'cluster_id' : '',
}
cassandra_opts = {
'cassandra_user' : None,
'cassandra_password' : None,
}
keystone_opts = {
'auth_host': '127.0.0.1',
'auth_protocol': 'http',
'auth_port': 35357,
'admin_user': 'admin',
'admin_password': 'contrail123',
'admin_tenant_name': 'default-domain'
}
sandesh_opts = {
'sandesh_keyfile': '/etc/contrail/ssl/private/server-privkey.pem',
'sandesh_certfile': '/etc/contrail/ssl/certs/server.pem',
'sandesh_ca_cert': '/etc/contrail/ssl/certs/ca-cert.pem',
'sandesh_ssl_enable': False,
'introspect_ssl_enable': False
}
# read contrail-analytics-api own conf file
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read(args.conf_file)
if 'DEFAULTS' in config.sections():
defaults.update(dict(config.items("DEFAULTS")))
if 'REDIS' in config.sections():
redis_opts.update(dict(config.items('REDIS')))
if 'CASSANDRA' in config.sections():
cassandra_opts.update(dict(config.items('CASSANDRA')))
if 'KEYSTONE' in config.sections():
keystone_opts.update(dict(config.items('KEYSTONE')))
if 'SANDESH' in config.sections():
sandesh_opts.update(dict(config.items('SANDESH')))
if 'sandesh_ssl_enable' in config.options('SANDESH'):
sandesh_opts['sandesh_ssl_enable'] = config.getboolean(
'SANDESH', 'sandesh_ssl_enable')
if 'introspect_ssl_enable' in config.options('SANDESH'):
sandesh_opts['introspect_ssl_enable'] = config.getboolean(
'SANDESH', 'introspect_ssl_enable')
if 'DATABASE' in config.sections():
database_opts.update(dict(config.items('DATABASE')))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
defaults.update(redis_opts)
defaults.update(cassandra_opts)
defaults.update(database_opts)
defaults.update(keystone_opts)
defaults.update(sandesh_opts)
parser.set_defaults(**defaults)
parser.add_argument("--host_ip",
help="Host IP address")
parser.add_argument("--redis_server_port",
type=int,
help="Redis server port")
parser.add_argument("--redis_query_port",
type=int,
help="Redis query port")
parser.add_argument("--redis_password",
help="Redis server password")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument("--http_server_port",
type=int,
help="HTTP server port")
parser.add_argument("--rest_api_port",
type=int,
help="REST API port")
parser.add_argument("--rest_api_ip",
help="REST API IP address")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--dup", action="store_true",
help="Internal use")
parser.add_argument("--redis_uve_list",
help="List of redis-uve in ip:port format. For internal use only",
nargs="+")
parser.add_argument(
"--worker_id",
help="Worker Id")
parser.add_argument("--cassandra_server_list",
help="List of cassandra_server_ip in ip:port format",
nargs="+")
parser.add_argument("--auto_db_purge", action="store_true",
help="Automatically purge database if disk usage cross threshold")
parser.add_argument(
"--logging_conf",
help=("Optional logging configuration file, default: None"))
parser.add_argument(
"--logger_class",
help=("Optional external logger class, default: None"))
parser.add_argument("--cluster_id",
help="Analytics Cluster Id")
parser.add_argument("--cassandra_user",
help="Cassandra user name")
parser.add_argument("--cassandra_password",
help="Cassandra password")
parser.add_argument("--partitions", type=int,
help="Number of partitions for hashing UVE keys")
parser.add_argument("--zk_list",
help="List of zookeepers in ip:port format",
nargs="+")
parser.add_argument("--zk_prefix",
help="System Prefix for zookeeper")
parser.add_argument("--sandesh_send_rate_limit", type=int,
help="Sandesh send rate limit in messages/sec")
parser.add_argument("--cloud_admin_role",
help="Name of cloud-admin role")
parser.add_argument("--aaa_mode", choices=APIAAAModes,
help="AAA mode")
parser.add_argument("--auth_host",
help="IP address of keystone server")
parser.add_argument("--auth_protocol",
help="Keystone authentication protocol")
parser.add_argument("--auth_port", type=int,
help="Keystone server port")
parser.add_argument("--admin_user",
help="Name of keystone admin user")
parser.add_argument("--admin_password",
help="Password of keystone admin user")
parser.add_argument("--admin_tenant_name",
help="Tenant name for keystone admin user")
parser.add_argument("--api_server",
help="Address of VNC API server in ip:port format")
parser.add_argument("--admin_port",
help="Port with local auth for admin access")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument("--sandesh_keyfile",
help="Sandesh ssl private key")
parser.add_argument("--sandesh_certfile",
help="Sandesh ssl certificate")
parser.add_argument("--sandesh_ca_cert",
help="Sandesh CA ssl certificate")
parser.add_argument("--sandesh_ssl_enable", action="store_true",
help="Enable ssl for sandesh connection")
parser.add_argument("--introspect_ssl_enable", action="store_true",
help="Enable ssl for introspect connection")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
if type(self._args.cassandra_server_list) is str:
self._args.cassandra_server_list = self._args.cassandra_server_list.split()
if type(self._args.zk_list) is str:
self._args.zk_list= self._args.zk_list.split()
auth_conf_info = {}
auth_conf_info['admin_user'] = self._args.admin_user
auth_conf_info['admin_password'] = self._args.admin_password
auth_conf_info['admin_tenant_name'] = self._args.admin_tenant_name
auth_conf_info['auth_protocol'] = self._args.auth_protocol
auth_conf_info['auth_host'] = self._args.auth_host
auth_conf_info['auth_port'] = self._args.auth_port
auth_conf_info['auth_uri'] = '%s://%s:%d' % (self._args.auth_protocol,
self._args.auth_host, self._args.auth_port)
auth_conf_info['api_server_use_ssl'] = self._args.api_server_use_ssl
auth_conf_info['cloud_admin_access_only'] = \
False if self._args.aaa_mode == AAA_MODE_NO_AUTH else True
auth_conf_info['cloud_admin_role'] = self._args.cloud_admin_role
auth_conf_info['admin_port'] = self._args.admin_port
api_server_info = self._args.api_server.split(':')
auth_conf_info['api_server_ip'] = api_server_info[0]
auth_conf_info['api_server_port'] = int(api_server_info[1])
self._args.auth_conf_info = auth_conf_info
self._args.conf_file = args.conf_file
self._args.sandesh_config = SandeshConfig(self._args.sandesh_keyfile,
self._args.sandesh_certfile, self._args.sandesh_ca_cert,
self._args.sandesh_ssl_enable, self._args.introspect_ssl_enable)
# end _parse_args
def get_args(self):
return self._args
# end get_args
def get_http_server_port(self):
return int(self._args.http_server_port)
# end get_http_server_port
def get_uve_server(self):
return self._uve_server
# end get_uve_server
def homepage_http_get(self):
json_body = {}
json_links = []
base_url = bottle.request.urlparts.scheme + \
'://' + bottle.request.urlparts.netloc
for link in self._homepage_links:
json_links.append(
{'link': obj_to_dict(
LinkObject(link.name, base_url + link.href))})
json_body = \
{"href": base_url,
"links": json_links
}
return json_body
# end homepage_http_get
def cleanup_uve_streamer(self, gv):
self.gevs.remove(gv)
def _serve_streams(self, alarmsonly):
req = bottle.request.query
try:
filters = OpServer._uve_filter_set(req)
except Exception as e:
return bottle.HTTPError(_ERRORS[errno.EBADMSG], e)
if alarmsonly:
filters['cfilt'] = {'UVEAlarms':set()}
kfilter = filters.get('kfilt')
patterns = None
if kfilter is not None:
patterns = set()
for filt in kfilter:
patterns.add(self._uve_server.get_uve_regex(filt))
bottle.response.set_header('Content-Type', 'text/event-stream')
bottle.response.set_header('Cache-Control', 'no-cache')
# This is needed to detect when the client hangs up
rfile = bottle.request.environ['wsgi.input'].rfile
body = gevent.queue.Queue()
ph = UveStreamer(self._logger, body, rfile, self.get_agp,
self._args.redis_password,
filters['tablefilt'], filters['cfilt'], patterns)
ph.set_cleanup_callback(self.cleanup_uve_streamer)
self.gevs.append(ph)
ph.start()
return body
@validate_user_token
def uve_stream(self):
return self._serve_streams(False)
@validate_user_token
def alarm_stream(self):
return self._serve_streams(True)
def documentation_http_get(self, filename):
return bottle.static_file(
filename, root='/usr/share/doc/contrail-analytics-api/html')
# end documentation_http_get
def documentation_messages_http_get(self, module, sfilename=None):
filename = module
if sfilename:
filename = module + '/' + sfilename
return bottle.static_file(
filename, root='/usr/share/doc/contrail-docs/html/messages')
# end documentation_messages_http_get
def documentation_messages_css_get(self):
return bottle.static_file('/doc-style.css',
root='/usr/share/doc/contrail-docs/html/messages')
# end documentation_messages_css_get
def _http_get_common(self, request):
return (True, '')
# end _http_get_common
def _http_put_common(self, request, obj_dict):
return (True, '')
# end _http_put_common
def _http_delete_common(self, request, id):
return (True, '')
# end _http_delete_common
def _http_post_common(self, request, obj_dict):
return (True, '')
# end _http_post_common
@staticmethod
def _get_redis_query_ip_from_qid(qid):
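        # The query id is a uuid1 whose node field encodes the analytics API
        # host IP (see _query below); recover the dotted-quad address from the
        # hex tail of the id.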
try:
ip = qid.rsplit('-', 1)[1]
redis_ip = socket.inet_ntop(socket.AF_INET,
struct.pack('>I', int(ip, 16)))
except Exception as err:
return None
return redis_ip
# end _get_redis_query_ip_from_qid
def _query_status(self, request, qid):
resp = {}
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
return bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid query id')
try:
resp = redis_query_status(host=redis_query_ip,
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
except redis.exceptions.ConnectionError:
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % e)
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if resp is None:
return bottle.HTTPError(_ERRORS[errno.ENOENT],
'Invalid query id or Abandoned query id')
resp_header = {'Content-Type': 'application/json'}
resp_code = 200
self._logger.debug("query [%s] status: %s" % (qid, resp))
return bottle.HTTPResponse(
json.dumps(resp), resp_code, resp_header)
# end _query_status
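    # Stream a single chunk of query results, read incrementally from the Redis
    # instance that owns the query.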
def _query_chunk(self, request, qid, chunk_id):
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
yield bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid query id')
try:
done = False
gen = redis_query_chunk(host=redis_query_ip,
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid, chunk_id=chunk_id)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.ENOENT], 'Error: %s' % e)
else:
self._logger.info(
"Query [%s] chunk #%d read at time %d"
% (qid, chunk_id, time.time()))
# end _query_chunk
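    # Validate a StatTable query's select_fields: plain and aggregated fields must
    # name columns in the table schema (COUNT must reference the table itself), and
    # a single query may not select both T (timestamp) and T= (time-bin).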
def _is_valid_stats_table_query(self, request, tabn):
isT_ = False
isT = False
for key, value in request.iteritems():
if key == "select_fields":
for select_field in value:
if select_field == STAT_TIME_FIELD:
isT = True
elif select_field.find(STAT_TIMEBIN_FIELD) == 0:
isT_ = True
else:
agg_field = select_field.split('(')
if len(agg_field) == 2:
oper = agg_field[0]
field = agg_field[1].split(')')[0]
if oper != "COUNT":
if field == STAT_TIME_FIELD:
isT = True
elif field == STAT_TIMEBIN_FIELD:
isT_ = True
else:
field_found = False
for column in self._VIRTUAL_TABLES[tabn].schema.columns:
if column.name == field:
if column.datatype != "":
field_found = True
if field_found == False:
reply = bottle.HTTPError(_ERRORS[errno.EINVAL], \
'Unknown field %s' %field)
return reply
elif field.split('.')[-1] != \
self._VIRTUAL_TABLES[tabn].name.split('.')[-1]:
reply = bottle.HTTPError(_ERRORS[errno.EINVAL], \
'Invalid COUNT field %s' %field)
return reply
elif len(agg_field) == 1:
field_found = False
for column in self._VIRTUAL_TABLES[tabn].schema.columns:
if column.name == select_field:
if column.datatype != "":
field_found = True
if field_found == False:
reply = bottle.HTTPError(_ERRORS[errno.EINVAL], \
'Invalid field %s' %select_field)
return reply
if isT and isT_:
reply = bottle.HTTPError(_ERRORS[errno.EINVAL], \
"Stats query cannot have both T and T=")
return reply
return None
# end _is_valid_stats_table_query
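    # Entry point for analytics queries: resolve the target table (virtual table,
    # dynamic UVE table or the overlay-to-underlay flow map), start the query on
    # the Query Engine via Redis, then either return a 202 status URI (async mode)
    # or stream the result inline (sync mode).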
def _query(self, request):
reply = {}
try:
redis_query_ip, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, self._args.host_ip))
qid = str(uuid.uuid1(redis_query_ip))
self._logger.info('Received Query: %s' % (str(request.json)))
self._logger.info("Starting Query %s" % qid)
tabl = ""
for key, value in request.json.iteritems():
if key == "table":
tabl = value
self._logger.info("Table is " + tabl)
tabn = None
for i in range(0, len(self._VIRTUAL_TABLES)):
if self._VIRTUAL_TABLES[i].name == tabl:
tabn = i
if (tabn is not None) and (tabl.find("StatTable") == 0):
query_err = self._is_valid_stats_table_query(request.json, tabn)
if query_err is not None:
yield query_err
return
if (tabn is not None):
tabtypes = {}
for cols in self._VIRTUAL_TABLES[tabn].schema.columns:
if cols.datatype in ['long', 'int']:
tabtypes[cols.name] = 'int'
elif cols.datatype in ['ipv4']:
tabtypes[cols.name] = 'ipv4'
else:
tabtypes[cols.name] = 'string'
self._logger.info(str(tabtypes))
if (tabn is None):
if not tabl.startswith("StatTable."):
tables = self._uve_server.get_tables()
if not tabl in tables:
reply = bottle.HTTPError(_ERRORS[errno.ENOENT],
'Table %s not found' % tabl)
yield reply
return
else:
self._logger.info("Schema not known for dynamic table %s" % tabl)
if tabl == OVERLAY_TO_UNDERLAY_FLOW_MAP:
overlay_to_underlay_map = OverlayToUnderlayMapper(
request.json, 'localhost',
self._args.auth_conf_info['admin_port'],
self._args.auth_conf_info['admin_user'],
self._args.auth_conf_info['admin_password'], self._logger)
try:
yield overlay_to_underlay_map.process_query()
except OverlayToUnderlayMapperError as e:
yield bottle.HTTPError(_ERRORS[errno.EIO], str(e))
return
prg = redis_query_start('127.0.0.1',
int(self._args.redis_query_port),
self._args.redis_password,
qid, request.json,
self._VIRTUAL_TABLES[tabn].schema.columns
if tabn else None)
if prg is None:
self._logger.error('QE Not Responding')
yield bottle.HTTPError(_ERRORS[errno.EBUSY],
'Query Engine is not responding')
return
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
redish = None
if prg < 0:
cod = -prg
self._logger.error(
"Query Failed. Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
else:
self._logger.info(
"Query Accepted at time %d , Progress %d"
% (time.time(), prg))
# In Async mode, we should return with "202 Accepted" here
                # and also give back the status URI "/analytics/query/<qid>"
                # OpServer's client will poll the status URI
if request.get_header('Expect') == '202-accepted' or\
request.get_header('Postman-Expect') == '202-accepted':
href = '/analytics/query/%s' % (qid)
resp_data = json.dumps({'href': href})
yield bottle.HTTPResponse(
resp_data, 202, {'Content-type': 'application/json'})
else:
for gen in self._sync_query(request, qid):
yield gen
# end _query
def _sync_query(self, request, qid):
        # In Sync mode, keep polling the query status until the final result is
        # available
try:
self._logger.info("Polling %s for query result" % ("REPLY:" + qid))
prg = 0
done = False
while not done:
gevent.sleep(1)
resp = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
# We want to print progress only if it has changed
if int(resp["progress"]) == prg:
continue
self._logger.info(
"Query Progress is %s time %d" % (str(resp), time.time()))
prg = int(resp["progress"])
# Either there was an error, or the query is complete
if (prg < 0) or (prg == 100):
done = True
if prg < 0:
cod = -prg
self._logger.error("Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
return
            # In Sync mode, it's time to read the final result. Status is in
            # "resp"
done = False
gen = redis_query_result(host='127.0.0.1',
port=int(self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=qid)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
'''
final_res = {}
prg, final_res['value'] =\
redis_query_result_dict(host=self._args.redis_server_ip,
port=int(self._args.redis_query_port),
qid=qid)
yield json.dumps(final_res)
'''
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
self._logger.info(
"Query Result available at time %d" % time.time())
return
# end _sync_query
@validate_user_token
def query_process(self):
self._post_common(bottle.request, None)
result = self._query(bottle.request)
return result
# end query_process
@validate_user_token
def query_status_get(self, queryId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
return self._query_status(bottle.request, queryId)
# end query_status_get
def query_chunk_get(self, queryId, chunkId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
return self._query_chunk(bottle.request, queryId, int(chunkId))
# end query_chunk_get
@validate_user_token
def show_queries(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
queries = {}
try:
redish = redis.StrictRedis(db=0, host='127.0.0.1',
port=int(self._args.redis_query_port),
password=self._args.redis_password)
pending_queries = redish.lrange('QUERYQ', 0, -1)
pending_queries_info = []
for query_id in pending_queries:
query_data = redis_query_info(redish, query_id)
pending_queries_info.append(query_data)
queries['pending_queries'] = pending_queries_info
processing_queries = redish.lrange(
'ENGINE:' + socket.gethostname(), 0, -1)
processing_queries_info = []
abandoned_queries_info = []
error_queries_info = []
for query_id in processing_queries:
status = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
redis_password=self._args.redis_password,
qid=query_id)
query_data = redis_query_info(redish, query_id)
if status is None:
abandoned_queries_info.append(query_data)
elif status['progress'] < 0:
query_data['error_code'] = status['progress']
error_queries_info.append(query_data)
else:
query_data['progress'] = status['progress']
processing_queries_info.append(query_data)
queries['queries_being_processed'] = processing_queries_info
queries['abandoned_queries'] = abandoned_queries_info
queries['error_queries'] = error_queries_info
except redis.exceptions.ConnectionError:
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as err:
self._logger.error("Exception in show queries: %s" % str(err))
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % err)
else:
bottle.response.set_header('Content-Type', 'application/json')
return json.dumps(queries)
# end show_queries
@staticmethod
def _get_tfilter(cfilt):
tfilter = {}
for tfilt in cfilt:
afilt = tfilt.split(':')
try:
attr_list = tfilter[afilt[0]]
except KeyError:
tfilter[afilt[0]] = set()
attr_list = tfilter[afilt[0]]
finally:
if len(afilt) > 1:
attr_list.add(afilt[1])
tfilter[afilt[0]] = attr_list
return tfilter
# end _get_tfilter
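    # Build the UVE filter dictionary (tablefilt/cfilt/kfilt/sfilt/mfilt/ackfilt)
    # from the GET query string; cfilt entries of the form struct:attribute are
    # expanded by _get_tfilter above.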
@staticmethod
def _uve_filter_set(req):
filters = {}
filters['sfilt'] = req.get('sfilt')
filters['mfilt'] = req.get('mfilt')
if req.get('tablefilt'):
infos = req['tablefilt'].split(',')
filters['tablefilt'] = []
for tf in infos:
if tf and tf in UVE_MAP:
filters['tablefilt'].append(UVE_MAP[tf])
else:
filters['tablefilt'].append(tf)
else:
filters['tablefilt'] = None
if req.get('cfilt'):
infos = req['cfilt'].split(',')
filters['cfilt'] = OpServer._get_tfilter(infos)
else:
filters['cfilt'] = None
if req.get('kfilt'):
filters['kfilt'] = req['kfilt'].split(',')
else:
filters['kfilt'] = None
filters['ackfilt'] = req.get('ackfilt')
if filters['ackfilt'] is not None:
if filters['ackfilt'] != 'true' and filters['ackfilt'] != 'false':
raise ValueError('Invalid ackfilt. ackfilt must be true|false')
return filters
# end _uve_filter_set
@staticmethod
def _uve_http_post_filter_set(req):
filters = {}
try:
filters['kfilt'] = req['kfilt']
if not isinstance(filters['kfilt'], list):
raise ValueError('Invalid kfilt')
except KeyError:
filters['kfilt'] = ['*']
filters['sfilt'] = req.get('sfilt')
filters['mfilt'] = req.get('mfilt')
try:
cfilt = req['cfilt']
if not isinstance(cfilt, list):
raise ValueError('Invalid cfilt')
except KeyError:
filters['cfilt'] = None
else:
filters['cfilt'] = OpServer._get_tfilter(cfilt)
try:
ackfilt = req['ackfilt']
except KeyError:
filters['ackfilt'] = None
else:
if not isinstance(ackfilt, bool):
raise ValueError('Invalid ackfilt. ackfilt must be bool')
filters['ackfilt'] = 'true' if ackfilt else 'false'
return filters
# end _uve_http_post_filter_set
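    # POST handler for UVE queries: applies the filters supplied in the JSON body
    # and streams the matching UVEs back as one JSON document, collecting API
    # statistics along the way.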
@validate_user_token
def dyn_http_post(self, tables):
(ok, result) = self._post_common(bottle.request, None)
base_url = bottle.request.urlparts.scheme + \
'://' + bottle.request.urlparts.netloc
if not ok:
(code, msg) = result
bottle.abort(code, msg)
uve_type = tables
uve_tbl = uve_type
if uve_type in UVE_MAP:
uve_tbl = UVE_MAP[uve_type]
try:
req = bottle.request.json
filters = OpServer._uve_http_post_filter_set(req)
except Exception as err:
yield bottle.HTTPError(_ERRORS[errno.EBADMSG], err)
else:
stats = AnalyticsApiStatistics(self._sandesh, uve_type)
bottle.response.set_header('Content-Type', 'application/json')
yield u'{"value": ['
first = True
num = 0
byt = 0
for key in filters['kfilt']:
if key.find('*') != -1:
for gen in self._uve_server.multi_uve_get(uve_tbl, True,
filters,
base_url):
dp = json.dumps(gen)
byt += len(dp)
if first:
yield u'' + dp
first = False
else:
yield u', ' + dp
num += 1
stats.collect(num,byt)
stats.sendwith()
yield u']}'
return
first = True
for key in filters['kfilt']:
uve_name = uve_tbl + ':' + key
_, rsp = self._uve_server.get_uve(uve_name, True, filters,
base_url=base_url)
num += 1
if rsp != {}:
data = {'name': key, 'value': rsp}
dp = json.dumps(data)
byt += len(dp)
if first:
yield u'' + dp
first = False
else:
yield u', ' + dp
stats.collect(num,byt)
stats.sendwith()
yield u']}'
# end _uve_alarm_http_post
@validate_user_token
def dyn_http_get(self, table, name):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
base_url = bottle.request.urlparts.scheme + \
'://' + bottle.request.urlparts.netloc
if not ok:
(code, msg) = result
bottle.abort(code, msg)
uve_tbl = table
if table in UVE_MAP:
uve_tbl = UVE_MAP[table]
bottle.response.set_header('Content-Type', 'application/json')
uve_name = uve_tbl + ':' + name
req = bottle.request.query
try:
filters = OpServer._uve_filter_set(req)
except Exception as e:
yield bottle.HTTPError(_ERRORS[errno.EBADMSG], e)
flat = False
if 'flat' in req.keys() or any(filters.values()):
flat = True
stats = AnalyticsApiStatistics(self._sandesh, table)
uve_name = uve_tbl + ':' + name
if name.find('*') != -1:
flat = True
yield u'{"value": ['
first = True
if filters['kfilt'] is None:
filters['kfilt'] = [name]
num = 0
byt = 0
for gen in self._uve_server.multi_uve_get(uve_tbl, flat,
filters, base_url):
dp = json.dumps(gen)
byt += len(dp)
if first:
yield u'' + dp
first = False
else:
yield u', ' + dp
num += 1
stats.collect(num,byt)
stats.sendwith()
yield u']}'
else:
_, rsp = self._uve_server.get_uve(uve_name, flat, filters,
base_url=base_url)
dp = json.dumps(rsp)
stats.collect(1, len(dp))
stats.sendwith()
yield dp
# end dyn_http_get
@validate_user_token
def alarms_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
bottle.response.set_header('Content-Type', 'application/json')
req = bottle.request.query
try:
filters = OpServer._uve_filter_set(req)
except Exception as e:
return bottle.HTTPError(_ERRORS[errno.EBADMSG], e)
else:
filters['cfilt'] = { 'UVEAlarms':set() }
alarm_list = self._uve_server.get_alarms(filters)
alms = {}
for ak,av in alarm_list.iteritems():
alm_type = ak
if ak in _OBJECT_TABLES:
alm_type = _OBJECT_TABLES[ak].log_query_name
ulist = []
for uk, uv in av.iteritems():
ulist.append({'name':uk, 'value':uv})
alms[alm_type ] = ulist
if self._uvepartitions_state == ConnectionStatus.UP:
return json.dumps(alms)
else:
return bottle.HTTPError(_ERRORS[errno.EIO],json.dumps(alms))
# end alarms_http_get
@validate_user_token
def dyn_list_http_get(self, tables):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
arg_line = bottle.request.url.rsplit('/', 1)[1]
uve_args = arg_line.split('?')
uve_type = tables[:-1]
if len(uve_args) != 1:
uve_filters = ''
filters = uve_args[1].split('&')
filters = \
[filt for filt in filters if filt[:len('kfilt')] != 'kfilt']
if len(filters):
uve_filters = '&'.join(filters)
else:
uve_filters = 'flat'
else:
uve_filters = 'flat'
bottle.response.set_header('Content-Type', 'application/json')
uve_tbl = uve_type
if uve_type in UVE_MAP:
uve_tbl = UVE_MAP[uve_type]
req = bottle.request.query
try:
filters = OpServer._uve_filter_set(req)
except Exception as e:
return bottle.HTTPError(_ERRORS[errno.EBADMSG], e)
else:
uve_list = self._uve_server.get_uve_list(
uve_tbl, filters, True)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/uves/%s/' % (uve_type)
uve_links =\
[obj_to_dict(LinkObject(uve,
base_url + uve + "?" + uve_filters))
for uve in uve_list]
return json.dumps(uve_links)
# end dyn_list_http_get
@validate_user_token
def analytics_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/'
analytics_links = [obj_to_dict(LinkObject(link, base_url + link))
for link in self._analytics_links]
bottle.response.set_header('Content-Type', 'application/json')
return json.dumps(analytics_links)
# end analytics_http_get
@validate_user_token
def uves_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/uves/'
uvetype_links = []
# Show the list of UVE table-types based on actual raw UVE contents
tables = self._uve_server.get_tables()
known = set()
for apiname,rawname in UVE_MAP.iteritems():
known.add(rawname)
entry = obj_to_dict(LinkObject(apiname + 's',
base_url + apiname + 's'))
uvetype_links.append(entry)
for rawname in tables:
if not rawname in known:
entry = obj_to_dict(LinkObject(rawname + 's',
base_url + rawname + 's'))
uvetype_links.append(entry)
bottle.response.set_header('Content-Type', 'application/json')
if self._uvepartitions_state == ConnectionStatus.UP:
return json.dumps(uvetype_links)
else:
return bottle.HTTPError(_ERRORS[errno.EIO],json.dumps(uvetype_links))
# end _uves_http_get
@validate_user_token
def uve_types_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
uve_types = {}
for name, info in _OBJECT_TABLES.iteritems():
if info.is_uve:
uve_types[info.log_query_name] = {
'global_system_object': info.global_system_object}
bottle.response.set_header('Content-Type', 'application/json')
return json.dumps(uve_types)
# end uve_types_http_get
@validate_user_token
def alarms_ack_http_post(self):
self._post_common(bottle.request, None)
if ('application/json' not in bottle.request.headers['Content-Type']):
self._logger.error('Content-type is not JSON')
return bottle.HTTPError(_ERRORS[errno.EBADMSG],
'Content-Type must be JSON')
self._logger.info('Alarm Acknowledge request: %s' %
(bottle.request.json))
alarm_ack_fields = set(['table', 'name', 'type', 'token'])
bottle_req_fields = set(bottle.request.json.keys())
if len(alarm_ack_fields - bottle_req_fields):
return bottle.HTTPError(_ERRORS[errno.EINVAL],
'Alarm acknowledge request does not contain the fields '
'{%s}' % (', '.join(alarm_ack_fields - bottle_req_fields)))
try:
table = UVE_MAP[bottle.request.json['table']]
except KeyError:
# If the table name is not present in the UVE_MAP, then
# send the raw table name to the generator.
table = bottle.request.json['table']
        # Decode the generator ip, introspect port and timestamp from
        # the token field.
try:
token = json.loads(base64.b64decode(bottle.request.json['token']))
except (TypeError, ValueError):
self._logger.error('Alarm Ack Request: Failed to decode "token"')
return bottle.HTTPError(_ERRORS[errno.EINVAL],
'Failed to decode "token"')
exp_token_fields = set(['host_ip', 'http_port', 'timestamp'])
actual_token_fields = set(token.keys())
if len(exp_token_fields - actual_token_fields):
self._logger.error('Alarm Ack Request: Invalid token value')
return bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid token value')
generator_introspect = GeneratorIntrospectUtil(token['host_ip'],
token['http_port'], self._args.sandesh_config)
try:
res = generator_introspect.send_alarm_ack_request(
table, bottle.request.json['name'],
bottle.request.json['type'], token['timestamp'])
except Exception as e:
self._logger.error('Alarm Ack Request: Introspect request failed')
return bottle.HTTPError(_ERRORS[errno.EBUSY],
'Failed to process the Alarm Ack Request')
self._logger.debug('Alarm Ack Response: %s' % (res))
if res['status'] == 'false':
return bottle.HTTPError(_ERRORS[errno.EIO], res['error_msg'])
self._logger.info('Alarm Ack Request successfully processed')
return bottle.HTTPResponse(status=200)
# end alarms_ack_http_post
@validate_user_token
def send_trace_buffer(self, source, module, instance_id, name):
response = {}
trace_req = SandeshTraceRequest(name)
if module not in ModuleIds:
response['status'] = 'fail'
response['error'] = 'Invalid module'
return json.dumps(response)
module_id = ModuleIds[module]
node_type = Module2NodeType[module_id]
node_type_name = NodeTypeNames[node_type]
if self._state_server.redis_publish(msg_type='send-tracebuffer',
destination=source + ':' +
node_type_name + ':' + module +
':' + instance_id,
msg=trace_req):
response['status'] = 'pass'
else:
response['status'] = 'fail'
response['error'] = 'No connection to Redis'
return json.dumps(response)
# end send_trace_buffer
@validate_user_token
def tables_process(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/'
json_links = []
known = set()
for i in range(0, len(self._VIRTUAL_TABLES)):
known.add(self._VIRTUAL_TABLES[i].name)
link = LinkObject(self._VIRTUAL_TABLES[
i].name, base_url + self._VIRTUAL_TABLES[i].name)
tbl_info = obj_to_dict(link)
tbl_info['type'] = self._VIRTUAL_TABLES[i].schema.type
if (self._VIRTUAL_TABLES[i].display_name is not None):
tbl_info['display_name'] =\
self._VIRTUAL_TABLES[i].display_name
json_links.append(tbl_info)
# Show the list of UVE table-types based on actual raw UVE contents
tables = self._uve_server.get_tables()
for rawname in tables:
if not rawname in known:
link = LinkObject(rawname, base_url + rawname)
tbl_info = obj_to_dict(link)
tbl_info['type'] = 'OBJECT'
tbl_info['display_name'] = rawname
json_links.append(tbl_info)
bottle.response.set_header('Content-Type', 'application/json')
return json.dumps(json_links)
# end tables_process
def get_purge_cutoff(self, purge_input, start_times):
        # currently the analytics start time is not used directly;
        # purge_input is interpreted as the percentage of the retained
        # time range (bounded by the TTLs) for which data has to be purged
purge_cutoff = {}
current_time = UTCTimestampUsec()
self._logger.error("start times:" + str(start_times))
analytics_ttls = self._analytics_db.get_analytics_ttls()
analytics_time_range = min(
(current_time - start_times[SYSTEM_OBJECT_START_TIME]),
60*60*1000000*analytics_ttls[SYSTEM_OBJECT_GLOBAL_DATA_TTL])
flow_time_range = min(
(current_time - start_times[SYSTEM_OBJECT_FLOW_START_TIME]),
60*60*1000000*analytics_ttls[SYSTEM_OBJECT_FLOW_DATA_TTL])
stat_time_range = min(
(current_time - start_times[SYSTEM_OBJECT_STAT_START_TIME]),
60*60*1000000*analytics_ttls[SYSTEM_OBJECT_STATS_DATA_TTL])
# currently using config audit TTL for message table (to be changed)
msg_time_range = min(
(current_time - start_times[SYSTEM_OBJECT_MSG_START_TIME]),
60*60*1000000*analytics_ttls[SYSTEM_OBJECT_CONFIG_AUDIT_TTL])
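        # Each cutoff is current_time minus (100 - purge_input)% of the
        # corresponding time range, so purge_input=100 purges everything
        # collected so far.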
purge_cutoff['flow_cutoff'] = int(current_time - (float(100 - purge_input)*
float(flow_time_range)/100.0))
purge_cutoff['stats_cutoff'] = int(current_time - (float(100 - purge_input)*
float(stat_time_range)/100.0))
purge_cutoff['msg_cutoff'] = int(current_time - (float(100 - purge_input)*
float(msg_time_range)/100.0))
purge_cutoff['other_cutoff'] = int(current_time - (float(100 - purge_input)*
float(analytics_time_range)/100.0))
return purge_cutoff
#end get_purge_cutoff
@validate_user_token
def process_purge_request(self):
self._post_common(bottle.request, None)
if ("application/json" not in bottle.request.headers['Content-Type']):
self._logger.error('Content-type is not JSON')
response = {
'status': 'failed', 'reason': 'Content-type is not JSON'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
start_times = self._analytics_db.get_analytics_start_time()
if (start_times == None):
self._logger.info("Failed to get the analytics start time")
response = {'status': 'failed',
'reason': 'Failed to get the analytics start time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EIO],
{'Content-type': 'application/json'})
analytics_start_time = start_times[SYSTEM_OBJECT_START_TIME]
purge_cutoff = {}
if ("purge_input" in bottle.request.json.keys()):
value = bottle.request.json["purge_input"]
if (type(value) is int):
if ((value <= 100) and (value > 0)):
purge_cutoff = self.get_purge_cutoff(float(value), start_times)
else:
response = {'status': 'failed',
'reason': 'Valid % range is [1, 100]'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
elif (type(value) is unicode):
try:
purge_input = OpServerUtils.convert_to_utc_timestamp_usec(value)
if (purge_input <= analytics_start_time):
response = {'status': 'failed',
'reason': 'purge input is less than analytics start time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EIO],
{'Content-type': 'application/json'})
# cutoff time for purging flow data
purge_cutoff['flow_cutoff'] = purge_input
# cutoff time for purging stats data
purge_cutoff['stats_cutoff'] = purge_input
# cutoff time for purging message tables
purge_cutoff['msg_cutoff'] = purge_input
# cutoff time for purging other tables
purge_cutoff['other_cutoff'] = purge_input
except:
response = {'status': 'failed',
'reason': 'Valid time formats are: \'%Y %b %d %H:%M:%S.%f\', '
'\'now\', \'now-h/m/s\', \'-/h/m/s\' in purge_input'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
else:
response = {'status': 'failed',
'reason': 'Valid purge_input format is % or time'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
else:
response = {'status': 'failed',
'reason': 'purge_input not specified'}
return bottle.HTTPResponse(
json.dumps(response), _ERRORS[errno.EBADMSG],
{'Content-type': 'application/json'})
res = self._analytics_db.get_analytics_db_purge_status(
self._state_server._redis_list)
if (res == None):
purge_request_ip, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, self._args.host_ip))
purge_id = str(uuid.uuid1(purge_request_ip))
resp = self._analytics_db.set_analytics_db_purge_status(purge_id,
purge_cutoff)
if (resp == None):
self.gevs.append(gevent.spawn(self.db_purge_operation,
purge_cutoff, purge_id))
response = {'status': 'started', 'purge_id': purge_id}
return bottle.HTTPResponse(json.dumps(response), 200,
{'Content-type': 'application/json'})
elif (resp['status'] == 'failed'):
return bottle.HTTPResponse(json.dumps(resp), _ERRORS[errno.EBUSY],
{'Content-type': 'application/json'})
elif (res['status'] == 'running'):
return bottle.HTTPResponse(json.dumps(res), 200,
{'Content-type': 'application/json'})
elif (res['status'] == 'failed'):
return bottle.HTTPResponse(json.dumps(res), _ERRORS[errno.EBUSY],
{'Content-type': 'application/json'})
# end process_purge_request
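    # Perform the purge: delete rows older than the cutoffs, update the stored
    # analytics start times when rows were removed, and publish a
    # DatabasePurgeInfo UVE describing the outcome.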
def db_purge_operation(self, purge_cutoff, purge_id):
self._logger.info("purge_id %s START Purging!" % str(purge_id))
purge_stat = DatabasePurgeStats()
purge_stat.request_time = UTCTimestampUsec()
purge_info = DatabasePurgeInfo(sandesh=self._sandesh)
self._analytics_db.number_of_purge_requests += 1
total_rows_deleted, purge_status_details = \
self._analytics_db.db_purge(purge_cutoff, purge_id)
self._analytics_db.delete_db_purge_status()
if (total_rows_deleted > 0):
# update start times in cassandra
start_times = {}
start_times[SYSTEM_OBJECT_START_TIME] = purge_cutoff['other_cutoff']
start_times[SYSTEM_OBJECT_FLOW_START_TIME] = purge_cutoff['flow_cutoff']
start_times[SYSTEM_OBJECT_STAT_START_TIME] = purge_cutoff['stats_cutoff']
start_times[SYSTEM_OBJECT_MSG_START_TIME] = purge_cutoff['msg_cutoff']
self._analytics_db._update_analytics_start_time(start_times)
end_time = UTCTimestampUsec()
duration = end_time - purge_stat.request_time
purge_stat.purge_id = purge_id
if (total_rows_deleted < 0):
purge_stat.purge_status = PurgeStatusString[PurgeStatus.FAILURE]
self._logger.error("purge_id %s purging Failed" % str(purge_id))
else:
purge_stat.purge_status = PurgeStatusString[PurgeStatus.SUCCESS]
self._logger.info("purge_id %s purging DONE" % str(purge_id))
purge_stat.purge_status_details = ', '.join(purge_status_details)
purge_stat.rows_deleted = total_rows_deleted
purge_stat.duration = duration
purge_info.name = self._hostname
purge_info.stats = purge_stat
purge_info.send(sandesh=self._sandesh)
#end db_purge_operation
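    # Forward the latest disk-usage percentage and pending-compaction-task count
    # to the Collector by publishing a db-info message over Redis.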
def handle_db_info(self,
disk_usage_percentage = None,
pending_compaction_tasks = None):
if (disk_usage_percentage != None):
self.disk_usage_percentage = disk_usage_percentage
if (pending_compaction_tasks != None):
self.pending_compaction_tasks = pending_compaction_tasks
source = self._hostname
module_id = Module.COLLECTOR
module = ModuleNames[module_id]
node_type = Module2NodeType[module_id]
node_type_name = NodeTypeNames[node_type]
instance_id_str = INSTANCE_ID_DEFAULT
destination = source + ':' + node_type_name + ':' \
+ module + ':' + instance_id_str
req = DbInfoSetRequest(disk_usage_percentage, pending_compaction_tasks)
if (disk_usage_percentage != None):
req.disk_usage_percentage = disk_usage_percentage
if (pending_compaction_tasks != None):
req.pending_compaction_tasks = pending_compaction_tasks
if self._state_server.redis_publish(msg_type='db-info',
destination=destination,
msg=req):
self._logger.info("redis-publish success for db_info usage(%u)"
" pending_compaction_tasks(%u)",
req.disk_usage_percentage,
req.pending_compaction_tasks);
else:
self._logger.error("redis-publish failure for db_info usage(%u)"
" pending_compaction_tasks(%u)",
req.disk_usage_percentage,
req.pending_compaction_tasks);
# end handle_db_info
def _auto_purge(self):
""" monitor dbusage continuously and purge the db accordingly """
# wait for 10 minutes before starting to monitor
gevent.sleep(10*60)
# continuously monitor and purge
while True:
trigger_purge = False
db_node_usage = self._analytics_db.get_dbusage_info(
'localhost',
self._args.auth_conf_info['admin_port'],
self._args.auth_conf_info['admin_user'],
self._args.auth_conf_info['admin_password'])
self._logger.info("node usage:" + str(db_node_usage) )
self._logger.info("threshold:" + str(self._args.db_purge_threshold))
# check database disk usage on each node
for node in db_node_usage:
if (int(db_node_usage[node]) >
int(self._args.db_purge_threshold)):
self._logger.error("Database usage of %d on %s"
" exceeds threshold", db_node_usage[node], node)
trigger_purge = True
break
else:
self._logger.info("Database usage of %d on %s does not"
" exceed threshold", db_node_usage[node], node)
# get max disk-usage-percentage value from dict
disk_usage_percentage = None
if (len(db_node_usage)):
disk_usage_percentage = \
int(math.ceil(max(db_node_usage.values())))
pending_compaction_tasks_info = \
self._analytics_db.get_pending_compaction_tasks(
'localhost',
self._args.auth_conf_info['admin_port'],
self._args.auth_conf_info['admin_user'],
self._args.auth_conf_info['admin_password'])
self._logger.info("node pending-compaction-tasks:" +
str(pending_compaction_tasks_info) )
# get max pending-compaction-tasks value from dict
pending_compaction_tasks = None
if (len(pending_compaction_tasks_info)):
pending_compaction_tasks = \
max(pending_compaction_tasks_info.values())
if ((disk_usage_percentage != None) or
(pending_compaction_tasks != None)):
self.handle_db_info(disk_usage_percentage,
pending_compaction_tasks)
# check if there is a purge already going on
purge_id = str(uuid.uuid1())
resp = self._analytics_db.get_analytics_db_purge_status(
self._state_server._redis_list)
if (resp != None):
trigger_purge = False
if (trigger_purge):
# trigger purge
start_times = self._analytics_db.get_analytics_start_time()
purge_cutoff = self.get_purge_cutoff(
(100.0 - float(self._args.db_purge_level)),
start_times)
self._logger.info("Starting purge")
self.db_purge_operation(purge_cutoff, purge_id)
self._logger.info("Ending purge")
gevent.sleep(60*30) # sleep for 30 minutes
# end _auto_purge
@validate_user_token
def _get_analytics_data_start_time(self):
analytics_start_time = (self._analytics_db.get_analytics_start_time())[SYSTEM_OBJECT_START_TIME]
response = {'analytics_data_start_time': analytics_start_time}
return bottle.HTTPResponse(
json.dumps(response), 200, {'Content-type': 'application/json'})
# end _get_analytics_data_start_time
def table_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/' + table + '/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
link = LinkObject('schema', base_url + 'schema')
json_links.append(obj_to_dict(link))
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
link = LinkObject(
'column-values', base_url + 'column-values')
json_links.append(obj_to_dict(link))
break
if(len(json_links) == 0):
# search the UVE table in raw UVE content
tables = self._uve_server.get_tables()
if table in tables:
link = LinkObject('schema', base_url + 'schema')
json_links.append(obj_to_dict(link))
link = LinkObject('column-values', base_url + 'column-values')
json_links.append(obj_to_dict(link))
bottle.response.set_header('Content-Type', 'application/json')
return json.dumps(json_links)
# end table_process
@validate_user_token
def table_schema_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
bottle.response.set_header('Content-Type', 'application/json')
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
return json.dumps(self._VIRTUAL_TABLES[i].schema,
default=lambda obj: obj.__dict__)
# Also check for the table in actual raw UVE contents
tables = self._uve_server.get_tables()
if table in tables:
return json.dumps(_OBJECT_TABLE_SCHEMA,
default=lambda obj: obj.__dict__)
return (json.dumps({}))
# end table_schema_process
@validate_user_token
def column_values_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/table/' + table + '/column-values/'
bottle.response.set_header('Content-Type', 'application/json')
json_links = []
found_table = False
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
found_table = True
for col in self._VIRTUAL_TABLES[i].columnvalues:
link = LinkObject(col, base_url + col)
json_links.append(obj_to_dict(link))
break
if (found_table == False):
# Also check for the table in actual raw UVE contents
tables = self._uve_server.get_tables()
if table in tables:
for col in _OBJECT_TABLE_COLUMN_VALUES:
link = LinkObject(col, base_url + col)
json_links.append(obj_to_dict(link))
return (json.dumps(json_links))
# end column_values_process
def generator_info(self, table, column):
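        # Resolve the possible values for a table column: Source and ModuleId come
        # from the NGENERATORS set in each UVE Redis instance, Category/Level come
        # from static maps, and the stats ObjectId is the key list of the backing
        # object table.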
if ((column == MODULE) or (column == SOURCE)):
sources = []
moduleids = []
ulist = self.redis_uve_list
for redis_uve in ulist:
redish = redis.StrictRedis(
db=1,
host=redis_uve[0],
port=redis_uve[1],
password=self._args.redis_password)
try:
for key in redish.smembers("NGENERATORS"):
source = key.split(':')[0]
module = key.split(':')[2]
if (sources.count(source) == 0):
sources.append(source)
if (moduleids.count(module) == 0):
moduleids.append(module)
except Exception as e:
self._logger.error('Exception: %s' % e)
if column == MODULE:
return moduleids
elif column == SOURCE:
return sources
elif (column == 'Category'):
return self._CATEGORY_MAP
elif (column == 'Level'):
return self._LEVEL_LIST
elif (column == STAT_OBJECTID_FIELD):
objtab = None
for t in self._VIRTUAL_TABLES:
if t.schema.type == 'STAT':
self._logger.error("found stat table %s" % t)
stat_table = STAT_VT_PREFIX + "." + \
t.stat_type + "." + t.stat_attr
if (table == stat_table):
objtab = t.obj_table
break
if (objtab != None) and (objtab != "None"):
return list(self._uve_server.get_uve_list(objtab))
return []
# end generator_info
@validate_user_token
def column_process(self, table, column):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
bottle.abort(code, msg)
bottle.response.set_header('Content-Type', 'application/json')
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
if self._VIRTUAL_TABLES[i].columnvalues.count(column) > 0:
return (json.dumps(self.generator_info(table, column)))
# Also check for the table in actual raw UVE contents
tables = self._uve_server.get_tables()
if table in tables:
return (json.dumps(self.generator_info(table, column)))
return (json.dumps([]))
# end column_process
def start_uve_server(self):
self._uve_server.run()
#end start_uve_server
def stop_webserver(self):
if self._webserver:
self._webserver.stop()
self._webserver = None
def start_webserver(self):
pipe_start_app = bottle.app()
try:
self._webserver = ContrailGeventServer(
host=self._args.rest_api_ip,
port=self._args.rest_api_port)
bottle.run(app=pipe_start_app, server=self._webserver)
except Exception as e:
self._logger.error("Exception: %s" % e)
sys.exit()
# end start_webserver
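    # Track AlarmGenerator partition ownership reported via discovery: for every
    # partition keep the publisher with the most recent acquisition time, and mark
    # the UVE-Aggregation connection UP only when all configured partitions are owned.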
def disc_agp(self, clist):
new_agp = {}
for elem in clist:
instance_id = elem['instance-id']
port = int(elem['redis-port'])
ip_address = elem['ip-address']
            # If the AlarmGenerator sends partitions as NULL, it is
            # unable to provide service
if not elem['partitions']:
continue
parts = json.loads(elem['partitions'])
for partstr,acq_time in parts.iteritems():
partno = int(partstr)
pi = PartInfo(instance_id = instance_id,
ip_address = ip_address,
acq_time = acq_time,
port = port)
if partno not in new_agp:
new_agp[partno] = pi
else:
if pi.acq_time > new_agp[partno].acq_time:
new_agp[partno] = pi
if len(new_agp) == self._args.partitions and \
len(self.agp) != self._args.partitions:
ConnectionState.update(conn_type = ConnectionType.UVEPARTITIONS,
name = 'UVE-Aggregation', status = ConnectionStatus.UP,
message = 'Partitions:%d' % len(new_agp))
self._uvepartitions_state = ConnectionStatus.UP
if self._usecache and len(new_agp) != self._args.partitions:
ConnectionState.update(conn_type = ConnectionType.UVEPARTITIONS,
name = 'UVE-Aggregation', status = ConnectionStatus.DOWN,
message = 'Partitions:%d' % len(new_agp))
self._uvepartitions_state = ConnectionStatus.DOWN
self.agp = new_agp
def get_agp(self):
return self.agp
def run(self):
self._uvedbstream.start()
self.gevs += [
self._uvedbstream,
gevent.spawn(self.start_webserver),
gevent.spawn(self.start_uve_server),
]
if self._ad is not None:
self._ad.start()
if self._vnc_api_client:
self.gevs.append(gevent.spawn(self._vnc_api_client.connect))
self._local_app = LocalApp(bottle.app(), self._args.auth_conf_info)
self.gevs.append(gevent.spawn(self._local_app.start_http_server))
try:
gevent.joinall(self.gevs)
except KeyboardInterrupt:
self._logger.error('Exiting on ^C')
except gevent.GreenletExit:
self._logger.error('Exiting on gevent-kill')
except:
raise
finally:
self._logger.error('stopping everything')
self.stop()
def stop(self):
self._sandesh._client._connection.set_admin_state(down=True)
self._sandesh.uninit()
self.stop_webserver()
if self._ad is not None:
self._ad.kill()
l = len(self.gevs)
for idx in range(0,l):
self._logger.error('killing %d of %d' % (idx+1, l))
self.gevs[0].kill()
self._logger.error('joining %d of %d' % (idx+1, l))
self.gevs[0].join()
self._logger.error('stopped %d of %d' % (idx+1, l))
self.gevs.pop(0)
def sigterm_handler(self):
self.stop()
exit()
def sighup_handler(self):
if self._args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read(self._args.conf_file)
if 'DEFAULTS' in config.sections():
try:
collectors = config.get('DEFAULTS', 'collectors')
if type(collectors) is str:
collectors = collectors.split()
new_chksum = hashlib.md5("".join(collectors)).hexdigest()
if new_chksum != self._chksum:
self._chksum = new_chksum
random_collectors = random.sample(collectors, len(collectors))
self._sandesh.reconfig_collectors(random_collectors)
except ConfigParser.NoOptionError as e:
pass
# end sighup_handler
def main(args_str=' '.join(sys.argv[1:])):
opserver = OpServer(args_str)
gevent.hub.signal(signal.SIGTERM, opserver.sigterm_handler)
""" @sighup
SIGHUP handler to indicate configuration changes
"""
gevent.hub.signal(signal.SIGHUP, opserver.sighup_handler)
gv = gevent.getcurrent()
gv._main_obj = opserver
opserver.run()
if __name__ == '__main__':
main()
| 41.762911 | 138 | 0.557417 |
9161d825f7864bca2a01a9e6e0a4719000a879f7 | 1,462 | py | Python | scripts/create-ecs-task.py | 094459/blogpost-airflow-hybrid | 3ed53c75049e0ab7e9ea364d9b069bab9e3f5806 | ["MIT"] | 1 | 2022-03-24T13:19:44.000Z | 2022-03-24T13:19:44.000Z | scripts/create-ecs-task.py | 094459/blogpost-airflow-hybrid | 3ed53c75049e0ab7e9ea364d9b069bab9e3f5806 | ["MIT"] | null | null | null | scripts/create-ecs-task.py | 094459/blogpost-airflow-hybrid | 3ed53c75049e0ab7e9ea364d9b069bab9e3f5806 | ["MIT"] | null | null | null |
#Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#SPDX-License-Identifier: Apache-2.0
import boto3
import json
client = boto3.client("ecs", region_name="eu-west-2")
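# Register an ECS task definition that targets the EXTERNAL (ECS Anywhere) launch
# type and ships container logs to CloudWatch via the awslogs driver.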
def create_task():
response = client.register_task_definition(
containerDefinitions=[
{
"name": "airflow-hybrid-boto3",
"image": "public.ecr.aws/a4b5h6u6/beachgeek:latest",
"cpu": 0,
"portMappings": [],
"essential": True,
"environment": [],
"mountPoints": [],
"volumesFrom": [],
"command": ["ricsue-airflow-hybrid","period1/temp.csv", "select * from customers WHERE location = \"Spain\"", "rds-airflow-hybrid","eu-west-2"],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/test-external",
"awslogs-region": "eu-west-2",
"awslogs-stream-prefix": "ecs"
}
}
}
],
taskRoleArn="arn:aws:iam::704533066374:role/ecsTaskExecutionRole",
executionRoleArn="arn:aws:iam::704533066374:role/ecsTaskExecutionRole",
family= "test-external",
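        # NOTE: the ECS API expects lowercase network modes ('bridge', 'host',
        # 'awsvpc', 'none'); "HOST" as written is likely to be rejected and was
        # probably intended to be 'host'.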
networkMode="HOST",
requiresCompatibilities= [
"EXTERNAL"
],
cpu= "256",
memory= "512")
| 34.809524 | 160 | 0.50684 |
2d806bd41964a2615435282dfb901c55dca336b7 | 1,563 | py | Python | qishibaikecrawler/crawler.py | solodom/crawler | fcaf8570b85452194bac18a49f5a61d88ca16a60 | ["MIT"] | null | null | null | qishibaikecrawler/crawler.py | solodom/crawler | fcaf8570b85452194bac18a49f5a61d88ca16a60 | ["MIT"] | null | null | null | qishibaikecrawler/crawler.py | solodom/crawler | fcaf8570b85452194bac18a49f5a61d88ca16a60 | ["MIT"] | null | null | null |
from urllib import request
import re
from bs4 import BeautifulSoup
def getcontent(url):
    # Impersonate a browser by sending a desktop User-Agent header
headers=('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36')
opener=request.build_opener()
opener.addheaders=[headers]
    # Install the opener globally
request.install_opener(opener)
    # The fetched page source is bytes and needs to be decoded into a string
data=request.urlopen(url).read().decode('utf-8')
    # Build a BeautifulSoup object from the document and a parser
soup=BeautifulSoup(data,'html.parser')
# return a list of h2 tags
usertag_list=soup.find_all('h2')
user_list=[]
# .string return the according tag's contents
for user in usertag_list:
        # The .string attribute only returns text for a single node; if the content contains <br/>, use get_text() instead
user_str=user.string
        # Strip the leading/trailing '\n'; strip() returns a new string because strings are immutable
user_str=user_str.strip('\n')
user_list.append(user_str)
# return a list of span tags with class attribute being 'content'
contenttag_list=soup.find_all('div',class_='content')
content_list=[]
for content in contenttag_list:
        # The content contains <br/>, so .string cannot be used; use get_text()
        # Use attribute access (.) to reach the child <span> node
content_str=content.span.get_text()
content_str=content_str.strip('\n')
content_list.append(content_str)
for i in range(1,len(user_list)+1):
        print('Content posted by user {} {}:\n{}'.format(str(i), user_list[i-1], content_list[i-1]))
base_url='https://www.qiushibaike.com/text/page/'
for i in range (1,3):
url=base_url+str(i)
    print('Page {}:'.format(str(i)))
getcontent(url)
| 33.255319 | 139 | 0.674984 |
15f3c4ec655c515a901c01c9a526baa6138e508c | 170 | py | Python | fwtheme_django_jasmin/__init__.py | cedadev/fwtheme-django-jasmin | bd1ab7a52244ca4d73a3a9d4c502132c955e8771 | ["MIT"] | 1 | 2019-09-08T05:39:31.000Z | 2019-09-08T05:39:31.000Z | fwtheme_django_jasmin/__init__.py | cedadev/fwtheme-django-jasmin | bd1ab7a52244ca4d73a3a9d4c502132c955e8771 | ["MIT"] | 1 | 2021-09-01T12:59:41.000Z | 2021-09-01T12:59:41.000Z | fwtheme_django_jasmin/__init__.py | cedadev/fwtheme-django-jasmin | bd1ab7a52244ca4d73a3a9d4c502132c955e8771 | ["MIT"] | null | null | null |
"""
Main module for the Django app.
"""
__author__ = "Matt Pritchard"
__copyright__ = "Copyright 2018 UK Science and Technology Facilities Council"
__version__ = "0.5"
| 18.888889 | 77 | 0.741176 |
4bc9ef3ca2b805444eda7e48953ff65a1b61daf7 | 466 | py | Python | dsmr_parser/value_types.py | Aeroid/dsmr_parser | dc2f4937391a0b5fdc2699979e679248571916f9 | ["MIT"] | 82 | 2016-11-07T13:59:38.000Z | 2022-03-21T14:48:45.000Z | dsmr_parser/value_types.py | Aeroid/dsmr_parser | dc2f4937391a0b5fdc2699979e679248571916f9 | ["MIT"] | 91 | 2016-09-10T20:13:18.000Z | 2022-03-07T20:51:35.000Z | dsmr_parser/value_types.py | Aeroid/dsmr_parser | dc2f4937391a0b5fdc2699979e679248571916f9 | ["MIT"] | 52 | 2016-11-06T19:38:32.000Z | 2022-03-19T23:16:34.000Z |
import datetime
import pytz
def timestamp(value):
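    # DSMR telegrams encode timestamps as YYMMDDhhmmssX, where the trailing X is
    # 'S' (DST / summer time) or 'W' (winter time).
    # Example (illustrative, not from the source): timestamp('161110184000W')
    # -> 2016-11-10 17:40:00+00:00, since Amsterdam is UTC+1 in winter.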
naive_datetime = datetime.datetime.strptime(value[:-1], '%y%m%d%H%M%S')
# TODO comment on this exception
if len(value) == 13:
is_dst = value[12] == 'S' # assume format 160322150000W
else:
is_dst = False
local_tz = pytz.timezone('Europe/Amsterdam')
localized_datetime = local_tz.localize(naive_datetime, is_dst=is_dst)
return localized_datetime.astimezone(pytz.utc)
| 24.526316 | 75 | 0.684549 |
b9fc9f3c7ea6f3c2aa7406ba6b1db69c8c191a2d | 15,011 | py | Python | scholarly/scholarly.py | gerardcanal/scholarly | 76b7a20f0ae4c1f6c69c3898ea47caa0bbaeb74d | ["Unlicense"] | null | null | null | scholarly/scholarly.py | gerardcanal/scholarly | 76b7a20f0ae4c1f6c69c3898ea47caa0bbaeb74d | ["Unlicense"] | null | null | null | scholarly/scholarly.py | gerardcanal/scholarly | 76b7a20f0ae4c1f6c69c3898ea47caa0bbaeb74d | ["Unlicense"] | null | null | null |
"""scholarly.py"""
from __future__ import absolute_import, division, print_function, unicode_literals
from bs4 import BeautifulSoup
import arrow
import bibtexparser
import codecs
import hashlib
import pprint
import random
import re
import requests
import sys
import time
_GOOGLEID = hashlib.md5(str(random.random()).encode('utf-8')).hexdigest()[:16]
_COOKIES = {'GSP': 'ID={0}:CF=4'.format(_GOOGLEID)}
_HEADERS = {
'accept-language': 'en-US,en',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/41.0.2272.76 Chrome/41.0.2272.76 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml'
}
_HOST = 'https://scholar.google.com'
_AUTHSEARCH = '/citations?view_op=search_authors&hl=en&mauthors={0}'
_CITATIONAUTH = '/citations?user={0}&hl=en'
_CITATIONPUB = '/citations?view_op=view_citation&citation_for_view={0}'
_KEYWORDSEARCH = '/citations?view_op=search_authors&hl=en&mauthors=label:{0}'
_PUBSEARCH = '/scholar?q={0}'
_SCHOLARPUB = '/scholar?oi=bibs&hl=en&cites={0}'
_CITATIONAUTHRE = r'user=([\w-]*)'
_CITATIONPUBRE = r'citation_for_view=([\w-]*:[\w-]*)'
_SCHOLARCITERE = r'gs_ocit\(event,\'([\w-]*)\''
_SCHOLARPUBRE = r'cites=([\w-]*)'
_EMAILAUTHORRE = r'Verified email at '
_SESSION = requests.Session()
_PAGESIZE = 100
def use_proxy(http='socks5://127.0.0.1:9050', https='socks5://127.0.0.1:9050'):
""" Routes scholarly through a proxy (e.g. tor).
Requires pysocks
Proxy must be running."""
_SESSION.proxies ={
'http': http,
'https': https
}
def _handle_captcha(url):
# TODO: PROBLEMS HERE! NEEDS ATTENTION
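    # NOTE: g_id and dest_url are never defined in this scope, so this handler
    # raises NameError if invoked; it is effectively dead code pending the TODO above.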
# Get the captcha image
captcha_url = _HOST + '/sorry/image?id={0}'.format(g_id)
captcha = _SESSION.get(captcha_url, headers=_HEADERS)
# Upload to remote host and display to user for human verification
img_upload = requests.post('http://postimage.org/',
files={'upload[]': ('scholarly_captcha.jpg', captcha.text)})
print(img_upload.text)
img_url_soup = BeautifulSoup(img_upload.text, 'html.parser')
img_url = img_url_soup.find_all(alt='scholarly_captcha')[0].get('src')
print('CAPTCHA image URL: {0}'.format(img_url))
# Need to check Python version for input
if sys.version[0]=="3":
g_response = input('Enter CAPTCHA: ')
else:
g_response = raw_input('Enter CAPTCHA: ')
# Once we get a response, follow through and load the new page.
url_response = _HOST+'/sorry/CaptchaRedirect?continue={0}&id={1}&captcha={2}&submit=Submit'.format(dest_url, g_id, g_response)
resp_captcha = _SESSION.get(url_response, headers=_HEADERS, cookies=_COOKIES)
print('Forwarded to {0}'.format(resp_captcha.url))
return resp_captcha.url
def _get_page(pagerequest):
"""Return the data for a page on scholar.google.com"""
# Note that we include a sleep to avoid overloading the scholar server
time.sleep(5+random.uniform(0, 5))
resp = _SESSION.get(pagerequest, headers=_HEADERS, cookies=_COOKIES)
if resp.status_code == 200:
return resp.text
if resp.status_code == 503:
# Inelegant way of dealing with the G captcha
raise Exception('Error: {0} {1}'.format(resp.status_code, resp.reason))
# TODO: Need to fix captcha handling
# dest_url = requests.utils.quote(_SCHOLARHOST+pagerequest)
# soup = BeautifulSoup(resp.text, 'html.parser')
# captcha_url = soup.find('img').get('src')
# resp = _handle_captcha(captcha_url)
# return _get_page(re.findall(r'https:\/\/(?:.*?)(\/.*)', resp)[0])
else:
raise Exception('Error: {0} {1}'.format(resp.status_code, resp.reason))
def _get_soup(pagerequest):
"""Return the BeautifulSoup for a page on scholar.google.com"""
html = _get_page(pagerequest)
html = html.replace(u'\xa0', u' ')
return BeautifulSoup(html, 'html.parser')
def _search_scholar_soup(soup):
"""Generator that returns Publication objects from the search page"""
while True:
for row in soup.find_all('div', 'gs_or'):
yield Publication(row, 'scholar')
if soup.find(class_='gs_ico gs_ico_nav_next'):
url = soup.find(class_='gs_ico gs_ico_nav_next').parent['href']
soup = _get_soup(_HOST+url)
else:
break
def _search_citation_soup(soup):
"""Generator that returns Author objects from the author search page"""
while True:
for row in soup.find_all('div', 'gsc_1usr'):
yield Author(row)
next_button = soup.find(class_='gs_btnPR gs_in_ib gs_btn_half gs_btn_lsb gs_btn_srt gsc_pgn_pnx')
if next_button and 'disabled' not in next_button.attrs:
url = next_button['onclick'][17:-1]
url = codecs.getdecoder("unicode_escape")(url)[0]
soup = _get_soup(_HOST+url)
else:
break
def _find_tag_class_name(__data, tag, text):
elements = __data.find_all(tag)
for element in elements:
if 'class' in element.attrs and text in element.attrs['class'][0]:
return element.attrs['class'][0]
class Publication(object):
"""Returns an object for a single publication"""
def __init__(self, __data, pubtype=None):
self.bib = dict()
self.source = pubtype
if self.source == 'citations':
self.bib['title'] = __data.find('a', class_='gsc_a_at').text
self.id_citations = re.findall(_CITATIONPUBRE, __data.find('a', class_='gsc_a_at')['data-href'])[0]
citedby = __data.find(class_='gsc_a_ac')
if citedby and not (citedby.text.isspace() or citedby.text == ''):
self.citedby = int(citedby.text)
year = __data.find(class_='gsc_a_h')
if year and year.text and not year.text.isspace() and len(year.text)>0:
self.bib['year'] = int(year.text)
elif self.source == 'scholar':
databox = __data.find('div', class_='gs_ri')
title = databox.find('h3', class_='gs_rt')
if title.find('span', class_='gs_ctu'): # A citation
title.span.extract()
elif title.find('span', class_='gs_ctc'): # A book or PDF
title.span.extract()
self.bib['title'] = title.text.strip()
if title.find('a'):
self.bib['url'] = title.find('a')['href']
authorinfo = databox.find('div', class_='gs_a')
self.bib['author'] = ' and '.join([i.strip() for i in authorinfo.text.split(' - ')[0].split(',')])
if databox.find('div', class_='gs_rs'):
self.bib['abstract'] = databox.find('div', class_='gs_rs').text
if self.bib['abstract'][0:8].lower() == 'abstract':
self.bib['abstract'] = self.bib['abstract'][9:].strip()
lowerlinks = databox.find('div', class_='gs_fl').find_all('a')
for link in lowerlinks:
if 'Import into BibTeX' in link.text:
self.url_scholarbib = link['href']
if 'Cited by' in link.text:
self.citedby = int(re.findall(r'\d+', link.text)[0])
self.id_scholarcitedby = re.findall(_SCHOLARPUBRE, link['href'])[0]
if __data.find('div', class_='gs_ggs gs_fl'):
self.bib['eprint'] = __data.find('div', class_='gs_ggs gs_fl').a['href']
self._filled = False
def fill(self):
"""Populate the Publication with information from its profile"""
if self.source == 'citations':
url = _CITATIONPUB.format(self.id_citations)
soup = _get_soup(_HOST+url)
self.bib['title'] = soup.find('div', id='gsc_vcd_title').text
if soup.find('a', class_='gsc_vcd_title_link'):
self.bib['url'] = soup.find('a', class_='gsc_vcd_title_link')['href']
for item in soup.find_all('div', class_='gs_scl'):
key = item.find(class_='gsc_vcd_field').text
val = item.find(class_='gsc_vcd_value')
if key == 'Authors':
self.bib['author'] = ' and '.join([i.strip() for i in val.text.split(',')])
elif key == 'Journal':
self.bib['journal'] = val.text
elif key == 'Volume':
self.bib['volume'] = val.text
elif key == 'Issue':
self.bib['number'] = val.text
elif key == 'Pages':
self.bib['pages'] = val.text
elif key == 'Publisher':
self.bib['publisher'] = val.text
elif key == 'Publication date':
self.bib['year'] = arrow.get(val.text).year
elif key == 'Description':
if val.text[0:8].lower() == 'abstract':
val = val.text[9:].strip()
self.bib['abstract'] = val
elif key == 'Total citations':
self.id_scholarcitedby = re.findall(_SCHOLARPUBRE, val.a['href'])[0]
# number of citation per year
years = [int(y.text) for y in soup.find_all(class_='gsc_vcd_g_t')]
cites = [int(c.text) for c in soup.find_all(class_='gsc_vcd_g_al')]
self.cites_per_year = dict(zip(years, cites))
if soup.find('div', class_='gsc_vcd_title_ggi'):
self.bib['eprint'] = soup.find('div', class_='gsc_vcd_title_ggi').a['href']
self._filled = True
elif self.source == 'scholar':
bibtex = _get_page(self.url_scholarbib)
self.bib.update(bibtexparser.loads(bibtex).entries[0])
self._filled = True
return self
def get_citedby(self):
"""Searches GScholar for other articles that cite this Publication and
returns a Publication generator.
"""
if not hasattr(self, 'id_scholarcitedby'):
self.fill()
if hasattr(self, 'id_scholarcitedby'):
url = _SCHOLARPUB.format(requests.utils.quote(self.id_scholarcitedby))
soup = _get_soup(_HOST+url)
return _search_scholar_soup(soup)
else:
return []
def __str__(self):
return pprint.pformat(self.__dict__)
class Author(object):
"""Returns an object for a single author"""
def __init__(self, __data):
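        # NOTE: the `unicode` name below only exists on Python 2; on Python 3 the
        # `or` short-circuits for plain strings, but any non-str argument makes the
        # second isinstance check raise NameError.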
if isinstance(__data, str) or isinstance(__data, unicode):
self.id = __data
else:
self.id = re.findall(_CITATIONAUTHRE, __data('a')[0]['href'])[0]
self.url_picture = _HOST+'/citations?view_op=medium_photo&user={}'.format(self.id)
self.name = __data.find('h3', class_=_find_tag_class_name(__data, 'h3', 'name')).text
affiliation = __data.find('div', class_=_find_tag_class_name(__data, 'div', 'aff'))
if affiliation:
self.affiliation = affiliation.text
email = __data.find('div', class_=_find_tag_class_name(__data, 'div', 'eml'))
if email:
self.email = re.sub(_EMAILAUTHORRE, r'@', email.text)
self.interests = [i.text.strip() for i in
__data.find_all('a', class_=_find_tag_class_name(__data, 'a', 'one_int'))]
citedby = __data.find('div', class_=_find_tag_class_name(__data, 'div', 'cby'))
if citedby and citedby.text != '':
self.citedby = int(citedby.text[9:])
self._filled = False
def fill(self):
"""Populate the Author with information from their profile"""
url_citations = _CITATIONAUTH.format(self.id)
url = '{0}&pagesize={1}'.format(url_citations, _PAGESIZE)
soup = _get_soup(_HOST+url)
self.name = soup.find('div', id='gsc_prf_in').text
self.affiliation = soup.find('div', class_='gsc_prf_il').text
self.interests = [i.text.strip() for i in soup.find_all('a', class_='gsc_prf_inta')]
        # h-index and i10-index, both overall and restricted to the last 5 years
index = soup.find_all('td', class_='gsc_rsb_std')
if index:
self.citedby = int(index[0].text)
self.citedby5y = int(index[1].text)
self.hindex = int(index[2].text)
self.hindex5y = int(index[3].text)
self.i10index = int(index[4].text)
self.i10index5y = int(index[5].text)
else:
self.hindex = self.hindex5y = self.i10index = self.i10index5y = 0
# number of citations per year
years = [int(y.text) for y in soup.find_all('span', class_='gsc_g_t')]
cites = [int(c.text) for c in soup.find_all('span', class_='gsc_g_al')]
self.cites_per_year = dict(zip(years, cites))
# co-authors
self.coauthors = []
for row in soup.find_all('span', class_='gsc_rsb_a_desc'):
new_coauthor = Author(re.findall(_CITATIONAUTHRE, row('a')[0]['href'])[0])
new_coauthor.name = row.find(tabindex="-1").text
new_coauthor.affiliation = row.find(class_="gsc_rsb_a_ext").text
self.coauthors.append(new_coauthor)
self.publications = list()
pubstart = 0
while True:
for row in soup.find_all('tr', class_='gsc_a_tr'):
new_pub = Publication(row, 'citations')
self.publications.append(new_pub)
if 'disabled' not in soup.find('button', id='gsc_bpf_more').attrs:
pubstart += _PAGESIZE
url = '{0}&cstart={1}&pagesize={2}'.format(url_citations, pubstart, _PAGESIZE)
soup = _get_soup(_HOST+url)
else:
break
self._filled = True
return self
def __str__(self):
return pprint.pformat(self.__dict__)
def search_pubs_query(query):
"""Search by scholar query and return a generator of Publication objects"""
url = _PUBSEARCH.format(requests.utils.quote(query))
soup = _get_soup(_HOST+url)
return _search_scholar_soup(soup)
def search_author(name):
"""Search by author name and return a generator of Author objects"""
url = _AUTHSEARCH.format(requests.utils.quote(name))
soup = _get_soup(_HOST+url)
return _search_citation_soup(soup)
def search_keyword(keyword):
"""Search by keyword and return a generator of Author objects"""
url = _KEYWORDSEARCH.format(requests.utils.quote(keyword))
soup = _get_soup(_HOST+url)
return _search_citation_soup(soup)
def search_pubs_custom_url(url):
"""Search by custom URL and return a generator of Publication objects
URL should be of the form '/scholar?q=...'"""
soup = _get_soup(_HOST+url)
return _search_scholar_soup(soup)
def search_author_custom_url(url):
"""Search by custom URL and return a generator of Publication objects
URL should be of the form '/citation?q=...'"""
soup = _get_soup(_HOST+url)
return _search_citation_soup(soup)
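# Added usage sketch (not part of the original module): the query strings below
# are placeholders, and live results depend on network access and on Google
# Scholar's current page markup matching the selectors above.
if __name__ == '__main__':
    first_pub = next(search_pubs_query('map data quality checks'))
    first_pub.fill()  # populate bib fields from the publication detail page
    print('{} ({})'.format(first_pub.bib.get('title'), first_pub.bib.get('year')))
    first_author = next(search_author('Jane Doe')).fill()
    print('{}: h-index {}'.format(first_author.name, getattr(first_author, 'hindex', None)))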
| 42.888571
| 154
| 0.607155
|
d3668c2cfc7b553c1d185bf3cc06da4900313da1
| 692
|
py
|
Python
|
ejercicios_basicos/funciones/funcion_closure.py
|
JuanDuran85/ejemplos_python
|
47aa49c65384ab89654f362f3da6cd2b0ef386e5
|
[
"Apache-2.0"
] | null | null | null |
ejercicios_basicos/funciones/funcion_closure.py
|
JuanDuran85/ejemplos_python
|
47aa49c65384ab89654f362f3da6cd2b0ef386e5
|
[
"Apache-2.0"
] | null | null | null |
ejercicios_basicos/funciones/funcion_closure.py
|
JuanDuran85/ejemplos_python
|
47aa49c65384ab89654f362f3da6cd2b0ef386e5
|
[
"Apache-2.0"
] | null | null | null |
# Closure concept in Python: a function that defines another function and can also return it.
# The nested function can access the local variables defined in the main (outer) function.
# Main (outer) function
def operacion(a, b):
    # define an inner (nested) function
    def sumar():
        return a + b
    # return the nested function
    return sumar
mi_funcion_closure = operacion(10, 20)
print(mi_funcion_closure())
# calling the function on the fly
print(operacion(10, 20)())
# main (outer) function using a lambda
def operacion_dos(a, b):
    # define the inner (nested) function with a lambda
    return lambda: a + b
print(operacion_dos(10, 20)())
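# An added illustrative sketch (not part of the original file): each closure
# keeps its own reference to the enclosing variables, so it can carry state.
def crear_contador():
    cuenta = [0]  # mutable cell so the nested function can update it
    def incrementar():
        cuenta[0] += 1
        return cuenta[0]
    return incrementar
contador = crear_contador()
print(contador())  # 1
print(contador())  # 2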
| 27.68
| 102
| 0.734104
|
a2af0b3eedba5a972d5f2d014d037b3dc8348ebe
| 985
|
py
|
Python
|
server/exercises/models.py
|
ni4ka7a/gym-track
|
71ad75f47849ccc6a5198c59fe2261796f705122
|
[
"MIT"
] | null | null | null |
server/exercises/models.py
|
ni4ka7a/gym-track
|
71ad75f47849ccc6a5198c59fe2261796f705122
|
[
"MIT"
] | null | null | null |
server/exercises/models.py
|
ni4ka7a/gym-track
|
71ad75f47849ccc6a5198c59fe2261796f705122
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
CATEGORY_CHOICES = (
("BR", "Barbel"),
("DM", "Dumbbell"),
("MA", "Machine"),
("CA", "Cardio"),
("WB", "Weighted Bodyweight")
)
BODY_PART_CHOICES = (
("CO", "Core"),
("AR", "Arms"),
("BA", "Back"),
("CH", "Chest"),
("LE", "Legs"),
("SH", "Shoulders"),
("OT", "Other"),
("FB", "Full Body"),
("CA", "Cardio"),
)
class Exercise(models.Model):
name = models.CharField(max_length=100)
description = models.CharField(max_length=500, blank=True)
category = models.CharField(
max_length=2, choices=CATEGORY_CHOICES, default="BR")
# TODO: rename to bodyPart and add migration
bodypart = models.CharField(
max_length=2, choices=BODY_PART_CHOICES, default="CO")
created_at = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(
User, related_name="exercises", on_delete=models.CASCADE, null=True)
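# Added ORM sketch (not part of the original app; assumes migrations have been
# applied and that `some_user` is an existing User instance):
#   Exercise.objects.create(name="Barbell Squat", category="BR",
#                           bodypart="LE", author=some_user)
#   Exercise.objects.filter(bodypart="LE").count()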
| 28.142857
| 76
| 0.619289
|
b577405fb8be3e59f2bc199de7068accd2ec3bbb
| 2,547
|
py
|
Python
|
ramsey/RamseyGame.py
|
bzhaocaltech/alpha-zero-ramsey-numbers
|
dfd10b577c1bc26c4f445bcc2fafa8c1bcf9cb6c
|
[
"MIT"
] | null | null | null |
ramsey/RamseyGame.py
|
bzhaocaltech/alpha-zero-ramsey-numbers
|
dfd10b577c1bc26c4f445bcc2fafa8c1bcf9cb6c
|
[
"MIT"
] | null | null | null |
ramsey/RamseyGame.py
|
bzhaocaltech/alpha-zero-ramsey-numbers
|
dfd10b577c1bc26c4f445bcc2fafa8c1bcf9cb6c
|
[
"MIT"
] | 1
|
2018-10-29T23:29:29.000Z
|
2018-10-29T23:29:29.000Z
|
from copy import deepcopy
import sys
sys.path.append('..')
from Game import Game
from Graph import Graph
class RamseyGame(Game):
def __init__(self, n, p, q):
assert p >= 2 and q >= 2
self.n = n
# For indexing purposes colors keys must be [1, 2, ...]
self.colors = {1: p, 2: q}
# Maps index to action
self.index_to_action = []
for c in self.colors:
for i in range(n):
for j in range(i + 1, n):
self.index_to_action.append((i, j, c))
# Maps action to index in action vector
def action_to_index(self, i, j, c):
assert i != j
if i > j:
i, j = j, i
return self.n * i + j + ((c-1) * self.n * (self.n-1) - (i+1) * (i+2)) // 2
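    # Added worked example for the mapping above (n = 4): each colour block has
    # n*(n-1)/2 = 6 edges, so colour 1 gives (0,1)->0, (0,2)->1, (0,3)->2,
    # (1,2)->3, (1,3)->4, (2,3)->5, and colour 2 gives (0,1)->6 ... (2,3)->11,
    # matching the order in which index_to_action is built in __init__.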
# Return a graph representing the initial board state
def getInitGraph(self):
# return initial graph (numpy graph)
return Graph(self.n)
# Get the graph size
def getGraphSize(self):
return self.n
def getActionSize(self):
return len(self.index_to_action)
def getNextStateFromAction(self, graph, action):
i, j, c = action
new_graph = deepcopy(graph)
new_graph.colorEdge(i, j, c, self.colors)
return new_graph
# Given a graph and an action, returns a new graph after that action has
# been made
def getNextState(self, graph, index):
return self.getNextStateFromAction(graph, self.index_to_action[index])
# Get all valid actions one-hot encoded
def getValidMoves(self, graph):
# return a fixed size binary vector
valid = [0] * self.getActionSize()
for i, j in graph.edgeIter():
if not graph.hasEdge(i, j):
for c in self.colors:
valid[self.action_to_index(i, j, c)] = 1
return valid
# Check if state is terminal by checking for monochromatic cliques of given size and color
# and if there are uncolored edges remaining
def getGameEnded(self, graph):
if graph.has_clique:
return True
return graph.num_edges == graph.total_edges
def stringRepresentation(self, graph):
return str(graph)
# Get the score of a graph. Equivalent to number of edges in the graph
# minus an additional 1 if the graph has a clique
def getScore(self, graph):
reward = graph.num_edges
return reward - 1 if graph.has_clique else reward
def getCanonicalForm(self, graph):
return graph.adj_mat
| 29.964706
| 94
| 0.601099
|
419fa5034c825621a948594d34fa8d00e03a0a13
| 2,890
|
py
|
Python
|
app/forms.py
|
TomCallR/ProjetNLP_jury
|
19919551051e9de9fd5fe263200a7da8405f51de
|
[
"BSD-3-Clause"
] | null | null | null |
app/forms.py
|
TomCallR/ProjetNLP_jury
|
19919551051e9de9fd5fe263200a7da8405f51de
|
[
"BSD-3-Clause"
] | null | null | null |
app/forms.py
|
TomCallR/ProjetNLP_jury
|
19919551051e9de9fd5fe263200a7da8405f51de
|
[
"BSD-3-Clause"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, DateField, SubmitField, SelectField, IntegerField, SelectMultipleField
from wtforms.validators import DataRequired, Email
class InitForm(FlaskForm):
submit = SubmitField("Initialiser la base")
class CourseCreateForm(FlaskForm):
label = StringField(
label="Intitulé",
validators=[DataRequired(message="Saisissez un intitulé")]
)
startdate = DateField(
label="Date de début (Y-m-d)",
# format='%d/%m/%Y', does not work
validators=[DataRequired(message="Saisissez une date de début")]
)
enddate = DateField(
label="Date de fin (Y-m-d)",
# format='%d/%m/%Y', does not work
validators=[DataRequired(message="Saisissez une date de fin")]
)
fileid = StringField(
label="Id du fichier associé (réponses aux questionnaires)",
validators=[DataRequired(message="Saisissez l'id' du fichier")]
)
submit = SubmitField("Ajouter")
class CourseDeleteForm(FlaskForm):
course = SelectField(
label="Formation",
# coerce=int,
validators=[DataRequired(message="Sélectionnez une formation")]
)
submit = SubmitField("Supprimer")
class StudentCreateForm(FlaskForm):
lastname = StringField(
label="Nom de famille",
validators=[DataRequired(message="Saisissez un nom de famille")]
)
firstname = StringField(
label="Prénom",
validators=[DataRequired(message="Saisissez un prénom")]
)
email = StringField(
label="Email",
validators=[DataRequired(message="Saisissez un email"),
Email(message="Email incorrect")]
)
course = SelectField(
label="Formation",
validators=[DataRequired(message="Sélectionnez une formation")]
)
submit = SubmitField("Ajouter")
class StudentDeleteForm(FlaskForm):
student = SelectField(
label="Etudiant",
# coerce=int,
validators=[DataRequired(message="Sélectionnez un étudiant")]
)
submit = SubmitField("Supprimer")
class SpreadsheetSelect(FlaskForm):
enddate = DateField(
label="Fin de formation postérieure au",
validators=[DataRequired(message="Saisissez une date")]
)
submit = SubmitField("Sélectionner")
class SheetsSelect(FlaskForm):
daysnochange = IntegerField(
label="Nombre de jours sans modification",
validators=[DataRequired(message="Saisissez un nombre de jours")]
)
submit = SubmitField("Mettre à jour")
class DashboardForm(FlaskForm):
courses = SelectMultipleField(
label="Formations"
)
startdate = DateField(
label="Date de début"
)
enddate = DateField(
label="Date de fin"
)
students = SelectMultipleField(
label="Etudiants"
)
submit = SubmitField("Analyser")
| 28.613861
| 103
| 0.653979
|
ec1e1152cf6e4fb30689e771231ccfa1dcd4d24b
| 14,137
|
py
|
Python
|
src/Building.py
|
ketsonroberto/PBDO
|
cdc1c5275bc17753be5c06a216f92391b6f1f1ab
|
[
"MIT"
] | null | null | null |
src/Building.py
|
ketsonroberto/PBDO
|
cdc1c5275bc17753be5c06a216f92391b6f1f1ab
|
[
"MIT"
] | null | null | null |
src/Building.py
|
ketsonroberto/PBDO
|
cdc1c5275bc17753be5c06a216f92391b6f1f1ab
|
[
"MIT"
] | null | null | null |
import numpy as np
import copy
import sys
# from BuildingProperties import *
class Structure:
    # Class to perform the structural analysis of a building whose properties are provided.
def __init__(self, building=None, columns=None, slabs=None, core=None, concrete=None, steel=None, cost=None):
self.building = building
self.columns = columns
self.slabs = slabs
self.core = core
self.concrete = concrete
self.steel = steel
self.cost = cost
def stiffness_story(self):
# Compute the stiffness for each story of the building.
# Read the information of the columns geometry and mechanical properties of the materials.
area_col = self.columns["area"]
moment_inertia_col = self.columns["Iy"]
height_col = self.columns["height"]
k_col = self.stiffness(area=area_col, moment_inertia=moment_inertia_col, height=height_col)
area_core = self.core["area"]
moment_inertia_core = self.core["Iy"]
height_core = self.core["height"]
k_core = self.stiffness(area=area_core, moment_inertia=moment_inertia_core, height=height_core)
num_col = self.columns["quantity"]
num_core = self.core["quantity"]
k_story = num_col * k_col + num_core * k_core
# Return the stiffness of the story.
return k_story
def stiffness(self, area=None, moment_inertia=None, height=None):
# Compute the stiffness given the mechanical and geometrical properties.
Gc = self.concrete["Gc"] # Shear modulus.
Ec = self.concrete["Ec"] # Concrete Young's module.
Es = self.steel["Es"] # Steel Young's module.
As = self.columns["v_steel"] * area # Area of steel.
Ac = area - As # Area of concrete.
E = (As * Es + Ac * Ec) / (area) # Effective Young's module.
ks = Gc * area / height
kf = 3 * E * moment_inertia / (height ** 3)
kt = 1 / ((1 / kf) + (1 / ks)) # Total stiffness.
return kt
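    # Added note: kf (flexural, 3EI/h^3) and ks (shear, GA/h) combine as springs
    # in series, 1/kt = 1/kf + 1/ks.  Illustrative numbers only: kf = 2e7 N/m and
    # ks = 6e7 N/m give kt = 1/(1/2e7 + 1/6e7) = 1.5e7 N/m, always below the
    # softer of the two components.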
def mass_storey(self, top_story=False):
# Compute the equivalent mass corresponding to each story.
num_col = self.columns["quantity"]
mslab = self.mass_slab()
mcol = self.mass_column()
if top_story:
# Do not consider the weight of the columns above the top floor.
mass_st = 0.5 * (num_col * mcol) + mslab
else:
mass_st = num_col * mcol + mslab
return mass_st
def mass_slab(self):
# Compute the mass of each slab.
ros = self.steel["density"]
ro = self.concrete["density"]
thickness = self.slabs["thickness"]
width = self.slabs["width"]
depth = self.slabs["depth"]
Vs = self.slabs["steel_rate"] * thickness * width * depth
Vc = thickness * width * depth - Vs
mass_s = ro * Vc + ros * Vs
return mass_s
def mass_column(self):
# Compute the mass of each column.
ros = self.steel["density"]
ro = self.concrete["density"]
height = self.columns["height"]
area = self.columns["area"]
As = self.columns["v_steel"] * area
Ac = area - As
mass_col = ro * Ac * height + ros * As * height # +stirups
return mass_col
def compression(self, col_size=None, L=None):
# Construct the Moment-Rotation diagram.
# For a single column
PA_ksi = 1.4503773800722e-7
fc_prime = self.concrete["fck"]
Ec = self.concrete["Ec"]
fy = self.steel["fy"]
Es = self.steel["Es"]
fc_prime = fc_prime * PA_ksi
ecu = 0.003
fy = fy * PA_ksi
Es = Es * PA_ksi
Ec = Ec * PA_ksi
ros = 0.08
b = col_size
b = b / 0.0254 # m to inch
h = copy.copy(b)
As = ros*b*h/2
As_prime = ros*b*h/2
d_prime = 2.5
d = b - d_prime
# Centroid
yc = h/2
n = Es / Ec
y_bar = (b * h * yc + (n - 1) * As_prime * d_prime + (n - 1) * As * d) / (
b * h + (n - 1) * As_prime + (n - 1) * As)
# uncracked moment of inertia.
Iun = (b * h ** 3 / 12) + b * h * (y_bar - yc) ** 2 + (n - 1) * As_prime * (y_bar - d_prime) ** 2 +\
(n - 1) * As * (d - y_bar) ** 2
        # tensile strength of concrete
ft = 7.5 * np.sqrt(1000 * fc_prime) / 1000
        # yd is the distance from y_bar to the bottom of the section
yd = h - y_bar
Mcr = ft * Iun / yd
phi_cr = Mcr / (Ec * Iun) # rad / in
M = np.zeros(4)
phi = np.zeros(4)
M[0] = 0
phi[0] = 0
M[1] = Mcr
phi[1] = phi_cr
# 2 - cracked transformed section.
# kd is the height of the compression section of the cracked section
# (b/2)*kd^2 + ((n-1)*As_prime + n*As)*kd - ((n-1)*As_prime*d_prime + n*As*d);
aa = b / 2
bb = (n - 1) * As_prime + n * As
cc = - ((n - 1) * As_prime * d_prime + n * As * d)
#depth of the neutral axis from the top of the section.
kd = (-bb + np.sqrt(bb ** 2 - 4 * aa * cc)) / (2 * aa)
Icr = (b * kd ** 3 / 12) + b * kd * (kd / 2) ** 2 + (n - 1) * As_prime * (kd - d_prime) ** 2 + (n) * As * (
d - kd) ** 2
phi_acr = Mcr / (Ec * Icr)
#M[2] = Mcr
#phi[2] = phi_acr
# 3 - Yield of steel or concrete non-linear
# cracked transformed section valid until fs=fy or fc = 0.7fc_prime
# steel yields es = ey = fy/Es
if d<=kd:
# No yielding
phi_y = sys.float_info.max
My = sys.float_info.max
else:
es = fy / Es
phi_y = es / (d - kd)
My = phi_y * Ec * Icr
# concrete nonlinear.
phi_con = 0.7 * (fc_prime / Ec) / kd
Mcon = phi_con * Ec * Icr #kip-in
# check which one occur first (yielding or nonlinear concrete)
if My < Mcon:
Mnl = My
phi_nl = phi_y
else:
Mnl = Mcon
phi_nl = phi_con
M[2] = Mnl
phi[2] = phi_nl
        # Find the nominal strength (ACI) from the stress-strain diagram.
if fc_prime <= 4:
b1 = 0.85
elif 4 < fc_prime <= 8:
b1 = 0.85 - 0.05 * (fc_prime - 4)
else:
b1 = 0.65
# Find c and fs_prime
cont = 1
c = 1
ct = 0.01
fs_prime = copy.copy(fy)
while abs(c / ct - 1) > 0.0002 and cont < 100:
c = (As * fy - As_prime * fs_prime) / (0.85 * fc_prime * b * b1)
if c==0:
c=0.00000000001
c=abs(c)
fs_prime = 0.003 * Es * ((c - d_prime) / c)
cont = cont + 1
phi_r = ecu / c
As2 = As_prime * fs_prime / fy
Mr = As2 * fy * (d - d_prime) + (As - As2) * fy * (d - b1 * c / 2)
M[3] = Mr
phi[3] = phi_r
return M, phi
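    # Added summary: the returned arrays trace a four-point moment-curvature
    # diagram -- origin (0, 0), cracking (Mcr, phi_cr), first yield or concrete
    # nonlinearity (Mnl, phi_nl), and nominal strength (Mr, phi_r) -- with
    # moments in kip-in and curvatures in rad/in, per the unit conversions above.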
def deformation_damage_index(self, B=None, stiffness=None, Mom=None, phi=None):
# Compute the Deformation Damage Index (DDI).
k = stiffness
Lc = self.columns["height"] # Height of each column.
EI = (k * Lc ** 3) / 12 # Young's module X Moment of inertia.
M = 6 * EI * B / (Lc ** 2)
NM_kipin = 112.9848004306 # Convert N x m to kip x in
M = M/NM_kipin
My = Mom[1]
phiy = phi[1]
Mu = Mom[2]
phiu = phi[2]
phim = phiu
# interpolate rotation.
if M <= Mom[1]:
phim = M * phi[1] / Mom[1]
elif Mom[1] < M <= Mom[2]:
phim = ((M - Mom[1]) / (Mom[2] - Mom[1])) * (phi[2] - phi[1]) + phi[1]
elif Mom[2] < M <= Mom[3]:
phim = ((M - Mom[2]) / (Mom[3] - Mom[2])) * (phi[3] - phi[2]) + phi[2]
else:
phim = phi[3]
# Compute DDI given the rotation (phi).
if phim < phi[1]:
ddi = 0
elif phi[1] <= phim < phi[2]:
ddi = (phim - phi[1]) / (phi[2] - phi[1])
elif phi[2] <= phim < phi[3]:
ddi = (phim - phi[2]) / (phi[3] - phi[2])
else:
ddi = 1.0
return ddi
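    # Added note: the applied moment M is interpolated onto the moment-curvature
    # diagram to recover a rotation phim, and phim is then mapped to a 0-1 damage
    # index within the branch it falls on (cracking-to-yield or yield-to-nominal),
    # reaching ddi = 1.0 once phim hits the nominal-strength rotation phi[3].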
class Costs(Structure):
# Estimate costs. Costs is a subclass of structure.
def __init__(self, building=None, columns=None, slabs=None, core=None, concrete=None, steel=None, cost=None):
self.building = building
self.columns = columns
self.slabs = slabs
self.core = core
self.concrete = concrete
self.steel = steel
self.cost = cost
Structure.__init__(self, building=building, columns=columns, slabs=slabs, core=core, concrete=concrete,
steel=steel, cost=cost)
def initial_cost_stiffness(self, col_size=None, par0=None, par1=None):
# initial cost is in fact the construction cost.
num_col = self.columns["quantity"] # Number of columns.
height_col = self.columns["height"] # Height of columns.
pslabs = self.slabs["cost_m2"] # costs of slabs.
area_col = col_size**2
moment_inertia_col = (col_size**4)/12
k_col = self.stiffness(area=area_col, moment_inertia=moment_inertia_col, height=height_col)
stiffness_kN_cm = 0.00001 * k_col
cost_initial = (par0 * (stiffness_kN_cm) ** par1) * num_col * height_col
cost_initial = cost_initial + pslabs * self.slabs["width"] * self.slabs["depth"] # price_slabs_m2*A
cost_initial = 1.6*cost_initial # include 60% of additional costs.
return cost_initial
def cost_damage(self, b=None, col_size=None, L=None, ncolumns=None, dry_wall_area=None):
# Cost of failures for a given level of interstory drift ratio (b).
# Glazing
A_glazing = 1.5 * L
A_bulding = 2 * L * (self.building["width"] + self.building["depth"])
Adry = 5.95
IDRd = self.cost["IDRd"]
IDRu = self.cost["IDRu"]
cIDRd = self.cost["cost_IDRd"]
cIDRu = self.cost["cost_IDRu"]
IDRd_eg = self.cost["IDRd_eg"]
IDRu_eg = self.cost["IDRu_eg"]
cIDRd_eg = self.cost["cost_IDRd_eg"]
cIDRu_eg = self.cost["cost_IDRu_eg"]
IDRd_dp = self.cost["IDRd_dp"]
IDRu_dp = self.cost["IDRu_dp"]
cIDRd_dp = self.cost["cost_IDRd_dp"]
cIDRu_dp = self.cost["cost_IDRu_dp"]
IDRd_df = self.cost["IDRd_df"]
IDRu_df = self.cost["IDRu_df"]
cIDRd_df = self.cost["cost_IDRd_df"]
cIDRu_df = self.cost["cost_IDRu_df"]
# COLUMNS - SLAB CONECTIONS
bsf = IDRd * L
bcol = IDRu * L
csf = ncolumns * cIDRd
ccol = ncolumns * cIDRu
# bar(1) = datad % cost_par % bcol(i)
# EXTERIOR GLAZING
bsf_eg = IDRd_eg * L
bcol_eg = IDRu_eg * L
csf_eg = cIDRd_eg * (A_bulding / A_glazing)
ccol_eg = cIDRu_eg * (A_bulding / A_glazing)
# bar(2) = datad % cost_par % bcol_eg(i)
# DRYWALL PARTITIONS
bsf_dp = IDRd_dp * L
bcol_dp = IDRu_dp * L
csf_dp = cIDRd_dp * (dry_wall_area / Adry)
ccol_dp = cIDRu_dp * (dry_wall_area / Adry)
# bar(3) = datad % cost_par % bcol_dp(i)
# DRYWALL FINISH
bsf_df = IDRd_df * L
bcol_df = IDRu_df * L
csf_df = cIDRd_df * (dry_wall_area / Adry)
ccol_df = cIDRu_df * (dry_wall_area / Adry)
# bar(4) = datad % cost_par % bcol_df(i)
# Next: costs associated to the interstory drift ratio.
# COLUMN-SLAB CONNECTIONS.
if b < bsf:
cf_cs = 0
elif bcol > b >= bsf:
cf_cs = ((ccol - csf) / (bcol - bsf)) * (b - bsf) + csf
else:
cf_cs = ccol
# EXTERIOR GLAZING.
if b < bsf_eg:
cf_eg = 0
elif bcol_eg > b >= bsf_eg:
cf_eg = ((ccol_eg - csf_eg) / (bcol_eg - bsf_eg)) * (b - bsf_eg) + csf_eg
else:
cf_eg = ccol_eg
        # DRYWALL PARTITIONS.
if b < bsf_dp:
cf_dp = 0
elif bcol_dp > b >= bsf_dp:
cf_dp = ((ccol_dp - csf_dp) / (bcol_dp - bsf_dp)) * (b - bsf_dp) + csf_dp
else:
cf_dp = ccol_dp
# DRYWALL FINISH
if b < bsf_df:
cf_df = 0
elif bcol_df > b >= bsf_df:
cf_df = ((ccol_df - csf_df) / (bcol_df - bsf_df)) * (b - bsf_df) + csf_df
else:
cf_df = ccol_df
# Next: costs associated to the deformation damage indexes.
area_col = col_size**2
moment_inertia_col = col_size**4/12
k_col = Structure.stiffness(self, area=area_col, moment_inertia=moment_inertia_col, height=L)
Mom, phi = Costs.compression(self, col_size=col_size, L=L)
ddi = Costs.deformation_damage_index(self, B=b, stiffness=k_col, Mom=Mom, phi=phi)
DDI1 = self.cost["DDI_1"]
DDI2 = self.cost["DDI_2"]
DDI3 = self.cost["DDI_3"]
DDI4 = self.cost["DDI_4"]
cDDI1 = self.cost["cost_DDI_1"]
cDDI2 = self.cost["cost_DDI_2"]
cDDI3 = self.cost["cost_DDI_3"]
cDDI4 = self.cost["cost_DDI_4"]
if ddi < DDI1:
cf_duc = 0
elif DDI1 <= ddi < DDI2:
bsf = DDI1
bcol = DDI2
csf = cDDI1
ccol = cDDI2
cf_duc = ((ccol - csf) / (bcol - bsf)) * (ddi - bsf) + csf
elif DDI2 <= ddi < DDI3:
bsf = DDI2
bcol = DDI3
csf = cDDI2
ccol = cDDI3
cf_duc = ((ccol - csf) / (bcol - bsf)) * (ddi - bsf) + csf
elif DDI3 <= ddi < DDI4:
bsf = DDI3
bcol = DDI4
csf = cDDI3
ccol = cDDI4
cf_duc = ((ccol - csf) / (bcol - bsf)) * (ddi - bsf) + csf
else:
cf_duc = cDDI4
f_duc = cf_duc * ncolumns
# Total cost.
cf = cf_cs + cf_duc + (cf_eg + cf_dp + cf_df)*0 #Only considering the structural damage
return cf
| 32.056689
| 115
| 0.523025
|
1818bc108ff15fc017dc522e727161e1d7496f15
| 8,857
|
py
|
Python
|
src/server/bos/controllers/v1/sessiontemplate.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | 1
|
2022-03-15T18:17:11.000Z
|
2022-03-15T18:17:11.000Z
|
src/server/bos/controllers/v1/sessiontemplate.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | null | null | null |
src/server/bos/controllers/v1/sessiontemplate.py
|
Cray-HPE/bos
|
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
|
[
"MIT"
] | 1
|
2022-03-06T12:47:06.000Z
|
2022-03-06T12:47:06.000Z
|
# Cray-provided controllers for the Boot Orchestration Service
# Copyright 2019-2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
import logging
import connexion
import json
import wget
import os
from connexion.lifecycle import ConnexionResponse
from bos.models.session_template import SessionTemplate # noqa: E501
from bos.dbclient import BosEtcdClient
from bos.utils import _canonize_xname
LOGGER = logging.getLogger('bos.controllers.sessiontemplate')
BASEKEY = "/sessionTemplate"
EXAMPLE_BOOT_SET = {
"type": "your-boot-type",
"boot_ordinal": 1,
"etag": "your_boot_image_etag",
"kernel_parameters": "your-kernel-parameters",
"network": "nmn",
"node_list": [
"xname1", "xname2", "xname3"],
"path": "your-boot-path",
"rootfs_provider": "your-rootfs-provider",
"rootfs_provider_passthrough": "your-rootfs-provider-passthrough"}
EXAMPLE_SESSION_TEMPLATE = {
"boot_sets": {
"name_your_boot_set": EXAMPLE_BOOT_SET},
"cfs": {
"configuration": "desired-cfs-config"},
"enable_cfs": True,
"name": "name-your-template"}
def sanitize_xnames(st_json):
"""
Sanitize xnames - Canonize the xnames
    N.B. The boot set node lists inside the passed-in template are rewritten in
    place; the template is also returned so the call can be chained or reassigned.
    Args:
        st_json (dict): The Session Template as a parsed JSON object
Returns:
The Session Template with all of the xnames sanitized
"""
if 'boot_sets' in st_json:
for boot_set in st_json['boot_sets']:
if 'node_list' in st_json['boot_sets'][boot_set]:
clean_nl = [_canonize_xname(node) for node in
st_json['boot_sets'][boot_set]['node_list']]
st_json['boot_sets'][boot_set]['node_list'] = clean_nl
return st_json
def create_v1_sessiontemplate(): # noqa: E501
"""POST /v1/sessiontemplate
Creates a new session template. # noqa: E501
"""
LOGGER.debug("POST /v1/sessiontemplate invoked create_v1_sessiontemplate")
if connexion.request.is_json:
LOGGER.debug("connexion.request.is_json")
LOGGER.debug("type=%s", type(connexion.request.get_json()))
LOGGER.debug("Received: %s", connexion.request.get_json())
else:
return "Post must be in JSON format", 400
sessiontemplate = None
try:
"""Convert the JSON request data into a SessionTemplate object.
Any exceptions caught here would be generated from the model
(i.e. bos.models.session_template).
An example is an exception for a session template name that
        does not conform to the Kubernetes naming convention.
In this case return 400 with a description of the specific error.
"""
sessiontemplate = SessionTemplate.from_dict(connexion.request.get_json())
except Exception as err:
return connexion.problem(
status=400, title="The session template could not be created.",
detail=str(err))
if sessiontemplate.template_url:
"""If a template URL was provided in the body treat this as a reference
to a JSON session template structure which needs to be read and
stored.
"""
LOGGER.debug("create_v1_sessiontemplate template_url: %s", sessiontemplate.template_url)
"""Downloads the content locally into a file named after the uri.
An optional 'out' parameter can be specified as the base dir
for the file
"""
sessionTemplateFile = ""
try:
sessionTemplateFile = wget.download(sessiontemplate.template_url)
LOGGER.debug("Downloaded: %s", sessionTemplateFile)
except Exception as err:
return connexion.problem(
status=400,
title="Error while getting content from '{}'".format(
sessiontemplate.template_url),
detail=str(err))
# Read in the session template file
with open(sessionTemplateFile, 'r') as f:
st_json = json.load(f)
if 'name' not in st_json.keys() or st_json['name'] == "":
return connexion.problem(
status=400, title="Bad request",
detail="The Session Template '{}' "
"is missing the required \'name\' attribute."
.format(sessiontemplate.template_url))
json_st_str = json.dumps(sanitize_xnames(st_json))
LOGGER.debug("Removing temporary local file: '%s'", sessionTemplateFile)
os.remove(sessionTemplateFile)
# Create a Session Template from the content.
"""Store the Session Template content.
For now overwrite any existing template by name w/o warning.
Later this can be changed (detected and blocked) when we
support patching operations. This could also be changed to
result in an HTTP 409 Conflict. TBD.
"""
with BosEtcdClient() as bec:
key = "{}/{}".format(BASEKEY, st_json['name'])
bec.put(key, value=json_st_str)
return key, 201
if sessiontemplate.name:
"""If a template name has been provided in the body, treat this as
a complete JSON session template record and store it.
For now overwrite any existing template by name w/o warning.
Later this can be changed when we support patching operations.
This could also be changed to result in an HTTP 409 Conflict. TBD.
"""
LOGGER.debug("create_v1_sessiontemplate name: %s", sessiontemplate.name)
st_json = connexion.request.get_json()
json_st_str = json.dumps(sanitize_xnames(st_json))
with BosEtcdClient() as bec:
key = "/sessionTemplate/{}".format(sessiontemplate.name)
bec.put(key, value=json_st_str)
return key, 201
def get_v1_sessiontemplates(): # noqa: E501
"""
GET /v1/sessiontemplates
List all sessiontemplates
"""
LOGGER.debug("get_v1_sessiontemplates: Fetching sessions.")
with BosEtcdClient() as bec:
results = []
for st, _meta in bec.get_prefix('{}/'.format(BASEKEY)):
json_st = json.loads(st.decode('utf-8'))
results.append(json_st)
return results, 200
def get_v1_sessiontemplate(session_template_id):
"""
GET /v1/sessiontemplate
Get the session template by session template ID
"""
LOGGER.debug("get_v1_sessiontemplate by ID: %s", session_template_id) # noqa: E501
with BosEtcdClient() as bec:
key = "{}/{}".format(BASEKEY, session_template_id)
st, _meta = bec.get(key)
if st:
json_st = json.loads(st.decode('utf-8'))
return json_st, 200
else:
return connexion.problem(status=404,
title="The Session Template was not found",
detail="The Session Template '{}' was not found.".format(session_template_id)) # noqa: E501
def get_v1_sessiontemplatetemplate():
"""
GET /v1/sessiontemplatetemplate
Get the example session template
"""
return EXAMPLE_SESSION_TEMPLATE, 200
def delete_v1_sessiontemplate(session_template_id):
"""
DELETE /v1/sessiontemplate
Delete the session template by session template ID
"""
LOGGER.debug("delete_v1_sessiontemplate by ID: %s", session_template_id)
result = get_v1_sessiontemplate(session_template_id)
if isinstance(result, ConnexionResponse):
return result
with BosEtcdClient() as bec:
key = "/sessionTemplate/{}".format(session_template_id)
bec.delete(key)
return '', 204
| 38.846491
| 129
| 0.658124
|
7dba8428969d5f922e172175362e7010eaf049fe
| 8,202
|
py
|
Python
|
salt/modules/mdadm.py
|
wikimedia/operations-debs-salt
|
be6342abc7401ff92f67ed59f7834f1359f35314
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/mdadm.py
|
wikimedia/operations-debs-salt
|
be6342abc7401ff92f67ed59f7834f1359f35314
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/mdadm.py
|
wikimedia/operations-debs-salt
|
be6342abc7401ff92f67ed59f7834f1359f35314
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Salt module to manage RAID arrays with mdadm
'''
# Import python libs
import os
import logging
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError, SaltInvocationError
# Set up logger
log = logging.getLogger(__name__)
# Define a function alias in order not to shadow built-ins
__func_alias__ = {
'list_': 'list'
}
# Define the module's virtual name
__virtualname__ = 'raid'
def __virtual__():
'''
mdadm provides raid functions for Linux
'''
if __grains__['kernel'] != 'Linux':
return False
if not salt.utils.which('mdadm'):
return False
return __virtualname__
def list_():
'''
List the RAID devices.
CLI Example:
.. code-block:: bash
salt '*' raid.list
'''
ret = {}
for line in (__salt__['cmd.run_stdout']
(['mdadm', '--detail', '--scan'],
python_shell=False).splitlines()):
if ' ' not in line:
continue
comps = line.split()
device = comps[1]
ret[device] = {"device": device}
for comp in comps[2:]:
key = comp.split('=')[0].lower()
value = comp.split('=')[1]
ret[device][key] = value
return ret
def detail(device='/dev/md0'):
'''
Show detail for a specified RAID device
CLI Example:
.. code-block:: bash
salt '*' raid.detail '/dev/md0'
'''
ret = {}
ret['members'] = {}
    # Let's make sure the device exists before running mdadm
if not os.path.exists(device):
msg = "Device {0} doesn't exist!"
raise CommandExecutionError(msg.format(device))
cmd = ['mdadm', '--detail', device]
for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines():
if line.startswith(device):
continue
if ' ' not in line:
continue
if ':' not in line:
if '/dev/' in line:
comps = line.split()
state = comps[4:-1]
ret['members'][comps[0]] = {
'device': comps[-1],
'major': comps[1],
'minor': comps[2],
'number': comps[0],
'raiddevice': comps[3],
'state': ' '.join(state),
}
continue
comps = line.split(' : ')
comps[0] = comps[0].lower()
comps[0] = comps[0].strip()
comps[0] = comps[0].replace(' ', '_')
ret[comps[0]] = comps[1].strip()
return ret
def destroy(device):
'''
Destroy a RAID device.
    WARNING: This will zero the superblock of all members of the RAID array.
CLI Example:
.. code-block:: bash
salt '*' raid.destroy /dev/md0
'''
try:
details = detail(device)
except CommandExecutionError:
return False
stop_cmd = ['mdadm', '--stop', device]
zero_cmd = ['mdadm', '--zero-superblock']
if __salt__['cmd.retcode'](stop_cmd, python_shell=False):
for number in details['members']:
zero_cmd.append(details['members'][number]['device'])
__salt__['cmd.retcode'](zero_cmd, python_shell=False)
# Remove entry from config file:
if __grains__.get('os_family') == 'Debian':
cfg_file = '/etc/mdadm/mdadm.conf'
else:
cfg_file = '/etc/mdadm.conf'
try:
__salt__['file.replace'](cfg_file, 'ARRAY {0} .*'.format(device), '')
except SaltInvocationError:
pass
if __salt__['raid.list']().get(device) is None:
return True
else:
return False
def create(name,
level,
devices,
metadata='default',
test_mode=False,
**kwargs):
'''
Create a RAID device.
.. versionchanged:: 2014.7.0
.. warning::
Use with CAUTION, as this function can be very destructive if not used
properly!
CLI Examples:
.. code-block:: bash
salt '*' raid.create /dev/md0 level=1 chunk=256 devices="['/dev/xvdd', '/dev/xvde']" test_mode=True
.. note::
Adding ``test_mode=True`` as an argument will print out the mdadm
command that would have been run.
name
The name of the array to create.
level
The RAID level to use when creating the raid.
devices
A list of devices used to build the array.
kwargs
Optional arguments to be passed to mdadm.
returns
test_mode=True:
Prints out the full command.
test_mode=False (Default):
Executes command on remote the host(s) and
Prints out the mdadm output.
.. note::
It takes time to create a RAID array. You can check the progress in
"resync_status:" field of the results from the following command:
.. code-block:: bash
salt '*' raid.detail /dev/md0
For more info, read the ``mdadm(8)`` manpage
'''
opts = []
raid_devices = len(devices)
for key in kwargs:
if not key.startswith('__'):
opts.append('--{0}'.format(key))
if kwargs[key] is not True:
opts.append(str(kwargs[key]))
if key == 'spare-devices':
raid_devices -= int(kwargs[key])
cmd = ['mdadm',
'-C', name,
'-R',
'-v'] + opts + [
'-l', str(level),
'-e', metadata,
'-n', str(raid_devices)] + devices
cmd_str = ' '.join(cmd)
if test_mode is True:
return cmd_str
elif test_mode is False:
return __salt__['cmd.run'](cmd, python_shell=False)
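# Added illustration: for the CLI example in the docstring above, the assembled
# command is roughly (kwargs order may vary):
#   mdadm -C /dev/md0 -R -v --chunk 256 -l 1 -e default -n 2 /dev/xvdd /dev/xvde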
def save_config():
'''
Save RAID configuration to config file.
Same as:
mdadm --detail --scan >> /etc/mdadm/mdadm.conf
Fixes this issue with Ubuntu
REF: http://askubuntu.com/questions/209702/why-is-my-raid-dev-md1-showing-up-as-dev-md126-is-mdadm-conf-being-ignored
CLI Example:
.. code-block:: bash
salt '*' raid.save_config
'''
scan = __salt__['cmd.run']('mdadm --detail --scan', python_shell=False).split()
# Issue with mdadm and ubuntu
# REF: http://askubuntu.com/questions/209702/why-is-my-raid-dev-md1-showing-up-as-dev-md126-is-mdadm-conf-being-ignored
if __grains__['os'] == 'Ubuntu':
buggy_ubuntu_tags = ['name', 'metadata']
for bad_tag in buggy_ubuntu_tags:
for i, elem in enumerate(scan):
if not elem.find(bad_tag):
del scan[i]
scan = ' '.join(scan)
if __grains__.get('os_family') == 'Debian':
cfg_file = '/etc/mdadm/mdadm.conf'
else:
cfg_file = '/etc/mdadm.conf'
if not __salt__['file.search'](cfg_file, scan):
__salt__['file.append'](cfg_file, scan)
return __salt__['cmd.run']('update-initramfs -u')
def assemble(name,
devices,
test_mode=False,
**kwargs):
'''
Assemble a RAID device.
CLI Examples:
.. code-block:: bash
salt '*' raid.assemble /dev/md0 ['/dev/xvdd', '/dev/xvde']
.. note::
Adding ``test_mode=True`` as an argument will print out the mdadm
command that would have been run.
name
The name of the array to assemble.
devices
The list of devices comprising the array to assemble.
kwargs
Optional arguments to be passed to mdadm.
returns
test_mode=True:
Prints out the full command.
test_mode=False (Default):
Executes command on the host(s) and prints out the mdadm output.
For more info, read the ``mdadm`` manpage.
'''
opts = []
for key in kwargs:
if not key.startswith('__'):
opts.append('--{0}'.format(key))
if kwargs[key] is not True:
opts.append(kwargs[key])
    # Devices may have been passed as a single comma-separated string:
    if type(devices) is str:
        devices = devices.split(',')
    cmd = ['mdadm', '-A', name, '-v'] + opts + devices
if test_mode is True:
return cmd
elif test_mode is False:
return __salt__['cmd.run'](cmd, python_shell=False)
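# Added illustration: for the CLI example in the docstring above, the assembled
# command is roughly: mdadm -A /dev/md0 -v /dev/xvdd /dev/xvde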
| 25.236923
| 123
| 0.560351
|
838c18bfe76cc35f5a07d12d31699dd724c16c5f
| 401
|
py
|
Python
|
boutique_ado/wsgi.py
|
hartnetl/boutique-ado-walkthrough
|
ef04eb4dae443d2df1bce7fbef9de8f628b58a0b
|
[
"MTLL"
] | null | null | null |
boutique_ado/wsgi.py
|
hartnetl/boutique-ado-walkthrough
|
ef04eb4dae443d2df1bce7fbef9de8f628b58a0b
|
[
"MTLL"
] | null | null | null |
boutique_ado/wsgi.py
|
hartnetl/boutique-ado-walkthrough
|
ef04eb4dae443d2df1bce7fbef9de8f628b58a0b
|
[
"MTLL"
] | null | null | null |
"""
WSGI config for boutique_ado project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'boutique_ado.settings')
application = get_wsgi_application()
| 23.588235
| 78
| 0.790524
|
ee21bc6e6fd2da8342cd778b8e5411bafe8c05dd
| 32,857
|
py
|
Python
|
scripts/cloud-check-control/cloudAtlasCheckControl.py
|
Amon-Cyn/atlas-checks
|
ffe3d213a866b3fa5bba9d0950378d3aa818f646
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/cloud-check-control/cloudAtlasCheckControl.py
|
Amon-Cyn/atlas-checks
|
ffe3d213a866b3fa5bba9d0950378d3aa818f646
|
[
"BSD-3-Clause"
] | 9
|
2018-09-05T16:06:10.000Z
|
2019-07-29T22:49:38.000Z
|
scripts/cloud-check-control/cloudAtlasCheckControl.py
|
Amon-Cyn/atlas-checks
|
ffe3d213a866b3fa5bba9d0950378d3aa818f646
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
execute atlas-checks on an EC2 instance
"""
import argparse
import logging
import os
import sys
import time
import boto3
import paramiko
import scp
from botocore.exceptions import ClientError
from paramiko.auth_handler import AuthenticationException
VERSION = "1.0.0"
AWS_REGION = 'us-west-1'
def setup_logging(default_level=logging.INFO):
"""
Setup logging configuration
"""
logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=default_level,
datefmt="%Y-%m-%d %H:%M:%S",
)
return logging.getLogger("CloudAtlasChecksControl")
def finish(error_message=None, status=0):
"""exit the process
Method to exit the Python script. It will log the given message and then exit().
:param error_message: Error message to log upon exiting the process
:param status: return code to exit the process with
"""
if error_message:
logger.error(error_message)
else:
logger.info("Done")
exit(status)
class CloudAtlasChecksControl:
"""Main Class to control atlas checks spark job on EC2"""
def __init__(
self,
timeoutMinutes=6000,
key=None,
instanceId="",
processes=32,
memory=256,
formats="flags",
countries="",
s3InFolder=None,
s3fsMount=False,
s3OutFolder=None,
terminate=False,
templateName="atlas_checks-ec2-template",
atlasConfig="https://raw.githubusercontent.com/osmlab/atlas-checks/dev/config/configuration.json",
checks="",
mrkey="",
mrProject="",
mrURL="https://maproulette.org:443",
jar="atlas-checks/build/libs/atlas-checks-*-SNAPSHOT-shadow.jar",
awsRegion=AWS_REGION,
):
self.timeoutMinutes = timeoutMinutes
self.key = key
self.instanceId = instanceId
self.s3InFolder = s3InFolder
self.s3fsMount = s3fsMount
self.processes = processes
self.memory = memory
self.formats = formats
self.countries = countries
self.s3OutFolder = s3OutFolder
self.terminate = terminate
self.templateName = templateName
self.homeDir = "/home/ubuntu/"
self.atlasCheckDir = os.path.join(self.homeDir, "atlas-checks/")
self.atlasOutDir = os.path.join(self.homeDir, "output/")
self.atlasInDir = os.path.join(self.homeDir, "input/")
self.atlasLogDir = os.path.join(self.homeDir, "log/")
self.atlasCheckLogName = "atlasCheck.log"
self.atlasCheckLog = os.path.join(self.atlasLogDir, self.atlasCheckLogName)
self.atlasCheckMRPushLogName = "mrPush.log"
self.atlasCheckMRPushLog = os.path.join(
self.atlasLogDir, self.atlasCheckMRPushLogName
)
self.atlasConfig = atlasConfig
self.checks = checks
self.mrkey = mrkey
self.mrProject = mrProject
self.mrURL = mrURL
self.jar = jar
self.instanceName = "AtlasChecks"
self.localJar = '/tmp/atlas-checks.jar'
self.localConfig = '/tmp/configuration.json'
self.sshClient = None
self.scpClient = None
self.ec2 = boto3.client(
'ec2',
region_name = awsRegion,
)
self.ssmClient = boto3.client(
'ssm',
region_name = awsRegion,
)
def setup_config(self, file_preface="file://"):
if self.atlasConfig.find("s3:") >= 0:
if self.ssh_cmd(
"aws s3 cp {} {}".format(self.atlasConfig, self.localConfig)
):
finish("Failed to copy config S3://{}".format(self.atlasConfig), -1)
return file_preface + self.localConfig
elif self.atlasConfig.find("http:") >= 0:
return self.atlasConfig
else:
# if configuration.json is a file then copy it to the EC2 instance
self.put_files(self.atlasConfig, self.localConfig)
return file_preface + self.localConfig
def setup_jar(self):
if self.jar.find("s3:") >= 0:
if self.ssh_cmd(
"aws s3 cp {} {}".format(self.jar, self.localJar)
):
finish("Failed to copy jar S3://{}".format(self.jar), -1)
return self.localJar
else:
            # if the jar is a local file then copy it to the EC2 instance
self.put_files(self.jar, self.localJar)
return self.localJar
def atlasCheck(self):
"""Submit an spark job to perform atlas checks on an EC2 instance.
If the CloudAtlasChecksControl includes an instance ID then atlas checks will
be executed on that instance. If no instance ID is defined then it will
create a new instance.
Dependencies:
- self.instanceId - indicates a running instance or "" to create one
- self.S3Atlas - indicates the S3 bucket and path that contains atlas files
"""
if self.instanceId == "":
self.create_instance()
self.get_instance_info()
if not self.is_process_running("SparkSubmit"):
cmd = "mkdir -p {} {}".format(self.atlasLogDir, self.atlasOutDir)
if self.ssh_cmd(cmd):
finish("Unable to create directory {}".format(cmd), -1)
# remove the success or failure files from any last run.
if self.ssh_cmd("rm -f {}/_*".format(self.atlasOutDir)):
finish("Unable to clean up old status files", -1)
# sync the country folders to the local directory
for c in list(self.countries.split(",")):
logger.info("syncing {}".format(c))
if self.ssh_cmd(
"aws s3 sync --only-show-errors {0}{1} {2}{1}".format(
self.s3InFolder, c, self.atlasInDir
)
):
finish(
"Failed to sync {}/{}".format(self.s3InFolder, c), -1
)
if self.ssh_cmd(
"aws s3 cp {}sharding.txt {}sharding.txt".format(
self.s3InFolder, self.atlasInDir
)
):
finish("Failed to copy sharding.txt", -1)
if self.info is not None:
cmd = ("echo '{{\n{},\n\"cmd\":\"{}\"\n}}' > {}INFO "
.format(self.info, " ".join(sys.argv), self.atlasOutDir))
else:
cmd = ("echo '{{\n\"cmd\":\"{}\"\n}}' > {}INFO "
.format(" ".join(sys.argv), self.atlasOutDir))
if self.ssh_cmd(cmd):
finish("Unable to write info file", -1)
atlasConfig = self.setup_config()
jarFile = self.setup_jar()
cmd = (
"/opt/spark/bin/spark-submit"
+ " --class=org.openstreetmap.atlas.checks.distributed.ShardedIntegrityChecksSparkJob"
+ " --master=local[{}]".format(self.processes)
+ " --conf='spark.driver.memory={}g'".format(self.memory)
+ " --conf='spark.rdd.compress=true'"
+ " {}".format(jarFile)
+ " -maxPoolMinutes=2880"
+ " -input='{}'".format(self.atlasInDir)
+ " -output='{}'".format(self.atlasOutDir)
+ " -outputFormats='{}'".format(self.formats)
+ " -countries='{}'".format(self.countries)
+ " -configFiles='{}'".format(atlasConfig)
+ " > {} 2>&1 &".format(self.atlasCheckLog)
)
logger.info("Submitting spark job: {}".format(cmd))
if self.ssh_cmd(cmd):
finish("Unable to execute spark job", -1)
# make sure spark job has started before checking for completion
time.sleep(5)
else:
logger.info("Detected a running atlas check spark job.")
logger.info("About to wait for remote script to complete. If "
"disconnected before the script completes then execute the "
"following command to continue waiting:\n {} --id={}"
.format(" ".join(sys.argv), self.instanceId))
# wait for script to complete
if self.wait_for_process_to_complete():
finish(
"Timeout waiting for script to complete. TODO - instructions to reconnect.",
-1,
)
self.sync()
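    # Added illustration: with the constructor defaults (32 processes, 256 GB of
    # driver memory, formats='flags') and countries='GBR', the submitted command
    # looks roughly like:
    #   /opt/spark/bin/spark-submit \
    #     --class=org.openstreetmap.atlas.checks.distributed.ShardedIntegrityChecksSparkJob \
    #     --master=local[32] --conf='spark.driver.memory=256g' \
    #     --conf='spark.rdd.compress=true' /tmp/atlas-checks.jar \
    #     -maxPoolMinutes=2880 -input='/home/ubuntu/input/' -output='/home/ubuntu/output/' \
    #     -outputFormats='flags' -countries='GBR' \
    #     -configFiles='file:///tmp/configuration.json' > /home/ubuntu/log/atlasCheck.log 2>&1 &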
def sync(self):
"""Sync an existing instance containing already generated atlas output with s3
Dependencies:
- self.instanceId - indicates a running instance or "" to create one
- self.s3OutFolder - the S3 bucket and folder path to push the output
- self.terminate - indicates if the EC2 instance should be terminated
"""
if self.s3OutFolder is None:
logger.warning(
"No S3 output folder specified, skipping s3 sync. Use -o 's3folder/path' to sync to s3"
)
return
logger.info(
"Syncing EC2 instance atlas-checks output with S3 bucket {}.".format(
self.s3OutFolder
)
)
# push output to s3
cmd = "aws s3 sync --only-show-errors --exclude *.crc {} {} ".format(
self.atlasOutDir, self.s3OutFolder
)
if self.ssh_cmd(cmd):
finish("Unable to sync with S3", -1)
# terminate instance
if self.terminate:
self.terminate_instance()
def challenge(self):
if self.instanceId == "":
self.create_instance()
self.get_instance_info()
logger.info("Creating map roulette challenge.")
# sync the country folders to the local directory
for c in list(self.countries.split(",")):
logger.info(
"syncing {}/flag/{} to {}flag/{}".format(
self.s3InFolder, c, self.atlasOutDir, c
)
)
if self.ssh_cmd(
"aws s3 sync --only-show-errors {0}/flag/{1} {2}flag/{1}".format(
self.s3InFolder, c, self.atlasOutDir
)
):
finish("Failed to sync {}/{}".format(self.s3InFolder, c), -1)
atlasConfig = self.setup_config(file_preface="")
jarFile = self.setup_jar()
cmd = (
"java -cp {}".format(jarFile)
+ " org.openstreetmap.atlas.checks.maproulette.MapRouletteUploadCommand"
+ " -maproulette='{}:{}:{}'".format(self.mrURL, self.mrProject, self.mrkey)
+ " -logfiles='{}flag'".format(self.atlasOutDir)
+ " -outputPath='{}'".format(self.atlasOutDir)
+ " -config='{}'".format(atlasConfig)
+ " -checkinComment='#AtlasChecks'"
+ " -countries='{}'".format(self.countries)
+ " -checks='{}'".format(self.checks)
+ " -includeFixSuggestions=true"
+ " > {} 2>&1".format(self.atlasCheckLog)
)
logger.info("Starting mr upload: {}".format(cmd))
if self.ssh_cmd(cmd, verbose=True):
finish("Unable to execute spark job", -1)
def clean(self):
"""Clean a running Instance of all produced folders and files
This readies the instance for a clean atlas check run or terminates an EC2
instance completely.
Dependencies:
- self.instanceId - indicates a running instance or "" to create one
- self.terminate - indicates if the EC2 instance should be terminated
"""
if self.terminate:
logger.info("Terminating EC2 instance.")
self.terminate_instance()
else:
logger.info("Cleaning up EC2 instance.")
cmd = "rm -rf {}/* {}/* ".format(self.atlasOutDir, self.atlasLogDir)
if self.ssh_cmd(cmd):
finish("Unable to clean", -1)
def create_instance(self):
"""Create Instance from atlas_checks-ec2-template template
Dependencies:
- self.templateId
- self.instanceName
:return:
"""
logger.info("Creating EC2 instance from {} template.".format(self.templateName))
try:
logger.info("Create instance...")
response = self.ec2.run_instances(
LaunchTemplate={"LaunchTemplateName": self.templateName},
TagSpecifications=[
{
"ResourceType": "instance",
"Tags": [{"Key": "Name", "Value": self.instanceName}],
}
],
MaxCount=1,
MinCount=1,
KeyName=self.key,
)
self.instanceId = response["Instances"][0]["InstanceId"]
logger.info("Instance {} was created".format(self.instanceId))
except ClientError as e:
finish(e, -1)
def terminate_instance(self):
"""Terminate Instance
Dependencies:
- self.templateId
"""
logger.info("Terminating EC2 instance {}".format(self.instanceId))
try:
response = self.ec2.terminate_instances(InstanceIds=[self.instanceId])
logger.info("Instance {} was terminated".format(self.instanceId))
except ClientError as e:
finish(e, -1)
def ssh_connect(self):
"""Connect to an EC2 instance"""
for _timeout in range(16):
try:
keyFile = "{}/.ssh/{}.pem".format(os.environ.get("HOME"), self.key)
key = paramiko.RSAKey.from_private_key_file(keyFile)
self.sshClient = paramiko.SSHClient()
self.sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
logger.debug(
"Connecting to {} ... ".format(self.instance["PublicDnsName"])
)
self.sshClient.connect(
self.instance["PublicDnsName"], username="ubuntu", pkey=key
)
logger.info(
"Connected to {} ... ".format(self.instance["PublicDnsName"])
)
self.scpClient = scp.SCPClient(self.sshClient.get_transport())
break
except AuthenticationException as error:
logger.error(
"Authentication failed: did you remember to create an SSH key? {error}"
)
raise error
except paramiko.ssh_exception.NoValidConnectionsError:
time.sleep(15)
continue
def put_files(self, localFiles, remoteDirectory):
"""Put files from local system onto running EC2 instance"""
if self.scpClient is None:
self.ssh_connect()
try:
self.scpClient.put(localFiles, remoteDirectory)
except scp.IOException as error:
logger.error("Unable to copy files. {error}")
raise error
logger.debug("Files: " + localFiles + " uploaded to: " + remoteDirectory)
def get_files(self, remoteFiles, localDirectory):
"""Get files from running ec2 instance to local system"""
if self.scpClient is None:
self.ssh_connect()
try:
self.scpClient.get(remoteFiles, localDirectory)
except scp.SCPException as error:
logger.error("Unable to copy files. {error}")
raise error
logger.debug("Files: " + remoteFiles + " downloaded to: " + localDirectory)
def ssh_cmd(self, cmd, quiet=False, verbose=False):
"""Issue an ssh command on the remote EC2 instance
:param cmd: the command string to execute on the remote system
:param quiet: If true, don't display errors on failures
:returns: Returns the status of the completed ssh command.
"""
if self.key is not None:
if self.sshClient is None:
self.ssh_connect()
logger.debug("Issuing remote command: {} ... ".format(cmd))
ssh_stdin, ssh_stdout, ssh_stderr = self.sshClient.exec_command(cmd)
if ssh_stdout.channel.recv_exit_status() and not quiet:
logger.error(" Remote command stderr:")
logger.error("\t".join(map(str, ssh_stderr.readlines())))
if verbose:
logger.info(" Remote command stdout:")
logger.info("\t".join(map(str, ssh_stdout.readlines())))
return ssh_stdout.channel.recv_exit_status()
# if key was not specified then try to use ssm
logger.debug("Issuing remote command: {} ... ".format(cmd))
while True:
try:
response = self.ssmClient.send_command(
InstanceIds=[self.instanceId],
DocumentName='AWS-RunShellScript',
Parameters={'commands': [cmd]}
)
break
except ClientError as e:
logger.debug(f'{e}')
time.sleep(5)
time.sleep(1)
command_id = response['Command']['CommandId']
for _timeout in range(self.timeoutMinutes * 60):
feedback = self.ssmClient.get_command_invocation(CommandId=command_id, InstanceId=self.instanceId)
if feedback['StatusDetails'] != 'InProgress':
break
time.sleep(1)
if feedback['StatusDetails'] != 'Success':
if not quiet:
logger.error("feedback: " + feedback['StatusDetails'])
logger.error(" Remote command stderr:")
logger.error(feedback['StandardErrorContent'])
return -1
if verbose:
logger.info(" Remote command stdout:")
logger.info(feedback['StandardOutputContent'])
return 0
def wait_for_process_to_complete(self):
"""Wait for process to complete
Will block execution while waiting for the completion of the spark
submit on the EC2 instance. Upon completion of the script it will look
at the log file produced to see if it completed successfully. If the
Atlas Checks spark job failed then this function will exit.
:returns: 0 - if Atlas check spark job completed successfully
:returns: 1 - if Atlas check spark job timed out
"""
logger.info("Waiting for Spark Submit process to complete...")
# wait for up to TIMEOUT minutes for the VM to be up and ready
for _timeout in range(self.timeoutMinutes):
if not self.is_process_running("SparkSubmit"):
logger.info("Atlas Check spark job has completed.")
if self.ssh_cmd(
"grep 'Success!' {}/_*".format(self.atlasOutDir), quiet=True
):
logger.error("Atlas Check spark job Failed.")
logger.error(
"---tail of Atlas Checks Spark job log output ({})--- \n".format(
self.atlasCheckLogName
+ " ".join(
map(
str,
open(self.atlasCheckLogName, "r").readlines()[-50:],
)
)
)
)
finish(status=-1)
return 0
time.sleep(60)
return 1
def is_process_running(self, process):
"""Indicate if process is actively running
Uses pgrep on the EC2 instance to detect if the process is
actively running.
:returns: 0 - if process is NOT running
:returns: 1 - if process is running
"""
if self.ssh_cmd("pgrep -P1 -f {}".format(process), quiet=True):
return 0
logger.debug("{} is still running ... ".format(process))
return 1
def start_ec2(self):
"""Start EC2 Instance."""
logger.info("Starting the EC2 instance.")
try:
logger.info("Start instance")
response = self.ec2.start_instances(InstanceIds=[self.instanceId])
logger.debug(response)
except ClientError as e:
logger.error(e)
def stop_ec2(self):
"""Stop EC2 Instance."""
logger.info("Stopping the EC2 instance.")
try:
response = self.ec2.stop_instances(InstanceIds=[self.instanceId])
logger.debug(response)
except ClientError as e:
logger.error(e)
def get_instance_info(self):
"""Get the info for an EC2 instance.
Given an EC2 instance ID this function will retrieve the instance info
for the instance and save it in self.instance.
"""
logger.info("Getting EC2 Instance {} Info...".format(self.instanceId))
# wait for up to TIMEOUT seconds for the VM to be up and ready
for _timeout in range(10):
response = self.ec2.describe_instances(InstanceIds=[self.instanceId])
if not response["Reservations"]:
finish("Instance {} not found".format(self.instanceId), -1)
if (
response["Reservations"][0]["Instances"][0].get("PublicIpAddress")
is None
):
logger.info(
"Waiting for EC2 instance {} to boot...".format(self.instanceId)
)
time.sleep(6)
continue
self.instance = response["Reservations"][0]["Instances"][0]
logger.info(
"EC2 instance: {} booted with name: {}".format(
self.instanceId, self.instance["PublicDnsName"]
)
)
break
for _timeout in range(100):
if self.ssh_cmd("systemctl is-system-running", quiet=True):
logger.debug(
"Waiting for systemd on EC2 instance to complete initialization..."
)
time.sleep(6)
continue
return
finish("Timeout while waiting for EC2 instance to be ready", -1)
def parse_args():
"""Parse user parameters
:returns: args
"""
parser = argparse.ArgumentParser(
description="This script automates the use of EC2 instance to execute "
"an atlas-checks spark job. It is meant to be executed on a laptop with "
"access to the EC2 instance"
)
parser.add_argument(
'--zone',
default=AWS_REGION,
type=str,
help=f"The AWS region to use. e.g. {AWS_REGION}",
)
parser.add_argument(
"--name",
help="Set EC2 instance name.",
)
parser.add_argument(
"--template",
help="Set EC2 template name to create instance from.",
)
parser.add_argument(
"--minutes",
type=int,
help="Set process timeout to number of minutes.",
)
parser.add_argument(
"--version", help="Display the current version", action="store_true"
)
parser.add_argument(
"--terminate",
default=False,
help="Terminate EC2 instance after successful execution",
action="store_true",
)
subparsers = parser.add_subparsers(
title="commands",
description="One of the following commands must be specified when executed. "
"To see more information about each command and the parameters that "
"are used for each command then specify the command and "
"the --help parameter.",
)
parser_check = subparsers.add_parser(
"check",
help="Execute Atlas Checks and, if '--output' is set, then push atlas check results to S3 folder",
)
parser_check.add_argument(
"--id", help="ID - Indicates the ID of an existing EC2 instance to use"
)
parser_check.add_argument(
"--key",
required=True,
help="KEY - Instance key name to use to login to instance. This key "
"is expected to be the same name as the key as defined by AWS and the "
"corresponding pem file must be located in your local '~/.ssh/' "
"directory and should be a pem file. See the following URL for "
"instructions on creating a key: "
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html. "
"(e.g. `--key=aws-key`)",
)
parser_check.add_argument(
"--output",
help="Out - The S3 Output directory. (e.g. '--out=s3://atlas-bucket/atlas-checks/output')",
)
parser_check.add_argument(
"--mount",
default=False,
action="store_true",
help="Flag to indicate if s3fs should be used to mount the input directory. (Default: False)",
)
parser_check.add_argument(
"--input",
required=True,
help="IN - The S3 Input directory that contains atlas file directories and sharding.txt. "
"(e.g. s3://bucket/path/to/atlas/file/dir/)",
)
parser_check.add_argument(
"--countries",
required=True,
help="COUNTRIES - A comma separated list of ISO3 codes. (e.g. --countries=GBR)",
)
parser_check.add_argument(
"--memory",
type=int,
help="MEMORY - Gigs of memory for spark job.",
)
parser_check.add_argument(
"--formats",
help="FORMATS - Output format",
)
parser_check.add_argument(
"--config",
required=True,
help="CONFIG - s3://path/to/configuration.json, http://path/to/configuration.json, "
" or /local/path/to/configuration.json to use as configuration.json for atlas-checks ",
)
parser_check.add_argument(
"--processes",
type=int,
help="PROCESSES - Number of parallel jobs to start.",
)
parser_check.add_argument(
"--jar",
required=True,
help="JAR - s3://path/to/atlas_checks.jar or /local/path/to/atlas_checks.jar to execute",
)
parser_check.add_argument(
"--info",
help="INFO - Json string to add to the 'INFO' file in the output folder "
"(e.g. --tag='{\"version\":\"1.6.3\"}')",
)
parser_check.set_defaults(func=CloudAtlasChecksControl.atlasCheck)
parser_sync = subparsers.add_parser(
"sync", help="Sync Atlas Check output files from instance to S3 folder"
)
parser_sync.add_argument(
"--id",
required=True,
help="ID - Indicates the ID of an existing EC2 instance to use",
)
parser_sync.add_argument(
"--key",
required=True,
help="KEY - Instance key name to use to login to instance. This key "
"is expected to be the same name as the key as defined by AWS and the "
"corresponding pem file must be located in your local '~/.ssh/' "
"directory and should be a pem file. See the following URL for "
"instructions on creating a key: "
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html. "
"(e.g. `--key=aws-key`)",
)
parser_sync.add_argument(
"--output", required=True, help="Out - The S3 Output directory"
)
parser_sync.set_defaults(func=CloudAtlasChecksControl.sync)
parser_mr = subparsers.add_parser(
"challenge", help="Create a Map Roulette Challenge"
)
parser_mr.add_argument(
"--id",
help="ID - Indicates the ID of an existing EC2 instance to use",
)
parser_mr.add_argument(
"--project",
required=True,
help="PROJECT - Indicates the name to use to create the map roulette project",
)
parser_mr.add_argument(
"--mrkey",
required=True,
help="MRKEY - The api|key to use to connect to Map Roulette",
)
parser_mr.add_argument(
"--countries",
required=True,
help="COUNTRIES - A comma separated list of ISO3 codes. (e.g. --countries=GBR)",
)
parser_mr.add_argument(
"--checks",
required=True,
help="CHECKS - A comma separated list of checks names to include in project. "
"(e.g. --checks='EdgeCrossingEdgeCheck,SinkIslandCheck')",
)
parser_mr.add_argument(
"--key",
required=True,
help="KEY - Instance key name to use to login to instance. This key "
"is expected to be the same name as the key as defined by AWS and the "
"corresponding pem file must be located in your local '~/.ssh/' "
"directory and should be a pem file. See the following URL for "
"instructions on creating a key: "
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html. "
"(e.g. `--key=aws-key`)",
)
parser_mr.add_argument(
"--config",
required=True,
help="CONFIG - Path within the S2 Input bucket or a URL to a json file to "
"use as configuration.json for atlas-checks (Default: Latest from atlas-config repo)",
)
parser_mr.add_argument(
"--input",
required=True,
help="INPUT - The s3 Atlas Files Output directory to use as input for challenge. "
"(e.g. '--input=s3://atlas-bucket/atlas-checks/output')",
)
parser_mr.add_argument(
"--jar",
help="JAR - The full path to the jar file to execute.",
)
parser_mr.set_defaults(func=CloudAtlasChecksControl.challenge)
parser_clean = subparsers.add_parser("clean", help="Clean up instance")
parser_clean.add_argument(
"--id",
required=True,
help="ID - Indicates the ID of an existing EC2 instance to use",
)
parser_clean.add_argument(
"--key",
required=True,
help="KEY - Instance key name to use to login to instance. This key "
"is expected to be the same name as the key as defined by AWS and the "
"corresponding pem file must be located in your local '~/.ssh/' "
"directory and should be a pem file. See the following URL for "
"instructions on creating a key: "
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html. "
"(e.g. `--key=aws-key`)",
)
parser_clean.set_defaults(func=CloudAtlasChecksControl.clean)
args = parser.parse_args()
return args
def evaluate(args, cloudctl):
"""Evaluate the given arguments.
:param args: The user's input.
:param cloudctl: An instance of CloudAtlasChecksControl to use.
"""
if args.version is True:
logger.critical("This is version {0}.".format(VERSION))
finish()
if args.name is not None:
cloudctl.instanceName = args.name
if args.template is not None:
cloudctl.templateName = args.template
if args.minutes is not None:
cloudctl.timeoutMinutes = args.minutes
if hasattr(args, "input") and args.input is not None:
cloudctl.s3InFolder = args.input
if hasattr(args, "mount") and args.mount is not None:
cloudctl.s3fsMount = args.mount
if hasattr(args, "processes") and args.processes is not None:
cloudctl.processes = args.processes
if hasattr(args, "key") and args.key is not None:
cloudctl.key = args.key
if hasattr(args, "output") and args.output is not None:
cloudctl.s3OutFolder = args.output
if hasattr(args, "countries") and args.countries is not None:
cloudctl.countries = args.countries
if hasattr(args, "formats") and args.formats is not None:
cloudctl.formats = args.formats
if hasattr(args, "memory") and args.memory is not None:
cloudctl.memory = args.memory
if hasattr(args, "config") and args.config is not None:
cloudctl.atlasConfig = args.config
if hasattr(args, "checks") and args.checks is not None:
cloudctl.checks = args.checks
if hasattr(args, "mrkey") and args.mrkey is not None:
cloudctl.mrkey = args.mrkey
if hasattr(args, "project") and args.project is not None:
cloudctl.mrProject = args.project
if hasattr(args, "jar") and args.jar is not None:
cloudctl.jar = args.jar
if hasattr(args, "info") and args.jar is not None:
cloudctl.info = args.info
if hasattr(args, "id") and args.id is not None:
cloudctl.instanceId = args.id
cloudctl.get_instance_info()
if hasattr(args, "func") and args.func is not None:
args.func(cloudctl)
else:
finish("A command must be specified. Try '-h' for help.")
logger = setup_logging()
if __name__ == "__main__":
args = parse_args()
cloudctl = CloudAtlasChecksControl(
terminate=args.terminate,
awsRegion=args.zone,
)
evaluate(args, cloudctl)
finish()
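# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). The script name,
# bucket names, key name, jar path and instance id below are illustrative
# assumptions; the flags themselves come from parse_args() above.
#
#   # run atlas-checks on a new instance created from a launch template
#   python cloud_atlas_checks.py --zone us-west-2 --template my-template check \
#       --key aws-key --countries GBR \
#       --input s3://my-bucket/atlas/ \
#       --config s3://my-bucket/configuration.json \
#       --jar s3://my-bucket/atlas-checks.jar \
#       --output s3://my-bucket/atlas-checks/output
#
#   # later, pull results from the same instance and clean it up
#   python cloud_atlas_checks.py sync --id i-0123456789abcdef0 --key aws-key \
#       --output s3://my-bucket/atlas-checks/output
#   python cloud_atlas_checks.py clean --id i-0123456789abcdef0 --key aws-key
# ---------------------------------------------------------------------------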
| 38.028935
| 110
| 0.575829
|
d68dd7c0448dc9a77e633b6c109fa88cc2883068
| 9,211
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/appservice/_validators.py
|
nexxai/azure-cli
|
3f24ada49f3323d9310d46ccc1025dc99fc4cf8e
|
[
"MIT"
] | 2
|
2020-08-08T11:00:25.000Z
|
2020-08-08T11:00:30.000Z
|
src/azure-cli/azure/cli/command_modules/appservice/_validators.py
|
nexxai/azure-cli
|
3f24ada49f3323d9310d46ccc1025dc99fc4cf8e
|
[
"MIT"
] | 1
|
2021-06-02T02:47:28.000Z
|
2021-06-02T02:47:29.000Z
|
src/azure-cli/azure/cli/command_modules/appservice/_validators.py
|
nexxai/azure-cli
|
3f24ada49f3323d9310d46ccc1025dc99fc4cf8e
|
[
"MIT"
] | 1
|
2020-09-07T18:44:14.000Z
|
2020-09-07T18:44:14.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from ._client_factory import web_client_factory
from .utils import _normalize_sku
def validate_timeout_value(namespace):
"""Validates that zip deployment timeout is set to a reasonable min value"""
if isinstance(namespace.timeout, int):
if namespace.timeout <= 29:
raise CLIError('--timeout value should be a positive value in seconds and should be at least 30')
def validate_site_create(cmd, namespace):
"""Validate the SiteName that is being used to create is available
This API requires that the RG is already created"""
client = web_client_factory(cmd.cli_ctx)
if isinstance(namespace.name, str) and isinstance(namespace.resource_group_name, str) \
and isinstance(namespace.plan, str):
resource_group_name = namespace.resource_group_name
plan = namespace.plan
if is_valid_resource_id(plan):
parsed_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parsed_result['resource_group'], parsed_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}'".format(plan, resource_group_name))
# verify that the name is available for create
validation_payload = {
"name": namespace.name,
"type": "Microsoft.Web/sites",
"location": plan_info.location,
"properties": {
"serverfarmId": plan_info.id
}
}
validation = client.validate(resource_group_name, validation_payload)
if validation.status.lower() == "failure" and validation.error.code != 'SiteAlreadyExists':
raise CLIError(validation.error.message)
def validate_ase_create(cmd, namespace):
# Validate the ASE Name availability
client = web_client_factory(cmd.cli_ctx)
resource_type = 'Microsoft.Web/hostingEnvironments'
if isinstance(namespace.name, str):
name_validation = client.check_name_availability(namespace.name, resource_type)
if not name_validation.name_available:
raise CLIError(name_validation.message)
def validate_asp_create(cmd, namespace):
"""Validate the SiteName that is being used to create is available
This API requires that the RG is already created"""
client = web_client_factory(cmd.cli_ctx)
if isinstance(namespace.name, str) and isinstance(namespace.resource_group_name, str):
resource_group_name = namespace.resource_group_name
if isinstance(namespace.location, str):
location = namespace.location
else:
from azure.cli.core.profiles import ResourceType
rg_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = rg_client.resource_groups.get(resource_group_name)
location = group.location
validation_payload = {
"name": namespace.name,
"type": "Microsoft.Web/serverfarms",
"location": location,
"properties": {
"skuName": _normalize_sku(namespace.sku) or 'B1',
"capacity": namespace.number_of_workers or 1,
"needLinuxWorkers": namespace.is_linux,
"isXenon": namespace.hyper_v
}
}
validation = client.validate(resource_group_name, validation_payload)
if validation.status.lower() == "failure" and validation.error.code != 'ServerFarmAlreadyExists':
raise CLIError(validation.error.message)
def validate_app_or_slot_exists_in_rg(cmd, namespace):
"""Validate that the App/slot exists in the RG provided"""
client = web_client_factory(cmd.cli_ctx)
webapp = namespace.name
resource_group_name = namespace.resource_group_name
if isinstance(namespace.slot, str):
app = client.web_apps.get_slot(resource_group_name, webapp, namespace.slot, raw=True)
else:
app = client.web_apps.get(resource_group_name, webapp, None, raw=True)
if app.response.status_code != 200:
raise CLIError(app.response.text)
def validate_app_exists_in_rg(cmd, namespace):
client = web_client_factory(cmd.cli_ctx)
webapp = namespace.name
resource_group_name = namespace.resource_group_name
app = client.web_apps.get(resource_group_name, webapp, None, raw=True)
if app.response.status_code != 200:
raise CLIError(app.response.text)
def validate_add_vnet(cmd, namespace):
resource_group_name = namespace.resource_group_name
from azure.cli.command_modules.network._client_factory import network_client_factory
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnet = namespace.vnet
name = namespace.name
slot = namespace.slot
vnet_loc = ''
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_loc = v.location
break
from ._appservice_utils import _generic_site_operation
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
# converting geo region to geo location
webapp_loc = webapp.location.lower().replace(" ", "")
if vnet_loc != webapp_loc:
raise CLIError("The app and the vnet resources are in different locations. \
Cannot integrate a regional VNET to an app in a different region")
def validate_front_end_scale_factor(namespace):
if namespace.front_end_scale_factor:
min_scale_factor = 5
max_scale_factor = 15
scale_error_text = "Frontend Scale Factor '{}' is invalid. Must be between {} and {}"
scale_factor = namespace.front_end_scale_factor
if scale_factor < min_scale_factor or scale_factor > max_scale_factor:
raise CLIError(scale_error_text.format(scale_factor, min_scale_factor, max_scale_factor))
def validate_asp_sku(cmd, namespace):
import json
client = web_client_factory(cmd.cli_ctx)
serverfarm = namespace.name
resource_group_name = namespace.resource_group_name
asp = client.app_service_plans.get(resource_group_name, serverfarm, None, raw=True)
if asp.response.status_code != 200:
raise CLIError(asp.response.text)
# convert byte array to json
output_str = asp.response.content.decode('utf8')
res = json.loads(output_str)
# Isolated SKU is supported only for ASE
if namespace.sku in ['I1', 'I2', 'I3']:
if res.get('properties').get('hostingEnvironment') is None:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if res.get('properties').get('hostingEnvironment') is not None:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def validate_ip_address(cmd, namespace):
if namespace.ip_address is not None:
_validate_ip_address_format(namespace)
_validate_ip_address_existance(cmd, namespace)
def _validate_ip_address_format(namespace):
if namespace.ip_address is not None:
# IPv6
if ':' in namespace.ip_address:
if namespace.ip_address.count(':') > 1:
if '/' not in namespace.ip_address:
namespace.ip_address = namespace.ip_address + '/128'
return
return
# IPv4
elif '.' in namespace.ip_address:
if namespace.ip_address.count('.') == 3:
if '/' not in namespace.ip_address:
namespace.ip_address = namespace.ip_address + '/32'
return
return
raise CLIError('Invalid IP address')
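# Illustrative note (not part of the original module): the normalization above
# appends a single-host CIDR suffix when none was supplied, e.g. (made-up values)
#     '10.0.0.1'    -> '10.0.0.1/32'
#     '2001:db8::1' -> '2001:db8::1/128'
# while values that already contain a '/' prefix length are left untouched.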
def _validate_ip_address_existance(cmd, namespace):
resource_group_name = namespace.resource_group_name
name = namespace.name
slot = namespace.slot
from ._appservice_utils import _generic_site_operation
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
access_rules = configs.ip_security_restrictions
is_exists = [(lambda x: x.ip_address == namespace.ip_address)(x) for x in access_rules]
if True in is_exists:
raise CLIError('IP address ' + namespace.ip_address + ' already exists. '
'Cannot add duplicate IP address values.')
| 44.713592
| 118
| 0.674194
|
fd96619a22323a6b64a70520c8c12ee482ceb87c
| 4,396
|
py
|
Python
|
tensortrade/oms/orders/broker.py
|
alexis-rodriguez/tensortrade
|
b9008c9becac3b46eb91796b4950d2fe5aa97cdf
|
[
"Apache-2.0"
] | null | null | null |
tensortrade/oms/orders/broker.py
|
alexis-rodriguez/tensortrade
|
b9008c9becac3b46eb91796b4950d2fe5aa97cdf
|
[
"Apache-2.0"
] | null | null | null |
tensortrade/oms/orders/broker.py
|
alexis-rodriguez/tensortrade
|
b9008c9becac3b46eb91796b4950d2fe5aa97cdf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from typing import List, Dict
from collections import OrderedDict
from tensortrade.core.base import TimeIndexed
from tensortrade.oms.orders.order import Order, OrderStatus
from tensortrade.oms.orders.order_listener import OrderListener
class Broker(OrderListener, TimeIndexed):
"""A broker for handling the execution of orders on multiple exchanges.
Orders are kept in a virtual order book until they are ready to be executed.
Attributes
----------
unexecuted : `List[Order]`
The list of orders the broker is waiting to execute, when their
criteria is satisfied.
executed : `Dict[str, Order]`
The dictionary of orders the broker has executed since resetting,
organized by order id.
trades : `Dict[str, Trade]`
The dictionary of trades the broker has executed since resetting,
organized by order id.
"""
def __init__(self):
self.unexecuted = []
self.executed = {}
self.trades = OrderedDict()
def submit(self, order: "Order") -> None:
"""Submits an order to the broker.
Adds `order` to the queue of orders waiting to be executed.
Parameters
----------
order : `Order`
The order to be submitted.
"""
self.unexecuted += [order]
def cancel(self, order: "Order") -> None:
"""Cancels an order.
Parameters
----------
order : `Order`
The order to be canceled.
"""
if order.status == OrderStatus.CANCELLED:
raise Warning(f"Order {order.id} has already been cancelled.")
if order in self.unexecuted:
self.unexecuted.remove(order)
order.cancel()
def update(self) -> None:
"""Updates the brokers order management system.
The broker will look through the unexecuted orders and if an order
is ready to be executed the broker will submit it to the executed
list and execute the order.
Then the broker will find any orders that are active, but expired, and
proceed to cancel them.
"""
executed_ids = []
for order in self.unexecuted:
if order.is_executable:
executed_ids.append(order.id)
self.executed[order.id] = order
order.attach(self)
order.execute()
for order_id in executed_ids:
self.unexecuted.remove(self.executed[order_id])
for order in self.unexecuted + list(self.executed.values()):
if order.is_active and order.is_expired:
self.cancel(order)
def on_fill(self, order: "Order", trade: "Trade") -> None:
"""Updates the broker after an order has been filled.
Parameters
----------
order : `Order`
The order that is being filled.
trade : `Trade`
The trade that is being made to fill the order.
"""
if trade.order_id in self.executed and trade not in self.trades:
self.trades[trade.order_id] = self.trades.get(trade.order_id, [])
self.trades[trade.order_id] += [trade]
if order.is_complete:
next_order = order.complete()
if next_order:
if next_order.is_executable:
self.executed[next_order.id] = next_order
next_order.attach(self)
next_order.execute()
else:
self.submit(next_order)
def reset(self) -> None:
"""Resets the broker."""
self.unexecuted = []
self.executed = {}
self.trades = OrderedDict()
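# Hedged usage sketch (not part of tensortrade itself). `some_order` stands in
# for an Order built elsewhere in the library; it is a placeholder, not a real
# fixture.
#
#     broker = Broker()
#     broker.submit(some_order)   # queued in broker.unexecuted
#     broker.update()             # executable orders move to broker.executed
#     broker.trades               # trades are recorded here via on_fill()
#     broker.reset()              # clears unexecuted, executed and trades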
| 33.052632
| 80
| 0.608508
|
e3887f019edc48b64c8a799e0cde890e95a2a1b1
| 4,238
|
py
|
Python
|
files/default/hopsworks_auth/hopsworks_jwt_auth.py
|
tkakantousis/airflow-chef
|
18b869b5b1b5a9323a605403923ce6bf1a26ff7a
|
[
"Apache-2.0"
] | null | null | null |
files/default/hopsworks_auth/hopsworks_jwt_auth.py
|
tkakantousis/airflow-chef
|
18b869b5b1b5a9323a605403923ce6bf1a26ff7a
|
[
"Apache-2.0"
] | null | null | null |
files/default/hopsworks_auth/hopsworks_jwt_auth.py
|
tkakantousis/airflow-chef
|
18b869b5b1b5a9323a605403923ce6bf1a26ff7a
|
[
"Apache-2.0"
] | null | null | null |
import sys
import requests
import flask_login
from requests.auth import AuthBase
from flask_login import login_required, logout_user, current_user, UserMixin
from flask import flash, url_for, redirect, request, current_app
from airflow import models
from airflow import configuration
from airflow.configuration import AirflowConfigException
from airflow.utils.log.logging_mixin import LoggingMixin
PY3 = sys.version_info[0] == 3
if PY3:
from urllib import parse as urlparse
from jwt import JWT
jwt = JWT()
else:
import urlparse
import jwt
log = LoggingMixin().log
login_manager = flask_login.LoginManager()
login_manager.login_view = 'airflow.login'
login_manager.session_protection = "Strong"
JWT_SUBJECT_KEY = 'sub'
class AuthenticationError(Exception):
pass
class JWTUser(models.User):
def __init__(self, user):
self.user = user
log.debug("User is: {}".format(user))
if configuration.conf.getboolean("webserver", "filter_by_owner"):
superuser_username = configuration.conf.get("webserver", "superuser")
if user.username == superuser_username:
self.superuser = True
else:
self.superuser = False
else:
self.superuser = True
@property
def is_active(self):
"""Required by flask_login"""
return True
@property
def is_authenticated(self):
"""Required by flask_login"""
return True
@property
def is_anonymous(self):
"""Required by flask_login"""
return False
def data_profiling(self):
"""Provides access to data profiling tools"""
return self.superuser
def is_superuser(self):
"""Access all things"""
return self.superuser
def get_id(self):
return self.user.get_id()
@login_manager.user_loader
def load_user(user_id):
log.debug("Loading user with id: {0}".format(user_id))
user = models.User(id=user_id, username=user_id, is_superuser=False)
return JWTUser(user)
def authenticate(jwt):
hopsworks_host = configuration.conf.get("webserver", "hopsworks_host")
hopsworks_port = configuration.conf.get("webserver", "hopsworks_port")
if not hopsworks_port:
hopsworks_port = 443
url = "https://{host}:{port}/hopsworks-api/api/auth/jwt/session".format(
host = parse_host(hopsworks_host),
port = hopsworks_port)
auth = AuthorizationToken(jwt)
response = requests.get(url, auth=auth, verify=False)
response.raise_for_status()
if response.status_code != requests.codes.ok:
raise AuthenticationError()
def parse_host(host):
"""
Host should be just the hostname or ip address
Remove protocol or any endpoints from the host
"""
parsed_host = urlparse.urlparse(host).hostname
if parsed_host:
# Host contains protocol
return parsed_host
return host
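# Illustration (not part of the original module): parse_host strips any scheme,
# port and path, e.g. "https://hopsworks.example.com:8181/hopsworks-api" ->
# "hopsworks.example.com", while a bare hostname is returned unchanged. The
# hostnames are made-up examples.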
def login(self, request):
if current_user.is_authenticated:
flash("You are already logged in")
return redirect(url_for('index'))
if 'Authorization' not in request.headers:
flash("Missing authorization header")
return redirect(url_for('airflow.noaccess'))
jwt_bearer = request.headers.get('Authorization')
try:
authenticate(jwt_bearer)
encoded_jwt = jwt_bearer.split(' ')[1].strip()
decoded_jwt = decode_jwt(encoded_jwt)
username = decoded_jwt[JWT_SUBJECT_KEY]
log.debug("Subject is: {}".format(username))
user = models.User(id=username, username=username, is_superuser=False)
flask_login.login_user(JWTUser(user), force=True)
return redirect(request.args.get("next") or url_for("admin.index"))
except AuthenticationError:
flash("Invalid JWT")
return redirect(url_for('airflow.noaccess'))
def decode_jwt(encoded_jwt):
if PY3:
return jwt.decode(encoded_jwt, do_verify=False)
return jwt.decode(encoded_jwt, verify=False)
class AuthorizationToken(AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, request):
request.headers['Authorization'] = self.token
return request
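# Illustrative request flow (not part of the original module); the request line
# and header value are made-up placeholders:
#
#   GET /admin/airflow/login HTTP/1.1
#   Authorization: Bearer <header>.<payload>.<signature>
#
# login() validates the bearer token against Hopsworks via authenticate(), then
# decode_jwt() extracts the 'sub' claim, which becomes the Airflow username.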
| 28.829932
| 81
| 0.677206
|
e6f4cdcb557272ca18b82c29953abf569bbcc10f
| 9,711
|
py
|
Python
|
source/conf.py
|
AENCO-Global/Chain-Docs
|
baa8f6328acb983e10749af56d6d54065ac60347
|
[
"Apache-2.0"
] | null | null | null |
source/conf.py
|
AENCO-Global/Chain-Docs
|
baa8f6328acb983e10749af56d6d54065ac60347
|
[
"Apache-2.0"
] | null | null | null |
source/conf.py
|
AENCO-Global/Chain-Docs
|
baa8f6328acb983e10749af56d6d54065ac60347
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# AEN-docs documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 18 16:39:26 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('_ext'))
import sphinx_bootstrap_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.examplecode',
'sphinxcontrib.fulltoc',
'sphinxcontrib.ghcontributors',
'edit-on-github',
'ablog'
]
# Add any paths that contain templates here, relative to this directory.
import ablog
templates_path = ['_templates']
templates_path.append(ablog.get_html_templates_path())
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AEN-docs'
copyright = u'2018, AEN'
author = u'AEN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.12'
# The full version, including alpha/beta/rc tags.
release = u'Master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'navbar_title': "developer center",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
'navbar_links': [
("Getting Started", "getting-started/what-is-aen"),
("Guides", "guides/overview"),
("References", "references"),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "exclude",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "cosmo" or "sandstone".
#
# The set of valid themes depend on the version of Bootstrap
# that's used (the next config option).
#
# Currently, the supported themes are:
# - Bootstrap 2: https://bootswatch.com/2
# - Bootstrap 3: https://bootswatch.com/3
'bootswatch_theme': "cosmo",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo-aen.svg"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
## Custom style overrides
def setup(app):
app.add_stylesheet("https://use.fontawesome.com/releases/v5.0.13/css/all.css")
app.add_stylesheet("css/custom.css") # may also be an URL
app.add_javascript("js/custom.js")
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'api/requests': ['localtoc.html'],
'api/endpoints': [],
'api/tools': ['localtoc.html'],
'api/websockets': ['localtoc.html'],
'api/status-errors': ['localtoc.html'],
'cli/**': ['localtoc.html'],
'concepts/**': ['localtoc.html'],
'getting-started/**': ['localtoc.html'],
'guides/overview': ['localtoc.html'],
'guides/account': ['localtoc.html'],
'guides/blockchain': ['localtoc.html'],
'guides/mosaic': ['localtoc.html'],
'guides/namespace': ['localtoc.html'],
'guides/running-a-node': ['localtoc.html'],
'guides/transaction': ['localtoc.html'],
'guides/workshops': ['localtoc.html'],
'guides/workshops/creating-a-new-workshop': ['localtoc.html'],
'guides/writing-a-guide': ['localtoc.html'],
'libraries/**': ['localtoc.html'],
'prototyping-tool/**': ['localtoc.html'],
'sdk/**': ['localtoc.html'],
'wallet/**': ['localtoc.html'],
'support/**': ['localtoc.html'],
'index': [],
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AEN-docsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AEN-docs.tex', u'AEN-docs Documentation',
u'AEN', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'AEN-docs', u'AEN-docs Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AEN-docs', u'AEN-docs Documentation',
author, 'AEN-docs', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
html_favicon = 'favicon-32x32.png'
# -- Options for edit on github ----------------------------------------------
edit_on_github_project = 'AENCo-Global/Chain-Docs'
edit_on_github_branch = 'master'
# -- Options for edit scaled images ----------------------------------------------
html_scaled_image_link = False
# base blog url
blog_baseurl = ''
| 29.972222
| 82
| 0.664092
|
7d7a7567f81bc66381cb3461c3fb99277a3059d5
| 6,253
|
py
|
Python
|
cogs/amongUs.py
|
ovandermeer/Pixels-bot
|
2d0cf8617aa5b189168dcb6cf8ea0e3e3461e158
|
[
"MIT"
] | 1
|
2021-04-30T04:32:08.000Z
|
2021-04-30T04:32:08.000Z
|
cogs/amongUs.py
|
ovandermeer/Pixels-bot
|
2d0cf8617aa5b189168dcb6cf8ea0e3e3461e158
|
[
"MIT"
] | null | null | null |
cogs/amongUs.py
|
ovandermeer/Pixels-bot
|
2d0cf8617aa5b189168dcb6cf8ea0e3e3461e158
|
[
"MIT"
] | 1
|
2021-06-26T13:13:19.000Z
|
2021-06-26T13:13:19.000Z
|
import discord
from discord.ext import commands
import configparser
import sys
import logging
import PixelBotData.supportingFunctions as supportingFunctions
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(f"debug-{supportingFunctions.getDate()}.log")]
)
from time import sleep
class amongUs(commands.Cog):
def __init__(self, client):
self.client = client
self.config = configparser.ConfigParser()
self.config.read('config.ini')
self.amongUsRequiresRole = self.config["pixelBotConfig"]["amongUsRequiresRole"]
self.amongUsRequiresRole = self.amongUsRequiresRole.lower()
if self.amongUsRequiresRole != "true" and self.amongUsRequiresRole != "false":
logging.warning('Please enter either true or false under the "amongUsRequiresRole" field in config.ini')
print(f'[{supportingFunctions.getTime()}] Please enter either true or false under the "amongUsRequiresRole" field in config.ini')
sys.exit()
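# Illustrative config.ini fragment assumed by the check above (the section and
# key names come from the code; the value is an example):
#
#     [pixelBotConfig]
#     amongUsRequiresRole = true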
@commands.command(aliases=['kill', 'die', 'k', 'K'])
async def killPlayer(self, ctx, *, members=None):
runCommand = False
if self.amongUsRequiresRole == "false":
runCommand = True
else:
for role in ctx.author.roles:
role = str(role)
if role == "Among Us permission":
runCommand = True
if runCommand is True:
if members is None:
await ctx.send('Please mention at least one valid user!')
return
members = members.split(" ")
for member in members:
member = member[3:]
member = member[:-1]
member = int(member)
memberObject = ctx.guild.get_member(member)
deadRole = discord.utils.get(ctx.guild.roles, name="Among Us - Dead")
await memberObject.add_roles(deadRole)
await memberObject.edit(mute=True)
await ctx.send("Player(s) killed!")
else:
await ctx.send("This command requires the 'Among Us permission' role to run. Please make sure you have this role, and try again.")
@commands.command(aliases=['reset', 'restart', 'r', 'R'])
async def resetGame(self, ctx):
runCommand = False
if self.amongUsRequiresRole == "false":
runCommand = True
else:
for role in ctx.author.roles:
role = str(role)
if role == "Among Us permission":
runCommand = True
if runCommand is True:
deadRole = discord.utils.get(ctx.guild.roles, name="Among Us - Dead")
deadMembers = deadRole.members
for member in deadMembers:
await member.remove_roles(deadRole)
await self.unmuteAllUsers(ctx)
await ctx.send("Game reset!")
else:
await ctx.send("This command requires the 'Among Us permission' role to run. Please make sure you have this role, and try again.")
@commands.command(aliases=["mute", "mutea", 'm', 'M'])
async def muteAll(self, ctx):
runCommand = False
if self.amongUsRequiresRole == "false":
runCommand = True
else:
for role in ctx.author.roles:
role = str(role)
if role == "Among Us permission":
runCommand = True
if runCommand is True:
channel = discord.utils.get(ctx.guild.voice_channels, name='Among Us', bitrate=64000)
members = channel.members
for member in members:
await member.edit(mute=True)
await ctx.send("Muted channel!")
else:
await ctx.send("This command requires the 'Among Us permission' role to run. Please make sure you have this role, and try again.")
@commands.command(aliases=["umute", "unmute", 'u', 'um', 'U', 'UM'])
async def unmuteAll(self, ctx):
runCommand = False
if self.amongUsRequiresRole == "false":
runCommand = True
else:
for role in ctx.author.roles:
role = str(role)
if role == "Among Us permission":
runCommand = True
if runCommand is True:
await self.unmuteAllUsers(ctx)
await ctx.send("Unmuted all players!")
else:
await ctx.send("This command requires the 'Among Us permission' role to run. Please make sure you have this role, and try again.")
@commands.command(aliases=["umutea", "unmutea", 'ua', 'UA'])
async def unmuteAlive(self, ctx):
runCommand = False
if self.amongUsRequiresRole == "false":
runCommand = True
else:
for role in ctx.author.roles:
role = str(role)
if role == "Among Us permission":
runCommand = True
if runCommand is True:
channel = discord.utils.get(ctx.guild.voice_channels, name='Among Us', bitrate=64000)
members = channel.members
for member in members:
dead = False
for role in member.roles:
role = str(role)
if role == "Among Us - Dead":
dead = True
if not dead:
await member.edit(mute=False)
await ctx.send("Unmuted alive players!")
else:
await ctx.send("This command requires the 'Among Us permission' role to run. Please make sure you have this role, and try again.")
async def unmuteAllUsers(self, ctx):
channel = discord.utils.get(ctx.guild.voice_channels, name='Among Us', bitrate=64000)
members = channel.members
for member in members:
await member.edit(mute=False)
def setup(client):
client.add_cog(amongUs(client))
| 37.443114
| 142
| 0.56245
|
a36f64c2e4638e2a3f62d4b0db9467ee224ffdc4
| 2,988
|
py
|
Python
|
tests/telephony/test_lowlevel.py
|
JesseVermeulen123/conducthotline.com
|
4e854b94e62c64a89fb631afbcc56d07dcfb0828
|
[
"Apache-2.0"
] | 23
|
2019-03-19T05:26:09.000Z
|
2021-07-21T20:36:02.000Z
|
tests/telephony/test_lowlevel.py
|
JesseVermeulen123/conducthotline.com
|
4e854b94e62c64a89fb631afbcc56d07dcfb0828
|
[
"Apache-2.0"
] | 23
|
2019-03-29T02:41:11.000Z
|
2021-04-30T20:45:47.000Z
|
tests/telephony/test_lowlevel.py
|
JesseVermeulen123/conducthotline.com
|
4e854b94e62c64a89fb631afbcc56d07dcfb0828
|
[
"Apache-2.0"
] | 10
|
2019-03-19T14:01:57.000Z
|
2020-11-28T12:44:45.000Z
|
# Copyright 2019 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import nexmo
import pytest
from hotline.telephony import lowlevel
@pytest.mark.parametrize(
["country", "expected"],
[(None, "11234567890"), ("US", "11234567890"), ("GB", "441234567890")],
)
def test_rent_number(country, expected):
client = mock.create_autospec(nexmo.Client)
client.application_id = "appid"
client.get_available_numbers.return_value = {
"numbers": [
# Should always grab the first one.
{"country": country, "msisdn": expected},
{"country": "US", "msisdn": "19876543210"},
]
}
result = lowlevel.rent_number(
sms_callback_url="example.com/sms", country_code=country, client=client
)
assert result == {"country": country, "msisdn": f"+{expected}"}
client.get_available_numbers.assert_called_once_with(country, mock.ANY)
client.buy_number.assert_called_once_with({"country": country, "msisdn": expected})
def test_rent_number_none_available():
client = mock.create_autospec(nexmo.Client)
client.application_id = "appid"
client.get_available_numbers.return_value = {"numbers": []}
with pytest.raises(RuntimeError, match="No numbers available"):
lowlevel.rent_number(sms_callback_url="example.com/sms", client=client)
def test_rent_number_buy_error_is_okay():
client = mock.create_autospec(nexmo.Client)
client.application_id = "appid"
client.get_available_numbers.return_value = {
"numbers": [
{"country": "US", "msisdn": "+1123456789"},
{"country": "US", "msisdn": "+1987654321"},
]
}
# Return an error when trying to buy the first number, so that the method
# ends up buying the second number.
client.buy_number.side_effect = [nexmo.Error(), None]
result = lowlevel.rent_number(sms_callback_url="example.com/sms", client=client)
assert result == {"country": "US", "msisdn": "+1987654321"}
assert client.buy_number.call_count == 2
@mock.patch("time.sleep", autospec=True)
def test_send_sms(sleep):
client = mock.create_autospec(nexmo.Client)
client.application_id = "appid"
client.send_message.return_value = {"messages": [{}]}
lowlevel.send_sms(to="1234", sender="5678", message="meep", client=client)
client.send_message.assert_called_once_with(
{"from": "5678", "to": "1234", "text": "meep"}
)
| 32.835165
| 87
| 0.690428
|
06638660481cdb3f0eda2019b3c76781dafcf423
| 2,204
|
py
|
Python
|
TechTry/paramikoTest/paramiko-test1.py
|
yangdaodao92/common-tools
|
a1d90c7666b50306646ce24ec2c87115876079cb
|
[
"Apache-2.0"
] | null | null | null |
TechTry/paramikoTest/paramiko-test1.py
|
yangdaodao92/common-tools
|
a1d90c7666b50306646ce24ec2c87115876079cb
|
[
"Apache-2.0"
] | null | null | null |
TechTry/paramikoTest/paramiko-test1.py
|
yangdaodao92/common-tools
|
a1d90c7666b50306646ce24ec2c87115876079cb
|
[
"Apache-2.0"
] | null | null | null |
import paramiko
import select
import re
import sys
import io
# interesting_line_pattern = re.compile('xxx')
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
def do_tail():
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
from os.path import expanduser
home = expanduser("~")
# client.connect('xxx', username='xxx', key_filename='%s/.ssh/id_rsa' % home)
# client.connect('192.168.133.177', username='root', password='123qwe!@#')
# client.connect('10.126.15.196', username='paas', password='123qwe!@#')
client.connect('10.126.15.182', username='paas', password='123qwe!@#')
# log_file = '/opt/tomcats/apache-tomcat-member-center-api/logs/catalina.out'
log_file = '/opt/apache-tomcat-7.0.47_8080/logs/catalina.out'
# grep_pattern = "grep_filter"
remote_command = 'tail -f %s' % log_file
print(remote_command)
transport = client.get_transport()
channel = transport.open_session()
channel.exec_command(remote_command)
BUF_SIZE = 1024
LeftOver = b''
while transport.is_active():
# print('transport is active')
try:
rl, wl, xl = select.select([channel], [], [], 0.0)
if len(rl) > 0:
buf = channel.recv(BUF_SIZE)
if len(buf) > 0:
lines_to_process = LeftOver + buf
EOL = lines_to_process.rfind(b'\n')
if EOL != len(lines_to_process) - 1:
LeftOver = lines_to_process[EOL + 1:]
lines_to_process = lines_to_process[:EOL+1]
else:
LeftOver = b''
if lines_to_process.rfind(b'\n') == len(lines_to_process) - 1:
for line in lines_to_process.splitlines():
print(str(line, 'utf-8'))
else:
print(len(lines_to_process))
except (KeyboardInterrupt, SystemExit):
print('got ctrl+c')
break
client.close()
print('client closed')
if __name__ == '__main__':
do_tail()
| 34.984127
| 82
| 0.579855
|
55b87b4bcf07ebc66a001cd15474932ea3e22975
| 14,651
|
py
|
Python
|
qpython3/qconnection.py
|
gusutabopb/qPython3
|
d194f742d5a8fd69e32c895435db595fc9e15a15
|
[
"Apache-2.0"
] | null | null | null |
qpython3/qconnection.py
|
gusutabopb/qPython3
|
d194f742d5a8fd69e32c895435db595fc9e15a15
|
[
"Apache-2.0"
] | null | null | null |
qpython3/qconnection.py
|
gusutabopb/qPython3
|
d194f742d5a8fd69e32c895435db595fc9e15a15
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import struct
from . import MetaData, CONVERSION_OPTIONS
from .qtype import QException
from .qreader import QReader, QReaderException
from .qwriter import QWriter, QWriterException
class QConnectionException(Exception):
'''Raised when a connection to the q service cannot be established.'''
pass
class QAuthenticationException(QConnectionException):
'''Raised when a connection to the q service is denied.'''
pass
class MessageType(object):
'''Enumeration defining IPC protocol message types.'''
ASYNC = 0
SYNC = 1
RESPONSE = 2
class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username=None, password=None, timeout=None, encoding='latin-1',
reader_class=None, writer_class=None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython3._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection,
protocol_version=self._protocol_version,
encoding=self._encoding)
self._reader = self._reader_class(self._connection_file, encoding=self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (f'{self.username if self.username else ""}:'
f'{self.password if self.password else ""}')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
if self.username:
return f'{self.username}@:{self.host}:{self.port}'
else:
return f':{self.host}:{self.port}'
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type,
**self._options.union_dict(**options))
def sync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only=False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'),
MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected' % response.type)
def async_(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.async_('{til x}', 10)
Executes a q expression:
>>> q.async_('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only=True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sync(parameters[0], *parameters[1:], **options)
| 38.965426
| 113
| 0.592315
|
e7f0bc80c971961477cb3f7a6bbc6474b04b6ffd
| 51,379
|
py
|
Python
|
tensorflow_datasets/core/dataset_builder.py
|
Rishabh-Choudhry/datasets
|
2bad427bba6cdcab717698a70c96339733c5d42c
|
[
"Apache-2.0"
] | 2
|
2022-02-14T09:51:39.000Z
|
2022-02-14T13:27:49.000Z
|
tensorflow_datasets/core/dataset_builder.py
|
Rishabh-Choudhry/datasets
|
2bad427bba6cdcab717698a70c96339733c5d42c
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/core/dataset_builder.py
|
Rishabh-Choudhry/datasets
|
2bad427bba6cdcab717698a70c96339733c5d42c
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:11:33.000Z
|
2020-12-13T22:11:33.000Z
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DatasetBuilder base class."""
import abc
import dataclasses
import functools
import inspect
import json
import os
import sys
from typing import Any, ClassVar, Dict, Iterable, List, Optional, Tuple, Type, Union
from absl import logging
import six
import tensorflow as tf
from tensorflow_datasets.core import constants
from tensorflow_datasets.core import dataset_info
from tensorflow_datasets.core import decode
from tensorflow_datasets.core import download
from tensorflow_datasets.core import file_adapters
from tensorflow_datasets.core import logging as tfds_logging
from tensorflow_datasets.core import naming
from tensorflow_datasets.core import registered
from tensorflow_datasets.core import split_builder as split_builder_lib
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core import tf_compat
from tensorflow_datasets.core import tfrecords_reader
from tensorflow_datasets.core import units
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.proto import dataset_info_pb2
from tensorflow_datasets.core.utils import file_utils
from tensorflow_datasets.core.utils import gcs_utils
from tensorflow_datasets.core.utils import read_config as read_config_lib
from tensorflow_datasets.core.utils import type_utils
import termcolor
ReadOnlyPath = type_utils.ReadOnlyPath
ReadWritePath = type_utils.ReadWritePath
Tree = type_utils.Tree
TreeDict = type_utils.TreeDict
VersionOrStr = Union[utils.Version, str]
FORCE_REDOWNLOAD = download.GenerateMode.FORCE_REDOWNLOAD
REUSE_CACHE_IF_EXISTS = download.GenerateMode.REUSE_CACHE_IF_EXISTS
REUSE_DATASET_IF_EXISTS = download.GenerateMode.REUSE_DATASET_IF_EXISTS
GCS_HOSTED_MSG = """\
Dataset %s is hosted on GCS. It will automatically be downloaded to your
local data directory. If you'd instead prefer to read directly from our public
GCS bucket (recommended if you're running on GCP), you can instead pass
`try_gcs=True` to `tfds.load` or set `data_dir=gs://tfds-data/datasets`.
"""
@dataclasses.dataclass(eq=False)
class BuilderConfig:
"""Base class for `DatasetBuilder` data configuration.
DatasetBuilder subclasses with data configuration options should subclass
`BuilderConfig` and add their own properties.
"""
# TODO(py3.10): Should update dataclass to be:
# * Frozen (https://bugs.python.org/issue32953)
# * Kwargs-only (https://bugs.python.org/issue33129)
name: str
version: Optional[VersionOrStr] = None
release_notes: Optional[Dict[str, str]] = None
supported_versions: List[VersionOrStr] = dataclasses.field(
default_factory=list)
description: Optional[str] = None
@classmethod
def from_dataset_info(
cls,
info_proto: dataset_info_pb2.DatasetInfo) -> Optional["BuilderConfig"]:
if not info_proto.config_name:
return None
return BuilderConfig(
name=info_proto.config_name,
description=info_proto.config_description,
version=info_proto.version,
release_notes=info_proto.release_notes or {},
)
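# Editor's note (hedged sketch, not part of TFDS): a dataset exposing variants
# would typically subclass `BuilderConfig` and add its own fields, e.g.:
#
#   @dataclasses.dataclass(eq=False)
#   class MyDatasetConfig(BuilderConfig):
#     img_size: Tuple[int, int] = (64, 64)
#     grayscale: bool = False
#
# The class and field names above are illustrative only. Instances of such a
# config are then listed in `DatasetBuilder.BUILDER_CONFIGS` (see below) and
# selected by name at construction time.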
class DatasetBuilder(registered.RegisteredDataset):
"""Abstract base class for all datasets.
`DatasetBuilder` has 3 key methods:
* `DatasetBuilder.info`: documents the dataset, including feature
names, types, and shapes, version, splits, citation, etc.
* `DatasetBuilder.download_and_prepare`: downloads the source data
and writes it to disk.
* `DatasetBuilder.as_dataset`: builds an input pipeline using
`tf.data.Dataset`s.
**Configuration**: Some `DatasetBuilder`s expose multiple variants of the
dataset by defining a `tfds.core.BuilderConfig` subclass and accepting a
config object (or name) on construction. Configurable datasets expose a
pre-defined set of configurations in `DatasetBuilder.builder_configs`.
Typical `DatasetBuilder` usage:
```python
mnist_builder = tfds.builder("mnist")
mnist_info = mnist_builder.info
mnist_builder.download_and_prepare()
datasets = mnist_builder.as_dataset()
train_dataset, test_dataset = datasets["train"], datasets["test"]
assert isinstance(train_dataset, tf.data.Dataset)
# And then the rest of your input pipeline
train_dataset = train_dataset.repeat().shuffle(1024).batch(128)
train_dataset = train_dataset.prefetch(2)
features = tf.compat.v1.data.make_one_shot_iterator(train_dataset).get_next()
image, label = features['image'], features['label']
```
"""
# Semantic version of the dataset (ex: tfds.core.Version('1.2.0'))
VERSION = None
# Release notes
# Metadata only used for documentation. Should be a dict[version,description]
# Multi-lines are automatically dedent
RELEASE_NOTES: ClassVar[Dict[str, str]] = {}
# List dataset versions which can be loaded using current code.
# Data can only be prepared with canonical VERSION or above.
SUPPORTED_VERSIONS = []
# Named configurations that modify the data generated by download_and_prepare.
BUILDER_CONFIGS = []
# Must be set for datasets that use 'manual_dir' functionality - the ones
# that require users to do additional steps to download the data
# (this is usually due to some external regulations / rules).
#
# This field should contain a string with user instructions, including
# the list of files that should be present. It will be
# displayed in the dataset documentation.
MANUAL_DOWNLOAD_INSTRUCTIONS = None
def __init__(
self,
*,
data_dir: Optional[utils.PathLike] = None,
config: Union[None, str, BuilderConfig] = None,
version: Union[None, str, utils.Version] = None,
):
"""Constructs a DatasetBuilder.
Callers must pass arguments as keyword arguments.
Args:
data_dir: directory to read/write data. Defaults to the value of the
environment variable TFDS_DATA_DIR, if set, otherwise falls back to
"~/tensorflow_datasets".
config: `tfds.core.BuilderConfig` or `str` name, optional configuration
for the dataset that affects the data generated on disk. Different
`builder_config`s will have their own subdirectories and versions.
version: Optional version at which to load the dataset. An error is
raised if specified version cannot be satisfied. Eg: '1.2.3', '1.2.*'.
The special value "experimental_latest" will use the highest version,
even if not default. This is not recommended unless you know what you
are doing, as the version could be broken.
"""
if data_dir:
data_dir = os.fspath(data_dir) # Pathlib -> str
# For pickling:
self._original_state = dict(
data_dir=data_dir, config=config, version=version)
# To do the work:
self._builder_config = self._create_builder_config(config)
# Extract code version (VERSION or config)
self._version = self._pick_version(version)
# Compute the base directory (for download) and dataset/version directory.
self._data_dir_root, self._data_dir = self._build_data_dir(data_dir)
if tf.io.gfile.exists(self._data_dir):
self.info.read_from_directory(self._data_dir)
else: # Use the code version (do not restore data)
self.info.initialize_from_bucket()
@utils.classproperty
@classmethod
@utils.memoize()
def code_path(cls) -> ReadOnlyPath:
"""Returns the path to the file where the Dataset class is located.
Note: As the code can be run inside a zip file, the returned value is
a `ReadOnlyPath` by default. Use `tfds.core.utils.to_write_path()` to cast
the path into `ReadWritePath`.
Returns:
path: pathlib.Path like abstraction
"""
modules = cls.__module__.split(".")
if len(modules) >= 2: # Filter `__main__`, `python my_dataset.py`,...
# If the dataset can be loaded from a module, use this to support zipapp.
# Note: `utils.resource_path` will return either `zipfile.Path` (for
# zipapp) or `pathlib.Path`.
try:
path = utils.resource_path(modules[0])
except TypeError: # Module is not a package
pass
else:
# For dynamically added modules, `importlib.resources` returns
# `pathlib.Path('.')` rather than the real path, so filter those by
# checking for `parts`.
# Check for `zipfile.Path` (`ResourcePath`) as it does not have `.parts`
if isinstance(path, utils.ResourcePath) or path.parts:
modules[-1] += ".py"
return path.joinpath(*modules[1:])
# Otherwise, fallback to `pathlib.Path`. For non-zipapp, it should be
# equivalent to the above return.
return utils.as_path(inspect.getfile(cls))
def __getstate__(self):
return self._original_state
def __setstate__(self, state):
self.__init__(**state)
@utils.memoized_property
def canonical_version(self) -> utils.Version:
return cannonical_version_for_config(self, self._builder_config)
@utils.memoized_property
def supported_versions(self):
if self._builder_config and self._builder_config.supported_versions:
return self._builder_config.supported_versions
else:
return self.SUPPORTED_VERSIONS
@utils.memoized_property
def versions(self) -> List[utils.Version]:
"""Versions (canonical + availables), in preference order."""
return [
utils.Version(v) if isinstance(v, six.string_types) else v
for v in [self.canonical_version] + self.supported_versions
]
def _pick_version(self, requested_version):
"""Returns utils.Version instance, or raise AssertionError."""
# Validate that `canonical_version` is correctly defined
assert self.canonical_version
if requested_version == "experimental_latest":
return max(self.versions)
for version in self.versions:
if requested_version is None or version.match(requested_version):
return version
available_versions = [str(v) for v in self.versions]
msg = "Dataset {} cannot be loaded at version {}, only: {}.".format(
self.name, requested_version, ", ".join(available_versions))
raise AssertionError(msg)
@property
def version(self):
return self._version
@property
def release_notes(self) -> Dict[str, str]:
if self.builder_config and self.builder_config.release_notes:
return self.builder_config.release_notes
else:
return self.RELEASE_NOTES
@property
def data_dir(self):
return self._data_dir
@property
def data_path(self) -> type_utils.ReadWritePath:
# Instead, should make `_data_dir` be Path everywhere
return utils.as_path(self._data_dir)
@utils.classproperty
@classmethod
def _checksums_path(cls) -> ReadOnlyPath:
"""Returns the checksums path."""
# Used:
# * To load the checksums (in url_infos)
# * To save the checksums (in DownloadManager)
new_path = cls.code_path.parent / "checksums.tsv"
# Checksums of legacy datasets are located in a separate dir.
legacy_path = utils.tfds_path() / "url_checksums" / f"{cls.name}.txt"
if (
# zipfile.Path does not have `.parts`. Additionally, `os.fspath`
# will extract the file, so use `str`.
"tensorflow_datasets" in str(new_path) and legacy_path.exists() and
not new_path.exists()):
return legacy_path
else:
return new_path
@utils.classproperty
@classmethod
@functools.lru_cache(maxsize=None)
def url_infos(cls) -> Optional[Dict[str, download.checksums.UrlInfo]]:
"""Load `UrlInfo` from the given path."""
# Note: If the dataset is downloaded with `record_checksums=True`, urls
# might be updated but `url_infos` won't as it is memoized.
# Search for the url_info file.
checksums_path = cls._checksums_path
# If url_info file is found, load the urls
if checksums_path.exists():
return download.checksums.load_url_infos(checksums_path)
else:
return None
@utils.memoized_property
def info(self) -> dataset_info.DatasetInfo:
"""`tfds.core.DatasetInfo` for this builder."""
# Ensure .info hasn't been called before versioning is set-up
# Otherwise, backward compatibility cannot be guaranteed as some code will
# depend on the code version instead of the restored data version
if not getattr(self, "_version", None):
# Message for developers creating new dataset. Will trigger if they are
# using .info in the constructor before calling super().__init__
raise AssertionError(
"Info should not been called before version has been defined. "
"Otherwise, the created .info may not match the info version from "
"the restored dataset.")
info = self._info()
if not isinstance(info, dataset_info.DatasetInfo):
raise TypeError(
"DatasetBuilder._info should returns `tfds.core.DatasetInfo`, not "
f" {type(info)}.")
return info
def download_and_prepare(self, *, download_dir=None, download_config=None):
"""Downloads and prepares dataset for reading.
Args:
download_dir: `str`, directory where downloaded files are stored. Defaults
to "~/tensorflow-datasets/downloads".
download_config: `tfds.download.DownloadConfig`, further configuration for
downloading and preparing dataset.
Raises:
IOError: if there is not enough disk space available.
"""
download_config = download_config or download.DownloadConfig()
data_exists = tf.io.gfile.exists(self._data_dir)
if data_exists and download_config.download_mode == REUSE_DATASET_IF_EXISTS:
logging.info("Reusing dataset %s (%s)", self.name, self._data_dir)
return
elif data_exists and download_config.download_mode == REUSE_CACHE_IF_EXISTS:
logging.info("Deleting pre-existing dataset %s (%s)", self.name,
self._data_dir)
utils.as_path(self._data_dir).rmtree() # Delete pre-existing data.
data_exists = tf.io.gfile.exists(self._data_dir)
if self.version.tfds_version_to_prepare:
available_to_prepare = ", ".join(
str(v) for v in self.versions if not v.tfds_version_to_prepare)
raise AssertionError(
"The version of the dataset you are trying to use ({}:{}) can only "
"be generated using TFDS code synced @ {} or earlier. Either sync to "
"that version of TFDS to first prepare the data or use another "
"version of the dataset (available for `download_and_prepare`: "
"{}).".format(self.name, self.version,
self.version.tfds_version_to_prepare,
available_to_prepare))
# Only `cls.VERSION` or `experimental_latest` versions can be generated.
# Otherwise, users may accidentally generate an old version using the
# code from newer versions.
installable_versions = {
str(v) for v in (self.canonical_version, max(self.versions))
}
if str(self.version) not in installable_versions:
msg = ("The version of the dataset you are trying to use ({}) is too "
"old for this version of TFDS so cannot be generated.").format(
self.info.full_name)
if self.version.tfds_version_to_prepare:
msg += (
"{} can only be generated using TFDS code synced @ {} or earlier "
"Either sync to that version of TFDS to first prepare the data or "
"use another version of the dataset. ").format(
self.version, self.version.tfds_version_to_prepare)
else:
msg += (
"Either sync to a previous version of TFDS to first prepare the "
"data or use another version of the dataset. ")
msg += "Available for `download_and_prepare`: {}".format(
list(sorted(installable_versions)))
raise ValueError(msg)
# Currently it's not possible to overwrite the data because it would
# conflict with versioning: If the last version has already been generated,
# it will always be reloaded and data_dir will be set at construction.
if data_exists:
raise ValueError(
"Trying to overwrite an existing dataset {} at {}. A dataset with "
"the same version {} already exists. If the dataset has changed, "
"please update the version number.".format(self.name, self._data_dir,
self.version))
logging.info("Generating dataset %s (%s)", self.name, self._data_dir)
if not utils.has_sufficient_disk_space(
self.info.dataset_size + self.info.download_size,
directory=self._data_dir_root):
raise IOError(
"Not enough disk space. Needed: {} (download: {}, generated: {})"
.format(
self.info.dataset_size + self.info.download_size,
self.info.download_size,
self.info.dataset_size,
))
self._log_download_bytes()
dl_manager = self._make_download_manager(
download_dir=download_dir,
download_config=download_config,
)
# Maybe save the `builder_cls` metadata common to all builder configs.
if self.BUILDER_CONFIGS:
_save_default_config_name(
# `data_dir/ds_name/config/version/` -> `data_dir/ds_name/`
common_dir=self.data_path.parent.parent,
default_config_name=self.BUILDER_CONFIGS[0].name,
)
# Create a tmp dir and rename to self._data_dir on successful exit.
with utils.incomplete_dir(self._data_dir) as tmp_data_dir:
# Temporarily assign _data_dir to tmp_data_dir to avoid having to forward
# it to every sub function.
with utils.temporary_assignment(self, "_data_dir", tmp_data_dir):
if (download_config.try_download_gcs and
gcs_utils.is_dataset_on_gcs(self.info.full_name)):
logging.info(GCS_HOSTED_MSG, self.name)
gcs_utils.download_gcs_dataset(self.info.full_name, self._data_dir)
self.info.read_from_directory(self._data_dir)
else:
# Old versions of TF are not os.PathLike compatible
with tf_compat.mock_gfile_pathlike():
self._download_and_prepare(
dl_manager=dl_manager,
download_config=download_config,
)
# NOTE: If modifying the lines below to put additional information in
# DatasetInfo, you'll likely also want to update
# DatasetInfo.read_from_directory to possibly restore these attributes
# when reading from package data.
self.info.download_size = dl_manager.downloaded_size
# Write DatasetInfo to disk, even if we haven't computed statistics.
self.info.write_to_directory(self._data_dir)
# The generated DatasetInfo contains references to `tmp_data_dir`
self.info.update_data_dir(self._data_dir)
self._log_download_done()
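# Editor's note (hedged sketch, not TFDS source): combining the arguments
# documented above, a caller that also wants to record checksums could invoke
# something along these lines; the dataset name and paths are illustrative.
#
#   builder = tfds.builder("my_dataset")
#   builder.download_and_prepare(
#       download_dir="~/tensorflow-datasets/downloads",
#       download_config=download.DownloadConfig(register_checksums=True),
#   )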
@tfds_logging.as_dataset()
def as_dataset(
self,
split: Optional[Tree[splits_lib.SplitArg]] = None,
*,
batch_size: Optional[int] = None,
shuffle_files: bool = False,
decoders: Optional[TreeDict[decode.partial_decode.DecoderArg]] = None,
read_config: Optional[read_config_lib.ReadConfig] = None,
as_supervised: bool = False,
):
# pylint: disable=line-too-long
"""Constructs a `tf.data.Dataset`.
Callers must pass arguments as keyword arguments.
The output types vary depending on the parameters. Examples:
```python
builder = tfds.builder('imdb_reviews')
builder.download_and_prepare()
# Default parameters: Returns the dict of tf.data.Dataset
ds_all_dict = builder.as_dataset()
assert isinstance(ds_all_dict, dict)
print(ds_all_dict.keys()) # ==> ['test', 'train', 'unsupervised']
assert isinstance(ds_all_dict['test'], tf.data.Dataset)
# Each dataset (test, train, unsup.) consists of dictionaries
# {'label': <tf.Tensor: .. dtype=int64, numpy=1>,
# 'text': <tf.Tensor: .. dtype=string, numpy=b"I've watched the movie ..">}
# {'label': <tf.Tensor: .. dtype=int64, numpy=1>,
# 'text': <tf.Tensor: .. dtype=string, numpy=b'If you love Japanese ..'>}
# With as_supervised: tf.data.Dataset only contains (feature, label) tuples
ds_all_supervised = builder.as_dataset(as_supervised=True)
assert isinstance(ds_all_supervised, dict)
print(ds_all_supervised.keys()) # ==> ['test', 'train', 'unsupervised']
assert isinstance(ds_all_supervised['test'], tf.data.Dataset)
# Each dataset (test, train, unsup.) consists of tuples (text, label)
# (<tf.Tensor: ... dtype=string, numpy=b"I've watched the movie ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
# (<tf.Tensor: ... dtype=string, numpy=b"If you love Japanese ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
# Same as above plus requesting a particular split
ds_test_supervised = builder.as_dataset(as_supervised=True, split='test')
assert isinstance(ds_test_supervised, tf.data.Dataset)
# The dataset consists of tuples (text, label)
# (<tf.Tensor: ... dtype=string, numpy=b"I've watched the movie ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
# (<tf.Tensor: ... dtype=string, numpy=b"If you love Japanese ..">,
# <tf.Tensor: ... dtype=int64, numpy=1>)
```
Args:
split: Which split of the data to load (e.g. `'train'`, `'test'`,
`['train', 'test']`, `'train[80%:]'`,...). See our
[split API guide](https://www.tensorflow.org/datasets/splits). If
`None`, will return all splits in a `Dict[Split, tf.data.Dataset]`.
batch_size: `int`, batch size. Note that variable-length features will be
0-padded if `batch_size` is set. Users that want more custom behavior
should use `batch_size=None` and use the `tf.data` API to construct a
custom pipeline. If `batch_size == -1`, will return feature dictionaries
of the whole dataset with `tf.Tensor`s instead of a `tf.data.Dataset`.
shuffle_files: `bool`, whether to shuffle the input files. Defaults to
`False`.
decoders: Nested dict of `Decoder` objects which allow customizing the
decoding. The structure should match the feature structure, but only
customized feature keys need to be present. See [the
guide](https://github.com/tensorflow/datasets/tree/master/docs/decode.md)
for more info.
read_config: `tfds.ReadConfig`, Additional options to configure the input
pipeline (e.g. seed, num parallel reads,...).
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset` will have
a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default, the returned
`tf.data.Dataset` will have a dictionary with all the features.
Returns:
`tf.data.Dataset`, or if `split=None`, `dict<key: tfds.Split, value:
tf.data.Dataset>`.
If `batch_size` is -1, will return feature dictionaries containing
the entire dataset in `tf.Tensor`s instead of a `tf.data.Dataset`.
"""
# pylint: enable=line-too-long
if not tf.io.gfile.exists(self._data_dir):
raise AssertionError(
("Dataset %s: could not find data in %s. Please make sure to call "
"dataset_builder.download_and_prepare(), or pass download=True to "
"tfds.load() before trying to access the tf.data.Dataset object.") %
(self.name, self._data_dir_root))
# By default, return all splits
if split is None:
split = {s: s for s in self.info.splits}
read_config = read_config or read_config_lib.ReadConfig()
# Create a dataset for each of the given splits
build_single_dataset = functools.partial(
self._build_single_dataset,
shuffle_files=shuffle_files,
batch_size=batch_size,
decoders=decoders,
read_config=read_config,
as_supervised=as_supervised,
)
all_ds = tf.nest.map_structure(build_single_dataset, split)
return all_ds
def _build_single_dataset(
self,
split,
shuffle_files,
batch_size,
decoders: Optional[TreeDict[decode.partial_decode.DecoderArg]],
read_config: read_config_lib.ReadConfig,
as_supervised,
):
"""as_dataset for a single split."""
wants_full_dataset = batch_size == -1
if wants_full_dataset:
batch_size = self.info.splits.total_num_examples or sys.maxsize
# Build base dataset
ds = self._as_dataset(
split=split,
shuffle_files=shuffle_files,
decoders=decoders,
read_config=read_config,
)
# Auto-cache datasets which are small enough to fit in memory.
if self._should_cache_ds(
split=split, shuffle_files=shuffle_files, read_config=read_config):
ds = ds.cache()
if batch_size:
# Use padded_batch so that features with unknown shape are supported.
ds = ds.padded_batch(batch_size, tf.compat.v1.data.get_output_shapes(ds))
if as_supervised:
if not self.info.supervised_keys:
raise ValueError(
f"as_supervised=True but {self.name} does not support a supervised "
"structure.")
def lookup_nest(features: Dict[str, Any]) -> Tuple[Any, ...]:
"""Converts `features` to the structure described by `supervised_keys`.
Note that there is currently no way to access features in nested
feature dictionaries.
Args:
features: dictionary of features
Returns:
A tuple with elements structured according to `supervised_keys`
"""
return tf.nest.map_structure(lambda key: features[key],
self.info.supervised_keys)
ds = ds.map(lookup_nest)
# Add prefetch by default
if not read_config.skip_prefetch:
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
# If shuffling is True and seeds not set, allow pipeline to be
# non-deterministic
# This code should probably be moved inside tfreader, so that
# all the tf.data.Options are centralized in a single place.
if (shuffle_files and read_config.shuffle_seed is None and
tf_compat.get_option_deterministic(read_config.options) is None):
options = tf.data.Options()
tf_compat.set_option_deterministic(options, False)
ds = ds.with_options(options)
# If shuffle is False, keep the default value (deterministic), which
# allows the user to overwrite it.
if wants_full_dataset:
return tf_compat.get_single_element(ds)
return ds
def _should_cache_ds(self, split, shuffle_files, read_config):
"""Returns True if TFDS should auto-cache the dataset."""
# The user can explicitly opt-out from auto-caching
if not read_config.try_autocache:
return False
# Skip datasets with unknown size.
# Even using heuristics based on `download_size` and
# `MANUAL_DOWNLOAD_INSTRUCTIONS`, we wouldn't catch datasets which hardcode
# the non-processed data-dir, nor DatasetBuilders not based on tf-records.
if not self.info.dataset_size:
return False
# Do not cache big datasets
# Instead of using the global size, we could infer the requested bytes:
# `self.info.splits[split].num_bytes`
# The info is available for full splits, and could be approximated
# for subsplits `train[:50%]`.
# However if the user is creating multiple small splits from a big
# dataset, those could add up and fill up the entire RAM.
# 250 MiB is arbitrarily picked. For comparison, Cifar10 is about 150 MiB.
if self.info.dataset_size > 250 * units.MiB:
return False
# We do not want to cache data which has more than one shard when
# shuffling is enabled, as this would effectively disable shuffling.
# An exception is for single shard (as shuffling is a no-op).
# Another exception is if reshuffle is disabled (shuffling already cached)
num_shards = len(self.info.splits[split].file_instructions)
if (shuffle_files and
# Shuffling only matters when reshuffle is True or None (default)
read_config.shuffle_reshuffle_each_iteration is not False and # pylint: disable=g-bool-id-comparison
num_shards > 1):
return False
# If the dataset satisfies all the right conditions, activate autocaching.
return True
def _relative_data_dir(self, with_version=True):
"""Relative path of this dataset in data_dir."""
builder_data_dir = self.name
builder_config = self._builder_config
if builder_config:
builder_data_dir = os.path.join(builder_data_dir, builder_config.name)
if not with_version:
return builder_data_dir
version_data_dir = os.path.join(builder_data_dir, str(self._version))
return version_data_dir
def _build_data_dir(self, given_data_dir):
"""Return the data directory for the current version.
Args:
given_data_dir: `Optional[str]`, root `data_dir` passed as `__init__`
argument.
Returns:
data_dir_root: `str`, The root dir containing all datasets, downloads,...
data_dir: `str`, The version data_dir
(e.g. `<data_dir_root>/<ds_name>/<config>/<version>`)
"""
builder_dir = self._relative_data_dir(with_version=False)
version_dir = self._relative_data_dir(with_version=True)
default_data_dir = file_utils.get_default_data_dir(
given_data_dir=given_data_dir)
all_data_dirs = file_utils.list_data_dirs(given_data_dir=given_data_dir)
all_versions = set()
requested_version_dirs = {}
for data_dir_root in all_data_dirs:
# List all existing versions
full_builder_dir = os.path.join(data_dir_root, builder_dir)
data_dir_versions = set(utils.version.list_all_versions(full_builder_dir))
# Check for existence of the requested version
if self.version in data_dir_versions:
requested_version_dirs[data_dir_root] = os.path.join(
data_dir_root, version_dir)
all_versions.update(data_dir_versions)
if len(requested_version_dirs) > 1:
raise ValueError(
"Dataset was found in more than one directory: {}. Please resolve "
"the ambiguity by explicitly specifying `data_dir=`."
"".format(requested_version_dirs.values()))
elif len(requested_version_dirs) == 1: # The dataset is found once
return next(iter(requested_version_dirs.items()))
# No dataset found, use default directory
data_dir = os.path.join(default_data_dir, version_dir)
if all_versions:
logging.warning(
"Found a different version of the requested dataset:\n"
"%s\n"
"Using %s instead.", "\n".join(str(v) for v in sorted(all_versions)),
data_dir)
return default_data_dir, data_dir
def _log_download_done(self):
msg = (f"Dataset {self.name} downloaded and prepared to {self._data_dir}. "
"Subsequent calls will reuse this data.")
termcolor.cprint(msg, attrs=["bold"])
def _log_download_bytes(self):
# Print is intentional: we want this to always go to stdout so user has
# information needed to cancel download/preparation if needed.
# This comes right before the progress bar.
termcolor.cprint(
f"Downloading and preparing dataset {self.info.download_size} "
f"(download: {self.info.download_size}, "
f"generated: {self.info.dataset_size}, "
f"total: {self.info.download_size + self.info.dataset_size}) "
f"to {self._data_dir}...",
attrs=["bold"],
)
@abc.abstractmethod
@utils.docs.doc_private
def _info(self):
"""Returns the `tfds.core.DatasetInfo` object.
This function is called once and the result is cached for all
following calls.
Returns:
dataset_info: The dataset metadata.
"""
raise NotImplementedError
@abc.abstractmethod
def _download_and_prepare(self, dl_manager, download_config=None):
"""Downloads and prepares dataset for reading.
Internal implementation to override when inheriting from DatasetBuilder.
Called when `builder.download_and_prepare` is called.
It should download all required data and generate
the pre-processed datasets files.
Args:
dl_manager: `tfds.download.DownloadManager` used to download and cache
data.
download_config: `DownloadConfig`, Additional options.
"""
raise NotImplementedError
@abc.abstractmethod
def _as_dataset(
self,
split: splits_lib.Split,
decoders: Optional[TreeDict[decode.partial_decode.DecoderArg]] = None,
read_config: Optional[read_config_lib.ReadConfig] = None,
shuffle_files: bool = False,
) -> tf.data.Dataset:
"""Constructs a `tf.data.Dataset`.
Internal implementation to override when inheriting from DatasetBuilder.
Called when `builder.as_dataset` is called.
It should read the pre-processed datasets files and generate
the `tf.data.Dataset` object.
Args:
split: `tfds.Split` which subset of the data to read.
decoders: Nested structure of `Decoder` object to customize the dataset
decoding.
read_config: `tfds.ReadConfig`
shuffle_files: `bool`, whether to shuffle the input files. Optional,
defaults to `False`.
Returns:
`tf.data.Dataset`
"""
raise NotImplementedError
def _make_download_manager(self, download_dir, download_config):
"""Creates a new download manager object."""
download_dir = (
download_dir or os.path.join(self._data_dir_root, "downloads"))
extract_dir = (
download_config.extract_dir or os.path.join(download_dir, "extracted"))
manual_dir = (
download_config.manual_dir or os.path.join(download_dir, "manual"))
if download_config.register_checksums:
# Note: An error will be raised here if the user tries to record checksums
# from a `zipapp`
register_checksums_path = utils.to_write_path(self._checksums_path)
else:
register_checksums_path = None
return download.DownloadManager(
download_dir=download_dir,
extract_dir=extract_dir,
manual_dir=manual_dir,
url_infos=self.url_infos,
manual_dir_instructions=self.MANUAL_DOWNLOAD_INSTRUCTIONS,
force_download=(download_config.download_mode == FORCE_REDOWNLOAD),
force_extraction=(download_config.download_mode == FORCE_REDOWNLOAD),
force_checksums_validation=download_config.force_checksums_validation,
register_checksums=download_config.register_checksums,
register_checksums_path=register_checksums_path,
verify_ssl=download_config.verify_ssl,
dataset_name=self.name,
)
@property
def builder_config(self):
"""`tfds.core.BuilderConfig` for this builder."""
return self._builder_config
def _create_builder_config(self, builder_config):
"""Create and validate BuilderConfig object."""
if builder_config is None and self.BUILDER_CONFIGS:
builder_config = self.BUILDER_CONFIGS[0]
logging.info("No config specified, defaulting to first: %s/%s", self.name,
builder_config.name)
if not builder_config:
return None
if isinstance(builder_config, six.string_types):
name = builder_config
builder_config = self.builder_configs.get(name)
if builder_config is None:
raise ValueError("BuilderConfig %s not found. Available: %s" %
(name, list(self.builder_configs.keys())))
name = builder_config.name
if not name:
raise ValueError("BuilderConfig must have a name, got %s" % name)
is_custom = name not in self.builder_configs
if is_custom:
logging.warning("Using custom data configuration %s", name)
else:
if builder_config is not self.builder_configs[name]:
raise ValueError(
"Cannot name a custom BuilderConfig the same as an available "
"BuilderConfig. Change the name. Available BuilderConfigs: %s" %
(list(self.builder_configs.keys())))
return builder_config
@utils.classproperty
@classmethod
@utils.memoize()
def builder_configs(cls):
"""Pre-defined list of configurations for this builder class."""
config_dict = {config.name: config for config in cls.BUILDER_CONFIGS}
if len(config_dict) != len(cls.BUILDER_CONFIGS):
names = [config.name for config in cls.BUILDER_CONFIGS]
raise ValueError(
"Names in BUILDER_CONFIGS must not be duplicated. Got %s" % names)
return config_dict
class FileReaderBuilder(DatasetBuilder):
"""Base class for datasets reading files.
Subclasses are:
* `GeneratorBasedBuilder`: Can both generate and read generated dataset.
* `ReadOnlyBuilder`: Can only read pre-generated datasets. A user can
generate a dataset with `GeneratorBasedBuilder`, and read it with
`ReadOnlyBuilder` without requiring the original generation code.
"""
def __init__(
self,
*,
file_format: Union[None, str, file_adapters.FileFormat] = None,
**kwargs: Any,
):
"""Initializes an instance of FileReaderBuilder.
Callers must pass arguments as keyword arguments.
Args:
file_format: EXPERIMENTAL, may change at any time; Format of the record
files in which the dataset will be read/written. If `None`, defaults to
`tfrecord`.
**kwargs: Arguments passed to `DatasetBuilder`.
"""
super().__init__(**kwargs)
self.info.set_file_format(file_format)
@utils.memoized_property
def _example_specs(self):
return self.info.features.get_serialized_info()
def _as_dataset(
self,
split: splits_lib.Split,
decoders: Optional[TreeDict[decode.partial_decode.DecoderArg]],
read_config: read_config_lib.ReadConfig,
shuffle_files: bool,
) -> tf.data.Dataset:
# Partial decoding
# TODO(epot): Should be moved inside `features.decode_example`
if isinstance(decoders, decode.PartialDecoding):
features = decoders.extract_features(self.info.features)
example_specs = features.get_serialized_info()
decoders = decoders.decoders
# Full decoding (all features decoded)
else:
features = self.info.features
example_specs = self._example_specs
decoders = decoders # pylint: disable=self-assigning-variable
reader = tfrecords_reader.Reader(
self._data_dir,
example_specs=example_specs,
file_format=self.info.file_format,
)
decode_fn = functools.partial(features.decode_example, decoders=decoders)
return reader.read(
instructions=split,
split_infos=self.info.splits.values(),
decode_fn=decode_fn,
read_config=read_config,
shuffle_files=shuffle_files,
disable_shuffling=self.info.disable_shuffling,
)
class GeneratorBasedBuilder(FileReaderBuilder):
"""Base class for datasets with data generation based on file adapter.
`GeneratorBasedBuilder` is a convenience class that abstracts away much
of the data writing and reading of `DatasetBuilder`.
It expects subclasses to override `_split_generators` to return a dict of
split names to generators. See the method docstrings for details.
"""
@abc.abstractmethod
@utils.docs.do_not_doc_in_subclasses
@utils.docs.doc_private
def _split_generators(
self,
dl_manager: download.DownloadManager,
) -> Dict[splits_lib.Split, split_builder_lib.SplitGenerator]:
"""Downloads the data and returns dataset splits with associated examples.
Example:
```python
def _split_generators(self, dl_manager):
path = dl_manager.download_and_extract('http://dataset.org/my_data.zip')
return {
'train': self._generate_examples(path=path / 'train_imgs'),
'test': self._generate_examples(path=path / 'test_imgs'),
}
```
* If the original dataset does not have predefined `train`, `test`,... splits,
this function should return only a single `train` split here. Users can
use the [subsplit API](https://www.tensorflow.org/datasets/splits) to
create subsplits (e.g.
`tfds.load(..., split=['train[:75%]', 'train[75%:]'])`).
* `tfds.download.DownloadManager` caches downloads, so calling `download`
on the same url multiple times only downloads it once.
* A good practice is to download all data in this function, and have all the
computation inside `_generate_examples`.
* Splits are generated in the order defined here. `builder.info.splits` keeps
the same order.
* This function can have an extra `pipeline` kwarg only if some
beam preprocessing should be shared across splits. In this case,
a dict of `beam.PCollection` should be returned.
See `_generate_examples` for details.
Args:
dl_manager: `tfds.download.DownloadManager` used to download/extract the
data
Returns:
The dict of split names to generators. See `_generate_examples` for details
about the generator format.
"""
raise NotImplementedError()
@abc.abstractmethod
@utils.docs.do_not_doc_in_subclasses
@utils.docs.doc_private
def _generate_examples(self,
**kwargs: Any) -> split_builder_lib.SplitGenerator:
"""Default function to generate examples for each split.
The function should return a collection of `(key, example)` pairs. Examples
will be encoded and written to disk. See the `yields` section for details.
The function can return/yield:
* A python generator:
```python
def _generate_examples(self, path):
for filepath in path.iterdir():
yield filepath.name, {'image': ..., 'label': ...}
```
* A `beam.PTransform` of (input_types: [] -> output_types: `KeyExample`):
For big datasets and distributed generation. See our Apache Beam
[datasets guide](https://www.tensorflow.org/datasets/beam_datasets)
for more info.
```python
def _generate_examples(self, path):
return (
beam.Create(path.iterdir())
| beam.Map(lambda filepath: (filepath.name, {'image': ..., ...}))
)
```
* A `beam.PCollection`: This should only be used if you need to share some
distributed processing across splits. In this case, you can use the
following pattern:
```python
def _split_generators(self, dl_manager, pipeline):
...
# Distributed processing shared across splits
pipeline |= beam.Create(path.iterdir())
pipeline |= 'SharedPreprocessing' >> beam.Map(_common_processing)
...
# Wrap the pipeline inside a ptransform_fn to add `'label' >> ` and avoid
# duplicated PTransform node names.
generate_examples = beam.ptransform_fn(self._generate_examples)
return {
'train': pipeline | 'train' >> generate_examples(is_train=True),
'test': pipeline | 'test' >> generate_examples(is_train=False),
}
def _generate_examples(self, pipeline, is_train: bool):
return pipeline | beam.Map(_split_specific_processing, is_train=is_train)
```
Note: Each split should use a different tag name (e.g.
`'train' >> generate_examples(path)`). Otherwise Beam will raise a
duplicated name error.
Args:
**kwargs: Arguments from the `_split_generators`
Yields:
key: `str` or `int`, a unique deterministic example identification key.
* Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
* Comparable: If shuffling is disabled the key will be used to sort the
dataset.
Good keys can be the image id, or line number if examples are extracted
from a text file.
The examples will be sorted by `hash(key)` if shuffling is enabled, and
otherwise by `key`.
Generating the dataset multiple times will keep examples in the
same order.
example: `dict<str feature_name, feature_value>`, a feature dictionary
ready to be encoded and written to disk. The example will be
encoded with `self.info.features.encode_example({...})`.
"""
raise NotImplementedError()
def _download_and_prepare(
self,
dl_manager: download.DownloadManager,
download_config: download.DownloadConfig,
) -> None:
"""Generate all splits and returns the computed split infos."""
split_builder = split_builder_lib.SplitBuilder(
split_dict=self.info.splits,
features=self.info.features,
max_examples_per_split=download_config.max_examples_per_split,
beam_options=download_config.beam_options,
beam_runner=download_config.beam_runner,
file_format=self.info.file_format,
)
# Wrap the generation inside a context manager.
# If `beam` is used during generation (when a pipeline gets created),
# the context manager is equivalent to `with beam.Pipeline()`.
# Otherwise, this is a no-op.
# By auto-detecting Beam, the user only has to change `_generate_examples`
# to go from non-beam to beam dataset:
# https://www.tensorflow.org/datasets/beam_datasets#instructions
with split_builder.maybe_beam_pipeline():
# If the signature has a `pipeline` kwargs, create the pipeline now and
# forward it to `self._split_generators`
# We add this magic because the pipeline kwarg is only used by c4 and
# we do not want to make the API more verbose for a single advanced case.
# See also the documentation at the end here:
# https://www.tensorflow.org/datasets/api_docs/python/tfds/core/GeneratorBasedBuilder?version=nightly#_generate_examples
signature = inspect.signature(self._split_generators)
if "pipeline" in signature.parameters.keys():
optional_pipeline_kwargs = dict(pipeline=split_builder.beam_pipeline)
else:
optional_pipeline_kwargs = {}
split_generators = self._split_generators( # pylint: disable=unexpected-keyword-arg
dl_manager, **optional_pipeline_kwargs)
# TODO(tfds): Could be removed once all datasets are migrated.
# https://github.com/tensorflow/datasets/issues/2537
# Legacy mode (eventually convert list[SplitGeneratorLegacy] -> dict)
split_generators = split_builder.normalize_legacy_split_generators(
split_generators=split_generators,
generator_fn=self._generate_examples,
is_beam=isinstance(self, BeamBasedBuilder),
)
# Ensure `all` isn't used as key.
_check_split_names(split_generators.keys())
# The writer fails if the number of examples yielded is `0`, so we return here.
if download_config.max_examples_per_split == 0:
return
# Start generating data for all splits
path_suffix = file_adapters.ADAPTER_FOR_FORMAT[
self.info.file_format].FILE_SUFFIX
split_info_futures = []
for split_name, generator in utils.tqdm(
split_generators.items(),
desc="Generating splits...",
unit=" splits",
leave=False,
):
filename_template = naming.ShardedFileTemplate(
split=split_name,
dataset_name=self.name,
data_dir=self.data_path,
filetype_suffix=path_suffix)
future = split_builder.submit_split_generation(
split_name=split_name,
generator=generator,
filename_template=filename_template,
disable_shuffling=self.info.disable_shuffling,
)
split_info_futures.append(future)
# Finalize the splits (after apache beam completed, if it was used)
split_infos = [future.result() for future in split_info_futures]
# Update the info object with the splits.
split_dict = splits_lib.SplitDict(split_infos)
self.info.set_splits(split_dict)
@utils.docs.deprecated
class BeamBasedBuilder(GeneratorBasedBuilder):
"""Beam based Builder.
DEPRECATED: Please use `tfds.core.GeneratorBasedBuilder` instead.
"""
def _generate_examples(self, *args: Any,
**kwargs: Any) -> split_builder_lib.SplitGenerator:
return self._build_pcollection(*args, **kwargs)
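# Editor's note (hedged sketch, not part of TFDS): a minimal generator-based
# dataset combining the `_info`/`_split_generators`/`_generate_examples`
# contract documented above. The class name, features and URL are illustrative
# only; it is kept as a comment because DatasetBuilder subclasses auto-register.
#
#   class MyDataset(tfds.core.GeneratorBasedBuilder):
#     VERSION = tfds.core.Version('1.0.0')
#
#     def _info(self):
#       return tfds.core.DatasetInfo(
#           builder=self,
#           features=tfds.features.FeaturesDict({
#               'text': tfds.features.Text(),
#               'label': tfds.features.ClassLabel(names=['neg', 'pos']),
#           }),
#       )
#
#     def _split_generators(self, dl_manager):
#       path = dl_manager.download_and_extract('http://example.org/data.zip')
#       return {'train': self._generate_examples(path / 'train')}
#
#     def _generate_examples(self, path):
#       for f in path.iterdir():
#         yield f.name, {'text': f.read_text(), 'label': 'pos'}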
def _check_split_names(split_names: Iterable[str]) -> None:
"""Check that split names are valid."""
if "all" in set(str(s).lower() for s in split_names):
raise ValueError(
"`all` is a reserved keyword. Split cannot be named like this.")
def _save_default_config_name(
common_dir: ReadWritePath,
*,
default_config_name: str,
) -> None:
"""Saves `builder_cls` metadata (common to all builder configs)."""
data = {
"default_config_name": default_config_name,
}
# `data_dir/ds_name/config/version/` -> `data_dir/ds_name/.config`
config_dir = common_dir / ".config"
config_dir.mkdir(parents=True, exist_ok=True)
# Note:
# * Save inside a dir to support some replicated filesystems
# * Write inside a `.incomplete` file and rename to avoid multiple configs
# concurrently writing the same file
# * Config file is overwritten each time a config is generated. If the
# default config is changed, this will be updated.
config_path = config_dir / "metadata.json"
with utils.incomplete_file(config_path) as tmp_config_path:
tmp_config_path.write_text(json.dumps(data))
def load_default_config_name(common_dir: ReadOnlyPath,) -> Optional[str]:
"""Load `builder_cls` metadata (common to all builder configs)."""
config_path = utils.as_path(common_dir) / ".config/metadata.json"
if not config_path.exists():
return None
data = json.loads(config_path.read_text())
return data.get("default_config_name")
def cannonical_version_for_config(
instance_or_cls: Union[DatasetBuilder, Type[DatasetBuilder]],
config: Optional[BuilderConfig] = None,
) -> utils.Version:
"""Get the cannonical version for the given config.
This allow to get the version without instanciating the class.
The version can be stored either at the class or in the config object.
Args:
instance_or_cls: The instance or class on which get the version
config: The config which might contain the version, or None if the dataset
do not have config.
Returns:
version: The extracted version.
"""
if instance_or_cls.BUILDER_CONFIGS and config is None:
raise ValueError(
f"Cannot infer version on {instance_or_cls.name}. Unknown config.")
if config and config.version:
return utils.Version(config.version)
elif instance_or_cls.VERSION:
return utils.Version(instance_or_cls.VERSION)
else:
raise ValueError(
f"DatasetBuilder {instance_or_cls.name} does not have a defined "
"version. Please add a `VERSION = tfds.core.Version('x.y.z')` to the "
"class.")
| 39.859581
| 126
| 0.697503
|
09ed425547e12c87429922399edc602039d22ce9
| 11,428
|
py
|
Python
|
airflow/executors/celery_executor.py
|
ltxhxpdd123/gateway-airflow
|
b839509d21a985c7a46fb5c6d54a4b77f1bbc4ae
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-07-07T13:42:38.000Z
|
2021-07-07T13:42:38.000Z
|
airflow/executors/celery_executor.py
|
eilifm/airflow
|
781a82f6389e10f1ad3845d820d36515644e79a9
|
[
"Apache-2.0"
] | 6
|
2020-07-07T20:21:26.000Z
|
2021-09-29T17:29:29.000Z
|
airflow/executors/celery_executor.py
|
eilifm/airflow
|
781a82f6389e10f1ad3845d820d36515644e79a9
|
[
"Apache-2.0"
] | 1
|
2020-01-24T00:20:15.000Z
|
2020-01-24T00:20:15.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import os
import subprocess
import time
import traceback
from multiprocessing import Pool, cpu_count
from celery import Celery
from celery import states as celery_states
from airflow import configuration
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.module_loading import import_string
from airflow.utils.timeout import timeout
# Make it constant for unit test.
CELERY_FETCH_ERR_MSG_HEADER = 'Error fetching Celery task state'
CELERY_SEND_ERR_MSG_HEADER = 'Error sending Celery task'
'''
To start the celery worker, run the command:
airflow worker
'''
if configuration.conf.has_option('celery', 'celery_config_options'):
celery_configuration = import_string(
configuration.conf.get('celery', 'celery_config_options')
)
else:
celery_configuration = DEFAULT_CELERY_CONFIG
app = Celery(
configuration.conf.get('celery', 'CELERY_APP_NAME'),
config_source=celery_configuration)
@app.task
def execute_command(command_to_exec):
log = LoggingMixin().log
log.info("Executing command in Celery: %s", command_to_exec)
env = os.environ.copy()
try:
subprocess.check_call(command_to_exec, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed')
class ExceptionWithTraceback(object):
"""
Wrapper class used to propagate exceptions to parent processes from subprocesses.
:param exception: The exception to wrap
:type exception: Exception
:param exception_traceback: The stacktrace to wrap
:type exception_traceback: str
"""
def __init__(self, exception, exception_traceback):
self.exception = exception
self.traceback = exception_traceback
def fetch_celery_task_state(celery_task):
"""
Fetch and return the state of the given celery task. The scope of this function is
global so that it can be called by subprocesses in the pool.
:param celery_task: a tuple of the Celery task key and the async Celery object used
to fetch the task's state
:type celery_task: tuple(str, celery.result.AsyncResult)
:return: a tuple of the Celery task key and the Celery state of the task
:rtype: tuple[str, str]
"""
try:
with timeout(seconds=2):
# Accessing state property of celery task will make actual network request
# to get the current state of the task.
res = (celery_task[0], celery_task[1].state)
except Exception as e:
exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
traceback.format_exc())
res = ExceptionWithTraceback(e, exception_traceback)
return res
def send_task_to_executor(task_tuple):
key, simple_ti, command, queue, task = task_tuple
try:
with timeout(seconds=2):
result = task.apply_async(args=[command], queue=queue)
except Exception as e:
exception_traceback = "Celery Task ID: {}\n{}".format(key,
traceback.format_exc())
result = ExceptionWithTraceback(e, exception_traceback)
return key, command, result
class CeleryExecutor(BaseExecutor):
"""
CeleryExecutor is recommended for production use of Airflow. It allows
distributing the execution of task instances to multiple worker nodes.
Celery is a simple, flexible and reliable distributed system to process
vast amounts of messages, while providing operations with the tools
required to maintain such a system.
"""
def __init__(self):
super(CeleryExecutor, self).__init__()
# Celery doesn't support querying the state of multiple tasks in parallel
# (which can become a bottleneck on bigger clusters) so we use
# a multiprocessing pool to speed this up.
# How many worker processes are created for checking celery task state.
self._sync_parallelism = configuration.getint('celery', 'SYNC_PARALLELISM')
if self._sync_parallelism == 0:
self._sync_parallelism = max(1, cpu_count() - 1)
self._sync_pool = None
self.tasks = {}
self.last_state = {}
def start(self):
self.log.debug(
'Starting Celery Executor using %s processes for syncing',
self._sync_parallelism
)
def _num_tasks_per_send_process(self, to_send_count):
"""
How many Celery tasks should each worker process send.
:return: Number of tasks that should be sent per process
:rtype: int
"""
return max(1,
int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))
def _num_tasks_per_fetch_process(self):
"""
How many Celery task states should be fetched by each worker process.
:return: Number of tasks that should be used per process
:rtype: int
"""
return max(1,
int(math.ceil(1.0 * len(self.tasks) / self._sync_parallelism)))
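# Editor's note (hedged, illustrative numbers, not Airflow source): with 10
# tracked tasks and a sync parallelism of 4 the helper above gives
#
#   max(1, int(math.ceil(1.0 * 10 / 4)))  # == 3
#
# so each pool worker in the `Pool.map` calls below receives chunks of 3
# task tuples instead of being handed one item at a time.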
def heartbeat(self):
# Triggering new jobs
if not self.parallelism:
open_slots = len(self.queued_tasks)
else:
open_slots = self.parallelism - len(self.running)
self.log.debug("%s running task instances", len(self.running))
self.log.debug("%s in queue", len(self.queued_tasks))
self.log.debug("%s open slots", open_slots)
sorted_queue = sorted(
[(k, v) for k, v in self.queued_tasks.items()],
key=lambda x: x[1][1],
reverse=True)
task_tuples_to_send = []
for i in range(min((open_slots, len(self.queued_tasks)))):
key, (command, _, queue, simple_ti) = sorted_queue.pop(0)
task_tuples_to_send.append((key, simple_ti, command, queue,
execute_command))
cached_celery_backend = None
if task_tuples_to_send:
tasks = [t[4] for t in task_tuples_to_send]
# Celery state queries will get stuck if we do not use the same backend
# for all tasks.
cached_celery_backend = tasks[0].backend
if task_tuples_to_send:
# Use chunking instead of a work queue to reduce context switching
# since tasks are roughly uniform in size
chunksize = self._num_tasks_per_send_process(len(task_tuples_to_send))
num_processes = min(len(task_tuples_to_send), self._sync_parallelism)
send_pool = Pool(processes=num_processes)
key_and_async_results = send_pool.map(
send_task_to_executor,
task_tuples_to_send,
chunksize=chunksize)
send_pool.close()
send_pool.join()
self.log.debug('Sent all tasks.')
for key, command, result in key_and_async_results:
if isinstance(result, ExceptionWithTraceback):
self.log.error(
CELERY_SEND_ERR_MSG_HEADER + ":%s\n%s\n", result.exception, result.traceback
)
elif result is not None:
# Only pops when enqueued successfully, otherwise keep it
# and expect the scheduler loop to deal with it.
self.queued_tasks.pop(key)
result.backend = cached_celery_backend
self.running[key] = command
self.tasks[key] = result
self.last_state[key] = celery_states.PENDING
# Calling child class sync method
self.log.debug("Calling the %s sync method", self.__class__)
self.sync()
def sync(self):
num_processes = min(len(self.tasks), self._sync_parallelism)
if num_processes == 0:
self.log.debug("No task to query celery, skipping sync")
return
self.log.debug("Inquiring about %s celery task(s) using %s processes",
len(self.tasks), num_processes)
# Recreate the process pool each sync in case processes in the pool die
self._sync_pool = Pool(processes=num_processes)
# Use chunking instead of a work queue to reduce context switching since tasks are
# roughly uniform in size
chunksize = self._num_tasks_per_fetch_process()
self.log.debug("Waiting for inquiries to complete...")
task_keys_to_states = self._sync_pool.map(
fetch_celery_task_state,
self.tasks.items(),
chunksize=chunksize)
self._sync_pool.close()
self._sync_pool.join()
self.log.debug("Inquiries completed.")
for key_and_state in task_keys_to_states:
if isinstance(key_and_state, ExceptionWithTraceback):
self.log.error(
CELERY_FETCH_ERR_MSG_HEADER + ", ignoring it:%s\n%s\n",
repr(key_and_state.exception), key_and_state.traceback
)
continue
key, state = key_and_state
try:
if self.last_state[key] != state:
if state == celery_states.SUCCESS:
self.success(key)
del self.tasks[key]
del self.last_state[key]
elif state == celery_states.FAILURE:
self.fail(key)
del self.tasks[key]
del self.last_state[key]
elif state == celery_states.REVOKED:
self.fail(key)
del self.tasks[key]
del self.last_state[key]
else:
self.log.info("Unexpected state: %s", state)
self.last_state[key] = state
except Exception:
self.log.exception("Error syncing the Celery executor, ignoring it.")
def end(self, synchronous=False):
if synchronous:
while any([
task.state not in celery_states.READY_STATES
for task in self.tasks.values()]):
time.sleep(5)
self.sync()
| 37.716172
| 100
| 0.630907
|
f1081412fbcd987368fbf35f6a71afe7c8596b03
| 4,363
|
py
|
Python
|
unit_test/daemonconfig_test.py
|
AYCS/bigitr
|
6beefbe79fab887c4ef60e0dd7fa39bca61a22cc
|
[
"Apache-2.0"
] | 3
|
2016-07-19T07:54:27.000Z
|
2021-09-13T08:25:20.000Z
|
unit_test/daemonconfig_test.py
|
AYCS/bigitr
|
6beefbe79fab887c4ef60e0dd7fa39bca61a22cc
|
[
"Apache-2.0"
] | 1
|
2016-02-03T14:17:47.000Z
|
2016-02-03T15:22:20.000Z
|
unit_test/daemonconfig_test.py
|
mikjo/bigitr
|
6beefbe79fab887c4ef60e0dd7fa39bca61a22cc
|
[
"Apache-2.0"
] | 4
|
2015-05-04T10:03:06.000Z
|
2020-10-22T04:46:38.000Z
|
#
# Copyright 2012 SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from cStringIO import StringIO
import tempfile
import testutils
from bigitr import daemonconfig
class TestDaemonConfig(testutils.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp(suffix='.bigitr')
os.environ['DDIR'] = self.dir
daemonConfig = self.dir + '/daemon'
file(daemonConfig, 'w').write('''
[GLOBAL]
appconfig = ${DDIR}/app
[foo]
repoconfig = ${DDIR}/foo1.* ${DDIR}/foo2.*
[bar]
appconfig = ${DDIR}/app2
repoconfig = ${DDIR}/bar
email = other@other blah@blah
''')
self.cfg = daemonconfig.DaemonConfig(daemonConfig)
def tearDown(self):
self.removeRecursive(self.dir)
os.unsetenv('DDIR')
def test_parallelConversions(self):
self.assertEqual(1, self.cfg.parallelConversions())
self.cfg.set('GLOBAL', 'parallel', '8')
self.assertEqual(8, self.cfg.parallelConversions())
def test_getPollFrequency(self):
self.assertEqual(300, self.cfg.getPollFrequency())
self.cfg.set('GLOBAL', 'pollfrequency', '1h')
self.assertEqual(3600, self.cfg.getPollFrequency())
def test_getFullSyncFrequency(self):
self.assertEqual(86000, self.cfg.getFullSyncFrequency())
self.cfg.set('GLOBAL', 'syncfrequency', '1h')
self.assertEqual(3600, self.cfg.getFullSyncFrequency())
def test_getEmail(self):
self.assertEqual(None, self.cfg.getEmail())
self.cfg.set('GLOBAL', 'email', 'here@here')
self.assertEqual(['here@here'], self.cfg.getEmail())
def test_getMailFrom(self):
self.assertEqual(None, self.cfg.getMailFrom())
self.cfg.set('GLOBAL', 'mailfrom', 'noreply@here')
self.assertEqual('noreply@here', self.cfg.getMailFrom())
def test_getMailAll(self):
self.assertFalse(self.cfg.getMailAll())
self.cfg.set('GLOBAL', 'mailall', 'true')
self.assertTrue(self.cfg.getMailAll())
def test_getSmartHost(self):
self.assertEqual('localhost', self.cfg.getSmartHost())
self.cfg.set('GLOBAL', 'smarthost', 'foo')
self.assertEqual('foo', self.cfg.getSmartHost())
def test_getApplicationContexts(self):
self.assertEqual(set(('foo', 'bar')), self.cfg.getApplicationContexts())
def test_getAppConfig(self):
self.assertEqual(self.dir + '/app', self.cfg.getAppConfig('foo'))
self.assertEqual(self.dir + '/app2', self.cfg.getAppConfig('bar'))
def test_getRepoConfigs(self):
# files have to exist to be globbed
file(self.dir + '/foo1.1', 'w')
file(self.dir + '/foo1.2', 'w')
file(self.dir + '/foo2.1', 'w')
file(self.dir + '/bar', 'w')
self.assertEqual([self.dir + '/foo1.1',
self.dir + '/foo1.2',
self.dir + '/foo2.1'],
self.cfg.getRepoConfigs('foo'))
self.assertEqual([self.dir + '/bar'], self.cfg.getRepoConfigs('bar'))
def test_parseTimeSpec(self):
self.assertEqual(3600, self.cfg._parseTimeSpec('1h'))
self.assertEqual(3600, self.cfg._parseTimeSpec('1H'))
self.assertEqual(60, self.cfg._parseTimeSpec('1m'))
self.assertEqual(60, self.cfg._parseTimeSpec('1M'))
self.assertEqual(1, self.cfg._parseTimeSpec('1s'))
self.assertEqual(1, self.cfg._parseTimeSpec('1S'))
self.assertEqual(1, self.cfg._parseTimeSpec('1'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h1m1'))
self.assertEqual(3612, self.cfg._parseTimeSpec('1h12'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h1m1s'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h 1m 1s'))
self.assertEqual(3661, self.cfg._parseTimeSpec('1h 1m 1s '))
self.assertEqual(3661, self.cfg._parseTimeSpec(' 1h 1m 1s '))
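The `_parseTimeSpec` assertions above effectively document the accepted grammar: whitespace-separated tokens of digits with an optional, case-insensitive h/m/s suffix, where a bare number means seconds. The following minimal sketch satisfies the same assertions; it only illustrates the contract the tests describe and is not the bigitr implementation.

import re

_UNIT_SECONDS = {'h': 3600, 'm': 60, 's': 1, '': 1}

def parse_time_spec_sketch(spec):
    # '1h 1m 1s' -> 3661, '1h12' -> 3612, '1' -> 1
    total = 0
    for value, unit in re.findall(r'(\d+)\s*([hHmMsS]?)', spec):
        total += int(value) * _UNIT_SECONDS[unit.lower()]
    return total

assert parse_time_spec_sketch(' 1h 1m 1s ') == 3661
assert parse_time_spec_sketch('1h12') == 3612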
| 38.27193
| 80
| 0.655971
|
232565ab439d430cd40b5047c41845c7c15fd88c
| 1,626
|
py
|
Python
|
funcao4.py
|
lucaspompeun/metodos-matematicos-aplicados-nas-engenharias-via-sistemas-computacionais
|
008d397f76a935af1aba530cc0134b9dd326d3ac
|
[
"MIT"
] | 16
|
2019-09-27T03:08:44.000Z
|
2020-10-16T18:43:45.000Z
|
primeira-edicao/funcao4.py
|
gm2sc-ifpa/metodos-matematicos-aplicados-nas-engenharias-via-sistemas-computacionais-master
|
f435c366e08dc14b0557f2172ad3b841ddb7ef2e
|
[
"MIT"
] | null | null | null |
primeira-edicao/funcao4.py
|
gm2sc-ifpa/metodos-matematicos-aplicados-nas-engenharias-via-sistemas-computacionais-master
|
f435c366e08dc14b0557f2172ad3b841ddb7ef2e
|
[
"MIT"
] | 5
|
2019-09-13T20:00:38.000Z
|
2020-09-19T03:04:00.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 15:50:09 2019
INSTITUTO FEDERAL DE EDUCAÇÃO, CIÊNCIA E TECNOLOGIA DO PÁRA - IFPA ANANINDEUA
@author:
Prof. Dr. Denis C. L. Costa
Discentes:
Heictor Alves de Oliveira Costa
Lucas Pompeu Neves
Grupo de Pesquisa:
Gradiente de Modelagem Matemática e
Simulação Computacional - GM²SC
Assunto:
Função Polinomial do 4º grau em Python
Nome do sript: funcao4
Disponível em:
"""
# Importing libraries
# numpy library: mathematical operations
import numpy as np
# matplotlib library: graphical representation
import matplotlib.pyplot as plt
# 4th-degree function: f4 = ax**4 + bx**3 + cx**2 + dx + e
print('Coeficientes da Função do 4º grau')
# Coefficients: a (a ≠ 0), b, c, d, e
a = 1
b = 2
c = -13
d = -14
e = 24
print('Coeficiente: a =', a)
print('Coeficiente: b =', b)
print('Coeficiente: c =', c)
print('Coeficiente: d =', d)
print('Coeficiente: e =', e)
# Independent variable: x
# Function domain: (start, end, number of points)
x = np.linspace(-5,4,30)
f4 = a*x**4 + b*x**3 + c*x**2 + d*x + e
input("Pressione <enter> para representar graficamente")
print('')
# Graphical representation of f4
# plot command: (variable, function, 'line color')
plt.plot(x,f4,'k')
plt.xlabel('Valores de x')
plt.ylabel('Valores de y')
plt.title('Função do 4º grau')
plt.grid(True)
plt.show()
print('=== Fim do Programa funcao4 ===')
print('')
input("Acione <Ctrl + l> para limpar o console")
| 23.911765
| 78
| 0.608856
|
73616bf84357d9fe1135967af4ca8fbbb8360dfb
| 4,075
|
py
|
Python
|
pywps/inout/storage/file.py
|
janpisl/pywps
|
73a1835359f0503e08fb007d75de699bf3cf29ed
|
[
"MIT"
] | null | null | null |
pywps/inout/storage/file.py
|
janpisl/pywps
|
73a1835359f0503e08fb007d75de699bf3cf29ed
|
[
"MIT"
] | null | null | null |
pywps/inout/storage/file.py
|
janpisl/pywps
|
73a1835359f0503e08fb007d75de699bf3cf29ed
|
[
"MIT"
] | null | null | null |
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import logging
import os
from pywps._compat import urljoin
from pywps.exceptions import NotEnoughStorage
from pywps import configuration as config
from . import StorageAbstract, STORE_TYPE
LOGGER = logging.getLogger('PYWPS')
class FileStorage(StorageAbstract):
"""File storage implementation, stores data to file system
>>> import ConfigParser
>>> config = ConfigParser.RawConfigParser()
>>> config.add_section('FileStorage')
>>> config.set('FileStorage', 'target', './')
>>> config.add_section('server')
>>> config.set('server', 'outputurl', 'http://foo/bar/filestorage')
>>>
>>> store = FileStorage()
>>>
>>> class FakeOutput(object):
... def __init__(self):
... self.file = self._get_file()
... def _get_file(self):
... tiff_file = open('file.tiff', 'w')
... tiff_file.close()
... return 'file.tiff'
>>> fake_out = FakeOutput()
>>> (type, path, url) = store.store(fake_out)
>>> type == STORE_TYPE.PATH
True
"""
def __init__(self):
"""
"""
self.target = config.get_config_value('server', 'outputpath')
self.output_url = config.get_config_value('server', 'outputurl')
def store(self, output):
import math
import shutil
import tempfile
import uuid
file_name = output.file
request_uuid = output.uuid or uuid.uuid1()
file_block_size = os.stat(file_name).st_blksize
        # get_free_space delivers the number of free blocks, not the available size!
avail_size = get_free_space(self.target) * file_block_size
file_size = os.stat(file_name).st_size
# calculate space used according to block size
actual_file_size = math.ceil(file_size / float(file_block_size)) * file_block_size
if avail_size < actual_file_size:
raise NotEnoughStorage('Not enough space in {} to store {}'.format(self.target, file_name))
# create a target folder for each request
target = os.path.join(self.target, str(request_uuid))
if not os.path.exists(target):
os.makedirs(target)
# build output name
(prefix, suffix) = os.path.splitext(file_name)
if not suffix:
suffix = output.data_format.extension
(file_dir, file_name) = os.path.split(prefix)
output_name = file_name + suffix
# build tempfile in case of duplicates
if os.path.exists(os.path.join(target, output_name)):
output_name = tempfile.mkstemp(suffix=suffix, prefix=file_name + '_',
dir=target)[1]
full_output_name = os.path.join(target, output_name)
LOGGER.info('Storing file output to %s', full_output_name)
shutil.copy2(output.file, full_output_name)
just_file_name = os.path.basename(output_name)
# make sure base url ends with '/'
baseurl = self.output_url.rstrip('/') + '/'
baseurl += str(request_uuid) + '/'
url = urljoin(baseurl, just_file_name)
LOGGER.info('File output URI: %s', url)
return (STORE_TYPE.PATH, output_name, url)
def get_free_space(folder):
""" Return folder/drive free space (in bytes)
"""
import platform
if platform.system() == 'Windows':
import ctypes
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
free_space = free_bytes.value
else:
free_space = os.statvfs(folder).f_bfree
LOGGER.debug('Free space: %s', free_space)
return free_space
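The space check in `store` above rounds the file size up to a whole number of filesystem blocks before comparing it against the free space. A standalone sketch of that rounding, with illustrative numbers, is shown below.

import math

def size_on_disk_sketch(file_size, block_size):
    # Bytes actually consumed: file size rounded up to whole blocks.
    return int(math.ceil(file_size / float(block_size))) * block_size

# Example: a 10000-byte file on a filesystem with 4096-byte blocks occupies 3 blocks.
assert size_on_disk_sketch(10000, 4096) == 12288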
| 35.745614
| 117
| 0.595337
|
1b3aadc177e6447d47512565e56410028b799647
| 18,087
|
py
|
Python
|
data/rainbow/script1/20210221-142149/script1.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | 1
|
2020-06-14T13:50:28.000Z
|
2020-06-14T13:50:28.000Z
|
data/rainbow/script1/20210221-142149/script1.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | null | null | null |
data/rainbow/script1/20210221-142149/script1.py
|
lbaiao/sys-simulator-2
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
[
"MIT"
] | null | null | null |
# Similar to script .
# Uses CompleteEnvironment10dB
# Centralized Learning-Distributed Execution
# Simulates many times, for different number of agents, and take the averages.
# There are different channels to the BS and to the devices.
# Multiple episodes convergence. Everything is in dB.
# One NN is trained and copied to each agent.
from shutil import copyfile
from sys_simulator.general import make_dir_timestamp, save_with_pickle
import matplotlib.pyplot as plt
from sys_simulator.plots import plot_positions_actions_pie
from time import time
from sys_simulator.general import db_to_power, power_to_db
from sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel
from sys_simulator import general as gen
from sys_simulator.q_learning.environments.completeEnvironment10dB \
import CompleteEnvironment10dB
from sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent
from sys_simulator.dqn.externalDQNFramework \
import ExternalDQNFramework, RainbowFramework
from sys_simulator.parameters.parameters import \
EnvironmentParameters, TrainingParameters, DQNAgentParameters
from sys_simulator.q_learning.rewards import dis_reward_tensor_db
from copy import deepcopy
import torch
import numpy as np
import pickle
n_mues = 1 # number of mues
n_d2d = 2 # number of d2d pairs
n_rb = n_mues # number of RBs
carrier_frequency = 2.4 # carrier frequency in GHz
bs_radius = 500 # bs radius in m
rb_bandwidth = 180*1e3 # rb bandwidth in Hz
d2d_pair_distance = 50 # d2d pair distance in m
device_height = 1.5 # mobile devices height in m
bs_height = 25 # BS antenna height in m
p_max = 40 # max tx power in dBm
noise_power = -116 # noise power per RB in dBm
bs_gain = 17 # macro bs antenna gain in dBi
user_gain = 4 # user antenna gain in dBi
sinr_threshold_train = 6 # mue sinr threshold in dB for training
mue_margin = 200 # mue margin in dB
# conversions from dBm to dB
p_max = p_max - 30
noise_power = noise_power - 30
# channel parameters
CHANNEL_RND = True
# q-learning parameters
# training
NUMBER = 1
REWARD_FUNCTION = 'classic'
# exec params
STEPS_PER_EPISODE = 25
TEST_STEPS_PER_EPISODE = 25
MAX_NUM_EPISODES = 960 # medium training
ITERATIONS_PER_NUM_AGENTS = 100
EVAL_EVERY = 40
EVAL_NUM_EPISODES = 20
EVAL_STEPS_PER_EPISODE = 5
# debug params
# STEPS_PER_EPISODE = 2
# TEST_STEPS_PER_EPISODE = 2
# MAX_NUM_EPISODES = 10
# ITERATIONS_PER_NUM_AGENTS = 10
# EVAL_EVERY = 1000
# EVAL_NUM_EPISODES = 2
# EVAL_STEPS_PER_EPISODE = 2
# common
EPSILON_INITIAL = 1
EPSILON_MIN = .05
# EPSILON_DECAY = .9*1e-4 # medium training
EPSILON_DECAY = 1.3/(MAX_NUM_EPISODES*STEPS_PER_EPISODE) # medium training
PRIO_BETA_ITS = int(.8*MAX_NUM_EPISODES*STEPS_PER_EPISODE)
GAMMA = 0.9 # Discount factor
C = 8 # C constant for the improved reward function
TARGET_UPDATE = 24
REPLAY_MEMORY_SIZE = 100000
BATCH_SIZE = 64
HIDDEN_SIZE = 256
NUM_HIDDEN_LAYERS = 1
LEARNING_RATE = 1e-2
REWARD_PENALTY = 1.5
ENVIRONMENT_MEMORY = 2
MAX_NUMBER_OF_AGENTS = 5
max_d2d = MAX_NUMBER_OF_AGENTS
range_n_d2d = range(1, max_d2d + 1, 1)
# more parameters
# linear discretization
# actions = power_to_db(np.linspace(
# db_to_power(p_max-20), db_to_power(p_max-10), 10
# ))
# db discretization
actions = power_to_db(
np.linspace(
1e-6, db_to_power(p_max-10), 10
)
)
env_params = EnvironmentParameters(
rb_bandwidth, d2d_pair_distance, p_max, noise_power,
bs_gain, user_gain, sinr_threshold_train,
n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin
)
params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
agent_params = DQNAgentParameters(
EPSILON_MIN, EPSILON_DECAY, EPSILON_INITIAL, REPLAY_MEMORY_SIZE,
BATCH_SIZE, GAMMA
)
reward_function = dis_reward_tensor_db
channel_to_devices = BANChannel(rnd=CHANNEL_RND)
channel_to_bs = UrbanMacroNLOSWinnerChannel(
rnd=CHANNEL_RND, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height
)
ref_env = CompleteEnvironment10dB(
env_params,
channel_to_bs,
channel_to_devices,
reward_penalty=REWARD_PENALTY,
memory=ENVIRONMENT_MEMORY,
bs_height=bs_height,
reward_function=REWARD_FUNCTION
)
# foo env and foo agents stuff
foo_env = deepcopy(ref_env)
foo_agents = [ExternalDQNAgent(agent_params, [1]) for _ in range(4)]
foo_env.build_scenario(foo_agents)
_, _ = foo_env.step(foo_agents)
env_state_size = foo_env.get_state_size(foo_agents[0])
def train(start):
global actions
framework = RainbowFramework(
agent_params,
env_state_size,
len(actions),
HIDDEN_SIZE,
PRIO_BETA_ITS,
NUM_HIDDEN_LAYERS,
LEARNING_RATE,
)
best_reward = float('-inf')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
mue_spectral_eff_bag = list()
d2d_spectral_eff_bag = list()
rewards_bag = list()
# aux_range = range(max_d2d+1)[1:]
epsilon = agent_params.start_epsilon
for episode in range(MAX_NUM_EPISODES):
env = deepcopy(ref_env)
n_agents = np.random.choice(range_n_d2d)
now = (time() - start) / 60
print(
'Training. ' +
f'Number of agents: {n_agents}. ' +
f'Episode: {episode}/{MAX_NUM_EPISODES-1}. ' +
f'Epsilon: {epsilon}. ' +
f'Prio_Beta: {framework.replay_memory._beta}. ' +
f'Elapsed time: {now} minutes.'
)
agents = [ExternalDQNAgent(agent_params, actions)
for _ in range(n_agents)] # 1 agent per d2d tx
for a in agents:
a.set_epsilon(epsilon)
env.build_scenario(agents)
obs, _ = env.step(agents)
total_reward = 0.0
i = 0
bag = list()
while True:
if i >= params.steps_per_episode:
break
else:
past_actions = torch.zeros([len(agents)], device=device)
for j, agent in enumerate(agents):
agent.get_action(framework, obs[j].float())
past_actions[j] = agent.action_index
# # debugging
# if len(agents) == 2:
# print('debugging')
# aux1 = agents[0].action_index == 9
# aux2 = agents[1].action_index == 5
# aux = [aux1, aux2]
# if np.mean(aux) == 1:
# print('debugging')
next_obs, rewards = env.step(agents)
i += 1
for j, agent in enumerate(agents):
framework.replay_memory.push(
obs[j].cpu(), past_actions[j].cpu(),
rewards[j], next_obs[j].cpu(), 0
)
framework.learn()
total_reward = np.sum(rewards)
bag.append(total_reward.item())
obs = next_obs
if i % TARGET_UPDATE == 0:
framework.target_net.load_state_dict(
framework.policy_net.state_dict()
)
if total_reward > best_reward:
best_reward = total_reward
epsilon = agents[0].epsilon
if episode % EVAL_EVERY == 0:
r, d_speff, m_speff = in_training_test(framework)
rewards_bag.append(r)
# average d2d spectral eff
d2d_spectral_eff_bag.append(d_speff)
# mue spectral eff
mue_spectral_eff_bag.append(m_speff)
# save stuff
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
torch.save(framework.policy_net.state_dict(), data_path)
# Return the trained policy
return framework, rewards_bag, d2d_spectral_eff_bag, mue_spectral_eff_bag, epsilon # noqa
def test(n_agents, test_env, framework):
framework.policy_net.eval()
mue_spectral_effs = []
d2d_spectral_effs = []
rewards_bag = []
# jain_index = [list() for _ in range(max_d2d+1)]
bag = list()
agents = [ExternalDQNAgent(agent_params, actions)
for i in range(n_agents)] # 1 agent per d2d tx
test_env.build_scenario(agents)
obs, _ = test_env.step(agents)
total_reward = 0.0
i = 0
while True:
actions_index = list()
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
bag.append(aux[1].item())
actions_index.append(aux[1].item())
next_obs, rewards = test_env.step(agents)
obs = next_obs
total_reward = sum(rewards)
# saving stuff
rewards_bag.append(total_reward)
mue_spectral_effs.append(test_env.mue_spectral_eff.item())
d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())
i += 1
if i >= TEST_STEPS_PER_EPISODE:
break
mue_success_rate = np.mean(
np.array(mue_spectral_effs) > np.log2(
1 + db_to_power(sinr_threshold_train)
)
)
# jain_index_avg = list()
# for i, j in enumerate(jain_index):
# jain_index_avg.append(np.average(j))
# save data
    return mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards_bag
def in_training_test(framework: ExternalDQNFramework):
mue_spectral_eff_bag = list()
d2d_spectral_eff_bag = list()
rewards_bag = list()
for _ in range(EVAL_NUM_EPISODES):
env = deepcopy(ref_env)
n_agents = np.random.choice(range_n_d2d)
agents = [ExternalDQNAgent(agent_params, actions)
for _ in range(n_agents)] # 1 agent per d2d tx
env.build_scenario(agents)
obs, _ = env.step(agents)
for _ in range(EVAL_STEPS_PER_EPISODE):
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
next_obs, _ = env.step(agents)
obs = next_obs
# mue spectral eff
mue_spectral_eff_bag.append(env.mue_spectral_eff)
# average d2d spectral eff
d2d_spectral_eff_bag.append(env.d2d_spectral_eff)
rewards_bag.append(env.reward)
mean_mue_speff = np.mean(mue_spectral_eff_bag)
mean_d2d_speff = np.mean(d2d_spectral_eff_bag)
mean_reward = np.mean(rewards_bag)
return mean_reward, mean_d2d_speff, mean_mue_speff
def run(framework=None):
mue_sucess_rate_total = []
mue_spectral_effs_total = []
d2d_spectral_effs_total = []
rewards_total = []
start = time()
r, d_speffs, m_speffs, epsilon = 0, 0, 0, 1
if framework is None:
framework, r, d_speffs, m_speffs, epsilon = train(start)
for n in range(1, MAX_NUMBER_OF_AGENTS+1, 1):
mue_suc_rates = []
mue_speff_rates = []
d2d_speff_rates = []
rews = []
for it in range(ITERATIONS_PER_NUM_AGENTS):
now = (time() - start) / 60
print(
'Testing. ' +
f'Number of agents: {n}/{MAX_NUMBER_OF_AGENTS}. ' +
f'Iteration: {it}/{ITERATIONS_PER_NUM_AGENTS-1}. ' +
f'Elapsed time: {now} minutes.'
)
test_env = deepcopy(ref_env)
mue_success_rate, mue_spectral_effs, d2d_spectral_effs, rewards = \
test(n, test_env, framework)
mue_suc_rates.append(mue_success_rate)
mue_speff_rates.append(mue_spectral_effs)
d2d_speff_rates.append(d2d_spectral_effs)
rews.append(rewards)
mue_sucess_rate_total.append(mue_suc_rates)
mue_spectral_effs_total.append(mue_speff_rates)
d2d_spectral_effs_total.append(d2d_speff_rates)
rewards_total.append(rews)
# save stuff
now = (time() - start) / 60
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
dir_path = f'data/rainbow/{filename}'
data_path = make_dir_timestamp(dir_path)
data_file_path = f'{data_path}/log.pickle'
data = {
'mue_success_rate': mue_sucess_rate_total,
'd2d_speffs': d2d_spectral_effs_total,
'mue_speffs': mue_spectral_effs_total,
'rewards': rewards_total,
'mue_sinr_threshold': sinr_threshold_train,
'elapsed_time': now,
'training_rewards': r,
'training_d2d_speffs': d_speffs,
'training_mue_speffs': m_speffs,
'eval_every': EVAL_EVERY,
'final_epsilon': epsilon,
}
save_with_pickle(data, data_file_path)
copyfile(__file__, f'{data_path}/{filename}.py')
print(f'done. Elapsed time: {now} minutes.')
def run_test():
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
framework = torch.load(data_path)
run(framework)
def test_exec():
# environment
test_env = deepcopy(ref_env)
# load framework
framework = ExternalDQNFramework(
agent_params,
env_state_size,
len(actions),
HIDDEN_SIZE,
NUM_HIDDEN_LAYERS,
LEARNING_RATE
)
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'models/dql/{filename}.pt'
state_dict = torch.load(data_path)
framework.policy_net.load_state_dict(state_dict)
framework.policy_net.eval()
# simulation stuff
mue_spectral_effs = []
d2d_spectral_effs = []
rewards_bag = []
# devices positions
pairs_positions = [
((-400, 0, device_height), (-450, 0, device_height)),
((100, 0, device_height), (150, 0, device_height)),
((225, 225, device_height), (275, 225, device_height)),
((55, -55, device_height), (55, -5, device_height)),
]
mue_position = (0, 200, device_height)
# jain_index = [list() for _ in range(max_d2d+1)]
n_agents = len(pairs_positions)
bag = list()
agents = [ExternalDQNAgent(agent_params, actions)
for i in range(n_agents)] # 1 agent per d2d tx
test_env.set_scenario(pairs_positions, mue_position, agents)
obs, _ = test_env.step(agents)
total_reward = 0.0
i = 0
while True:
actions_index = list()
for j, agent in enumerate(agents):
aux = agent.act(framework, obs[j].float()).max(1)
agent.set_action(aux[1].long(),
agent.actions[aux[1].item()])
bag.append(aux[1].item())
actions_index.append(aux[1].item())
next_obs, rewards = test_env.step(agents)
obs = next_obs
total_reward = sum(rewards)
# saving stuff
rewards_bag.append(total_reward)
mue_spectral_effs.append(test_env.mue_spectral_eff.item())
d2d_spectral_effs.append(test_env.d2d_spectral_eff.item())
i += 1
if i >= TEST_STEPS_PER_EPISODE:
break
d2d_txs, d2d_rxs = zip(*test_env.d2d_pairs)
# D2D interference on the MUE, in dB
d2d_interferences = np.array([
d.caused_mue_interference for d in d2d_txs
])
d2d_interferences_mag = db_to_power(d2d_interferences)
d2d_total_interference = np.sum(d2d_interferences_mag)
percentage_interferences = d2d_interferences_mag / d2d_total_interference
interferences, tx_labels, rx_labels = calculate_interferences(test_env)
if d2d_total_interference != 0:
plot_positions_actions_pie(
test_env.bs, test_env.mue, d2d_txs, d2d_rxs,
actions_index, percentage_interferences,
test_env.mue.sinr > sinr_threshold_train, sinr_threshold_train,
test_env.reward, interferences, tx_labels, rx_labels
)
# jain_index[n_agents].append(gen.jain_index(test_env.sinr_d2ds))
mue_success_rate = np.mean(
np.array(mue_spectral_effs) > np.log2(
1 + db_to_power(sinr_threshold_train)
)
)
# jain_index_avg = list()
# for i, j in enumerate(jain_index):
# jain_index_avg.append(np.average(j))
# save data
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
data_path = f'data/rainbow/{filename}_exec.pickle'
data = {
'd2d_speffs_avg_total': d2d_spectral_effs,
'mue_success_rate': mue_success_rate,
'chosen_actions': bag,
'd2d_speffs': d2d_spectral_effs,
'mue_speffs': mue_spectral_effs,
'rewards': rewards_bag,
'mue_sinr_threshold': sinr_threshold_train,
}
with open(data_path, 'wb') as file:
pickle.dump(data, file)
# plot
print_stuff(actions, test_env)
plt.show()
def calculate_interferences(env: CompleteEnvironment10dB):
bs = env.bs
mue = env.mue
d2d_pairs = env.d2d_pairs
txs = [mue]
txs += [p[0] for p in d2d_pairs]
rxs = [bs]
rxs += [p[1] for p in d2d_pairs]
interferences = np.zeros((len(txs), len(rxs)))
for i, tx in enumerate(txs):
for j, (rx, interfered) in enumerate(zip(rxs, txs)):
if tx == interfered:
interf = tx.power_at_receiver
elif tx == mue:
interf = interfered.received_mue_interference
elif rx == bs:
interf = tx.caused_mue_interference
else:
interf = [
power_to_db(i[1]) for i in interfered.interferences
if i[0] == tx.id
][0]
interferences[i][j] = interf
tx_labels = [d.id for d in txs]
rx_labels = [d.id for d in rxs]
return interferences, tx_labels, rx_labels
def print_stuff(actions, env: CompleteEnvironment10dB):
actions = [f'{i:.2f}' for i in actions]
sinr_d2ds = [f'{d[0].sinr:.2f}' for d in env.d2d_pairs]
print(f'MUE Tx Power [dBW]: {env.mue.tx_power:.2f}')
print(f'D2D Power levels [dBW]: {actions}')
print(f'D2D SINR [dB]: {sinr_d2ds}')
print(f'D2D Spectral Efficiencies: {env.d2d_spectral_eff}')
if __name__ == '__main__':
run()
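The script converts between dB and linear power throughout via `db_to_power` and `power_to_db` from sys_simulator.general; their implementation is not shown here, so the sketch below only restates the standard definitions the surrounding code appears to rely on.

import math

def db_to_power_sketch(x_db):
    # Linear power from a dB value: 10 ** (dB / 10).
    return 10 ** (x_db / 10)

def power_to_db_sketch(x):
    # dB value from a linear power: 10 * log10(x).
    return 10 * math.log10(x)

# Example: the -116 dBm noise power above is -146 dBW, i.e. roughly 2.5e-15 W.
print(f'{db_to_power_sketch(-146):.2e} W')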
| 36.174
| 94
| 0.643998
|
71a75f01512cc0dafd36637b8e11efc3cbc99b29
| 283
|
py
|
Python
|
preprocess_web/code/ravens_metadata_apps/dataverse_connect/dv_constants.py
|
TwoRavens/raven-metadata-service
|
9461522219f5ef0f4877f24c8f5923e462bd9557
|
[
"Apache-2.0"
] | null | null | null |
preprocess_web/code/ravens_metadata_apps/dataverse_connect/dv_constants.py
|
TwoRavens/raven-metadata-service
|
9461522219f5ef0f4877f24c8f5923e462bd9557
|
[
"Apache-2.0"
] | 103
|
2018-03-13T20:43:59.000Z
|
2021-02-06T19:27:16.000Z
|
preprocess_web/code/ravens_metadata_apps/dataverse_connect/dv_constants.py
|
TwoRavens/raven-metadata-service
|
9461522219f5ef0f4877f24c8f5923e462bd9557
|
[
"Apache-2.0"
] | 1
|
2019-09-11T22:36:14.000Z
|
2019-09-11T22:36:14.000Z
|
"""Used when working with Dataverse"""
# https://dataverse.harvard.edu/file.xhtml?fileId=3135445&version=RELEASED&version=.0
#
KEY_DATAVERSE_FILE_ID = 'fileId'
KEY_DATAVERSE_FILE_VERSION = 'version'
PATH_DATAFILE_ACCESS = '/api/access/datafile/'
PATH_DATAFILE_PAGE = '/file.xhtml'
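A short sketch of how these constants might be combined into a datafile access URL; the base host is a placeholder and the file id is the example from the comment above, so real client code may assemble URLs differently.

DATAVERSE_BASE = 'https://dataverse.harvard.edu'  # placeholder host
EXAMPLE_FILE_ID = '3135445'  # example id taken from the comment above

access_url = '{base}{path}{file_id}'.format(
    base=DATAVERSE_BASE,
    path=PATH_DATAFILE_ACCESS,
    file_id=EXAMPLE_FILE_ID,
)
# -> https://dataverse.harvard.edu/api/access/datafile/3135445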
| 28.3
| 85
| 0.780919
|
96b803a4952e98b85ece87bccc66c4f444d42cd3
| 880
|
py
|
Python
|
poseidon/dags/sde/gp_land_use_dags.py
|
panda-tech/poseidon-airflow
|
bce5bc02b55f15330635a436056d99acb93488ef
|
[
"Apache-2.0"
] | null | null | null |
poseidon/dags/sde/gp_land_use_dags.py
|
panda-tech/poseidon-airflow
|
bce5bc02b55f15330635a436056d99acb93488ef
|
[
"Apache-2.0"
] | null | null | null |
poseidon/dags/sde/gp_land_use_dags.py
|
panda-tech/poseidon-airflow
|
bce5bc02b55f15330635a436056d99acb93488ef
|
[
"Apache-2.0"
] | null | null | null |
"""_dags file for 'general plan land use' sde extraction."""
from airflow.models import DAG
from trident.util import general
from dags.sde.gp_land_use_jobs import sde_to_shp
from trident.util.sde_extract_tasks import create_sde_tasks
args = general.args
conf = general.config
schedule = general.schedule['gis_weekly']
start_date = general.start_date['gis_weekly']
folder = 'gp_land_use'
layer = 'gp_land_use'
datasd_name = 'gp_land_use_datasd'
md = 'general-plan-land-use'
path_to_file = conf['prod_data_dir'] + '/' + datasd_name
dag = DAG(dag_id='gis_{layer}'.format(layer=layer),
default_args=args,
start_date=start_date,
schedule_interval=schedule)
#: Create tasks dynamically
create_sde_tasks(
dag=dag,
folder=folder,
layer=layer,
datasd_name=datasd_name,
md=md,
path_to_file=path_to_file,
sde_to_shp=sde_to_shp)
| 26.666667
| 60
| 0.740909
|
ee86a589602346dd5e084c255b576cf0d16438c9
| 2,020
|
py
|
Python
|
azure-batch/azure/batch/models/output_file_blob_container_destination.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | null | null | null |
azure-batch/azure/batch/models/output_file_blob_container_destination.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-batch/azure/batch/models/output_file_blob_container_destination.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OutputFileBlobContainerDestination(Model):
"""Specifies a file upload destination within an Azure blob storage container.
All required parameters must be populated in order to send to Azure.
:param path: The destination blob or virtual directory within the Azure
Storage container. If filePattern refers to a specific file (i.e. contains
no wildcards), then path is the name of the blob to which to upload that
file. If filePattern contains one or more wildcards (and therefore may
match multiple files), then path is the name of the blob virtual directory
(which is prepended to each blob name) to which to upload the file(s). If
omitted, file(s) are uploaded to the root of the container with a blob
name matching their file name.
:type path: str
:param container_url: Required. The URL of the container within Azure Blob
Storage to which to upload the file(s). The URL must include a Shared
Access Signature (SAS) granting write permissions to the container.
:type container_url: str
"""
_validation = {
'container_url': {'required': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'container_url': {'key': 'containerUrl', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OutputFileBlobContainerDestination, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
self.container_url = kwargs.get('container_url', None)
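A minimal usage sketch of the model defined above; the path and container URL are placeholders (the latter stands in for a SAS URL), not working values.

# Hypothetical usage: upload task output into a virtual directory of a container.
destination = OutputFileBlobContainerDestination(
    path='taskoutputs/stdout.txt',  # blob or virtual directory inside the container
    container_url='https://myaccount.blob.core.windows.net/mycontainer?sv=...&sig=...',
)
print(destination.path, destination.container_url)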
| 42.083333
| 82
| 0.65495
|
8b21bd43e2d9ae30ea9d5e0c9bc4db366bd9adca
| 8,458
|
py
|
Python
|
kubernetes/client/models/io_xk8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_addons.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/io_xk8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_addons.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/io_xk8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_spec_addons.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'conflict_resolution': 'str',
'name': 'str',
'service_account_role_arn': 'str',
'version': 'str'
}
attribute_map = {
'conflict_resolution': 'conflictResolution',
'name': 'name',
'service_account_role_arn': 'serviceAccountRoleARN',
'version': 'version'
}
def __init__(self, conflict_resolution=None, name=None, service_account_role_arn=None, version=None, local_vars_configuration=None): # noqa: E501
"""IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._conflict_resolution = None
self._name = None
self._service_account_role_arn = None
self._version = None
self.discriminator = None
if conflict_resolution is not None:
self.conflict_resolution = conflict_resolution
self.name = name
if service_account_role_arn is not None:
self.service_account_role_arn = service_account_role_arn
self.version = version
@property
def conflict_resolution(self):
"""Gets the conflict_resolution of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
ConflictResolution is used to declare what should happen if there are parameter conflicts. Defaults to none # noqa: E501
:return: The conflict_resolution of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:rtype: str
"""
return self._conflict_resolution
@conflict_resolution.setter
def conflict_resolution(self, conflict_resolution):
"""Sets the conflict_resolution of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons.
ConflictResolution is used to declare what should happen if there are parameter conflicts. Defaults to none # noqa: E501
:param conflict_resolution: The conflict_resolution of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:type: str
"""
allowed_values = ["overwrite", "none"] # noqa: E501
if self.local_vars_configuration.client_side_validation and conflict_resolution not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `conflict_resolution` ({0}), must be one of {1}" # noqa: E501
.format(conflict_resolution, allowed_values)
)
self._conflict_resolution = conflict_resolution
@property
def name(self):
"""Gets the name of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
Name is the name of the addon # noqa: E501
:return: The name of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons.
Name is the name of the addon # noqa: E501
:param name: The name of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 2):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `2`") # noqa: E501
self._name = name
@property
def service_account_role_arn(self):
"""Gets the service_account_role_arn of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
ServiceAccountRoleArn is the ARN of an IAM role to bind to the addons service account # noqa: E501
:return: The service_account_role_arn of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:rtype: str
"""
return self._service_account_role_arn
@service_account_role_arn.setter
def service_account_role_arn(self, service_account_role_arn):
"""Sets the service_account_role_arn of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons.
ServiceAccountRoleArn is the ARN of an IAM role to bind to the addons service account # noqa: E501
:param service_account_role_arn: The service_account_role_arn of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:type: str
"""
self._service_account_role_arn = service_account_role_arn
@property
def version(self):
"""Gets the version of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
Version is the version of the addon to use # noqa: E501
:return: The version of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons.
Version is the version of the addon to use # noqa: E501
:param version: The version of this IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and version is None: # noqa: E501
raise ValueError("Invalid value for `version`, must not be `None`") # noqa: E501
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons):
return True
return self.to_dict() != other.to_dict()
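A short sketch of the client-side validation wired into the setters above, assuming `client_side_validation` is left at its default (enabled) in Configuration; the addon name and version are illustrative values only.

addon = IoXK8sClusterControlplaneV1alpha3AWSManagedControlPlaneSpecAddons(
    name='vpc-cni', version='v1.11.0', conflict_resolution='overwrite')
print(addon.to_dict())

try:
    addon.conflict_resolution = 'merge'  # not one of the allowed values
except ValueError as err:
    print(err)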
| 38.798165
| 158
| 0.672618
|
c1d6b2a8b00321caeba3fde4d6203444cd4fe282
| 582
|
py
|
Python
|
code/pgms/reverse-int.py
|
souradeepta/PythonPractice
|
fa956ca4b87a0eb92fee21fa78e59757ce665770
|
[
"MIT"
] | null | null | null |
code/pgms/reverse-int.py
|
souradeepta/PythonPractice
|
fa956ca4b87a0eb92fee21fa78e59757ce665770
|
[
"MIT"
] | 4
|
2021-03-19T02:04:20.000Z
|
2021-09-22T18:54:16.000Z
|
code/pgms/reverse-int.py
|
souradeepta/PythonPractice
|
fa956ca4b87a0eb92fee21fa78e59757ce665770
|
[
"MIT"
] | null | null | null |
def reverse(x: int) -> int:
    # bounds of a 32-bit signed integer
    INT_MAX = 2**31 - 1
    INT_MIN = -2**31
ans = 0
neg_sign = False
if x < 0:
neg_sign = True
x_abs = abs(x)
while x_abs > 0:
rem = x_abs % 10
quo = x_abs //10
ans = ans*10 + rem
x_abs = quo
if neg_sign == True:
ans = ans*-1
if ans >= INT_MIN and ans <= INT_MAX:
return ans
else:
return 0
result = reverse(123)
print(result)
result = reverse(-1235)
print(result)
result = reverse(-12351408304981940241892834)
print(result)
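For comparison, the same result can be obtained with string slicing; this is only an alternative sketch using the same 32-bit signed bounds, not a replacement for the arithmetic version above.

def reverse_via_string(x: int) -> int:
    sign = -1 if x < 0 else 1
    ans = sign * int(str(abs(x))[::-1])
    return ans if -2**31 <= ans <= 2**31 - 1 else 0

assert reverse_via_string(123) == 321
assert reverse_via_string(-1235) == -5321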
| 19.4
| 45
| 0.517182
|
0ee7f27a64c013a0f6c76e6a0ca83c6fb4a68725
| 141
|
py
|
Python
|
PythonCode/1096.py
|
CrystianPrintes20/ProjetoUri
|
92a88ae2671a556f4d418c3605e9a2c6933dc9d8
|
[
"MIT"
] | null | null | null |
PythonCode/1096.py
|
CrystianPrintes20/ProjetoUri
|
92a88ae2671a556f4d418c3605e9a2c6933dc9d8
|
[
"MIT"
] | null | null | null |
PythonCode/1096.py
|
CrystianPrintes20/ProjetoUri
|
92a88ae2671a556f4d418c3605e9a2c6933dc9d8
|
[
"MIT"
] | null | null | null |
j = 5
for i in range(1,10,2):
print('I={} J={}'.format(i, j+2))
print('I={} J={}'.format(i, j+1))
print('I={} J={}'.format(i, j))
| 28.2
| 37
| 0.453901
|
2336258913cdae3881e4328795163527fd883de2
| 2,734
|
py
|
Python
|
06/LaborSupplyModel.py
|
AskerNC/lectures-2021
|
d152450b2fee7be775892dde1a467639aa5e35ea
|
[
"MIT"
] | 9
|
2020-11-30T22:25:38.000Z
|
2021-10-05T12:17:11.000Z
|
06/LaborSupplyModel.py
|
AskerNC/lectures-2021
|
d152450b2fee7be775892dde1a467639aa5e35ea
|
[
"MIT"
] | 1
|
2021-04-12T14:15:49.000Z
|
2021-04-12T15:03:55.000Z
|
06/LaborSupplyModel.py
|
AskerNC/lectures-2021
|
d152450b2fee7be775892dde1a467639aa5e35ea
|
[
"MIT"
] | 30
|
2021-02-08T16:18:01.000Z
|
2022-02-05T17:02:35.000Z
|
import numpy as np
from scipy import optimize
def implied_tax(l,w,tau0,tau1,kappa):
""" calculate implied tax of labor supply choice
Args:
l (float): labor supply
w (float): wage
tau0 (float): standard labor tax
tau1 (float): top bracket labor income tax
kappa (float): cut-off for the top labor income bracket
Returns:
(float): total tax bill
"""
return tau0*w*l + tau1*np.fmax(w*l-kappa,0)
def implied_c(l,m,w,tau0,tau1,kappa):
""" calculate implied optimal consumption of labor supply choice
Args:
l (float): labor supply
m (float): cash-on-hand
w (float): wage
tau0 (float): standard labor tax
tau1 (float): top bracket labor income tax
kappa (float): cut-off for the top labor income bracket
Returns:
(float): consumption
"""
return m + w*l - implied_tax(l,w,tau0,tau1,kappa)
def utility(c,l,nu,frisch):
""" utility of consumption and labor supply decision
Args:
c (float): consumption
l (float): labor supply
nu (float): disutility of labor supply
frisch (float): frisch elasticity of labor supply
Returns:
(float): utility
"""
return np.log(c) - nu*l**(1+1/frisch)/(1+1/frisch)
def value_of_choice(l,nu,frisch,m,w,tau0,tau1,kappa):
""" calculate implied utlity of consumption and labor supply choice
Args:
l (float): labor supply
nu (float): disutility of labor supply
frisch (float): frisch elasticity of labor supply
m (float): cash-on-hand
w (float): wage
tau0 (float): standard labor tax
tau1 (float): top bracket labor income tax
kappa (float): cut-off for the top labor income bracket
Returns:
(float): utility
"""
c = implied_c(l,m,w,tau0,tau1,kappa)
return utility(c,l,nu,frisch)
def find_optimal_labor_supply(nu,frisch,m,w,tau0,tau1,kappa):
""" find optimal labor supply choice
Args:
nu (float): disutility of labor supply
frisch (float): frisch elasticity of labor supply
m (float): cash-on-hand
w (float): wage
tau0 (float): standard labor tax
tau1 (float): top bracket labor income tax
kappa (float): cut-off for the top labor income bracket
Returns:
        (float): optimal labor supply
"""
obj = lambda l: -value_of_choice(l,nu,frisch,m,w,tau0,tau1,kappa)
res = optimize.minimize_scalar(obj,bounds=(1e-8,1),method='bounded')
return res.x
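A small usage sketch of the solver defined above; the parameter values are arbitrary illustrative numbers, not calibrated estimates.

if __name__ == '__main__':
    # Illustrative parameters: disutility of labor, Frisch elasticity,
    # cash-on-hand, wage, standard tax, top-bracket tax, top-bracket cut-off.
    nu, frisch, m, w = 10.0, 0.3, 1.0, 1.5
    tau0, tau1, kappa = 0.4, 0.1, 0.5

    l_star = find_optimal_labor_supply(nu, frisch, m, w, tau0, tau1, kappa)
    c_star = implied_c(l_star, m, w, tau0, tau1, kappa)
    print(f'optimal labor supply: {l_star:.3f}, implied consumption: {c_star:.3f}')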
| 25.792453
| 72
| 0.581931
|
f802c4f1eaa3b7681922ca206ccdc6f84fbdb934
| 1,483
|
py
|
Python
|
azure-mgmt-alertsmanagement/azure/mgmt/alertsmanagement/models/alerts_summary_group_item_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-alertsmanagement/azure/mgmt/alertsmanagement/models/alerts_summary_group_item_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-alertsmanagement/azure/mgmt/alertsmanagement/models/alerts_summary_group_item_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AlertsSummaryGroupItem(Model):
"""Alerts summary group item.
:param name: Value of the aggregated field
:type name: str
:param count: Count of the aggregated field
:type count: int
:param groupedby: Name of the field aggregated
:type groupedby: str
:param values: List of the items
:type values:
list[~azure.mgmt.alertsmanagement.models.AlertsSummaryGroupItem]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'groupedby': {'key': 'groupedby', 'type': 'str'},
'values': {'key': 'values', 'type': '[AlertsSummaryGroupItem]'},
}
def __init__(self, *, name: str=None, count: int=None, groupedby: str=None, values=None, **kwargs) -> None:
super(AlertsSummaryGroupItem, self).__init__(**kwargs)
self.name = name
self.count = count
self.groupedby = groupedby
self.values = values
| 35.309524
| 111
| 0.590695
|
b8b019d929441b29cf4500e6739b09e722793f0c
| 58
|
py
|
Python
|
ncbitax2lin/__init__.py
|
zyxue/ncbitax2lin
|
95dc13f6a8ef9a18b5569b877f7f2c2cce068412
|
[
"MIT"
] | 103
|
2016-11-05T19:47:03.000Z
|
2022-01-13T00:47:16.000Z
|
ncbitax2lin/__init__.py
|
zyxue/ncbitax2lin
|
95dc13f6a8ef9a18b5569b877f7f2c2cce068412
|
[
"MIT"
] | 14
|
2017-02-28T20:56:41.000Z
|
2022-03-20T18:58:05.000Z
|
ncbitax2lin/__init__.py
|
zyxue/ncbitax2lin
|
95dc13f6a8ef9a18b5569b877f7f2c2cce068412
|
[
"MIT"
] | 21
|
2017-05-16T08:44:47.000Z
|
2021-11-12T03:31:19.000Z
|
"""__init__.py for this project"""
__version__ = "2.0.2"
| 14.5
| 34
| 0.655172
|
c39cdbb652276c4758a41495a383938f6ae233f9
| 15,432
|
py
|
Python
|
ceilometer/tests/api/v2/test_post_samples_scenarios.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | 2
|
2015-09-07T09:15:26.000Z
|
2015-09-30T02:13:23.000Z
|
ceilometer/tests/api/v2/test_post_samples_scenarios.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/tests/api/v2/test_post_samples_scenarios.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | 1
|
2019-09-16T02:11:41.000Z
|
2019-09-16T02:11:41.000Z
|
#
# Copyright 2013 Red Hat, Inc
#
# Author: Angus Salkeld <asalkeld@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test listing raw events.
"""
import copy
import datetime
import mock
from oslo.utils import timeutils
from oslotest import mockpatch
from ceilometer.tests.api import v2
from ceilometer.tests import db as tests_db
class TestPostSamples(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
def fake_notifier_sample(self, ctxt, event_type, payload):
for m in payload:
del m['message_signature']
self.published.append(payload)
def setUp(self):
self.published = []
notifier = mock.Mock()
notifier.sample.side_effect = self.fake_notifier_sample
self.useFixture(mockpatch.Patch('oslo.messaging.Notifier',
return_value=notifier))
super(TestPostSamples, self).setUp()
def test_one(self):
s1 = [{'counter_name': 'apples',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/apples/', s1)
# timestamp not given so it is generated.
s1[0]['timestamp'] = data.json[0]['timestamp']
# Ignore message id that is randomly generated
s1[0]['message_id'] = data.json[0]['message_id']
# source is generated if not provided.
s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
self.assertEqual(s1, data.json)
self.assertEqual(s1[0], self.published[0][0])
def test_nested_metadata(self):
s1 = [{'counter_name': 'apples',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'nest.name1': 'value1',
'name2': 'value2',
'nest.name2': 'value3'}}]
data = self.post_json('/meters/apples/', s1)
# timestamp not given so it is generated.
s1[0]['timestamp'] = data.json[0]['timestamp']
# Ignore message id that is randomly generated
s1[0]['message_id'] = data.json[0]['message_id']
# source is generated if not provided.
s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
unwound = copy.copy(s1[0])
unwound['resource_metadata'] = {'nest': {'name1': 'value1',
'name2': 'value3'},
'name2': 'value2'}
# only the published sample should be unwound, not the representation
# in the API response
self.assertEqual(s1[0], data.json[0])
self.assertEqual(unwound, self.published[0][0])
def test_invalid_counter_type(self):
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'INVALID_TYPE',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True)
self.assertEqual(400, data.status_int)
self.assertEqual(0, len(self.published))
    def test_message_id_provided(self):
"""Do not accept sample with message_id."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'message_id': 'evil',
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True)
self.assertEqual(400, data.status_int)
self.assertEqual(0, len(self.published))
def test_wrong_project_id(self):
"""Do not accept cross posting samples to different projects."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True,
headers={
"X-Roles": "Member",
"X-Tenant-Name": "lu-tenant",
"X-Project-Id":
"bc23a9d531064583ace8f67dad60f6bb",
})
self.assertEqual(400, data.status_int)
self.assertEqual(0, len(self.published))
def test_multiple_samples(self):
"""Send multiple samples.
        The use case here is to reduce the chatter and send the counters
at a slower cadence.
"""
samples = []
for x in range(6):
dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
s = {'counter_name': 'apples',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': float(x * 3),
'source': 'evil',
'timestamp': dt.isoformat(),
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': str(x),
'name2': str(x + 4)}}
samples.append(s)
data = self.post_json('/meters/apples/', samples)
for x, s in enumerate(samples):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (s['project_id'],
s['source'])
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# remove tzinfo to compare generated timestamp
# with the provided one
c = data.json[x]
timestamp = timeutils.parse_isotime(c['timestamp'])
c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
# do the same on the pipeline
msg = self.published[0][x]
timestamp = timeutils.parse_isotime(msg['timestamp'])
msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
self.assertEqual(s, c)
self.assertEqual(s, self.published[0][x])
def test_missing_mandatory_fields(self):
"""Do not accept posting samples with missing mandatory fields."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
# one by one try posting without a mandatory field.
for m in ['counter_volume', 'counter_unit', 'counter_type',
'resource_id', 'counter_name']:
s_broke = copy.copy(s1)
del s_broke[0][m]
print('posting without %s' % m)
data = self.post_json('/meters/my_counter_name', s_broke,
expect_errors=True)
self.assertEqual(400, data.status_int)
def test_multiple_project_id_and_admin(self):
"""Allow admin is allowed to set multiple project_id."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
},
{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 2,
'source': 'closedstack',
'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
headers={"X-Roles": "admin"})
self.assertEqual(200, data.status_int)
for x, s in enumerate(s1):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (s['project_id'],
'closedstack')
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# timestamp not given so it is generated.
s['timestamp'] = data.json[x]['timestamp']
s.setdefault('resource_metadata', dict())
self.assertEqual(s, data.json[x])
self.assertEqual(s, self.published[0][x])
def test_multiple_samples_multiple_sources(self):
"""Test posting with special conditions.
        Accept a single post containing multiple sources, some of them
        null.
"""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'paperstack',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
},
{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 5,
'source': 'waterstack',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
},
{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 2,
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True)
self.assertEqual(200, data.status_int)
for x, s in enumerate(s1):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (
s['project_id'],
s.get('source', self.CONF.sample_source)
)
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# timestamp not given so it is generated.
s['timestamp'] = data.json[x]['timestamp']
s.setdefault('resource_metadata', dict())
self.assertEqual(s, data.json[x])
self.assertEqual(s, self.published[0][x])
def test_missing_project_user_id(self):
"""Ensure missing project & user IDs are defaulted appropriately."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
project_id = 'bc23a9d531064583ace8f67dad60f6bb'
user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff'
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True,
headers={
'X-Roles': 'chief-bottle-washer',
'X-Project-Id': project_id,
'X-User-Id': user_id,
})
self.assertEqual(200, data.status_int)
for x, s in enumerate(s1):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (project_id,
s['source'])
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# timestamp not given so it is generated.
s['timestamp'] = data.json[x]['timestamp']
s['user_id'] = user_id
s['project_id'] = project_id
self.assertEqual(s, data.json[x])
self.assertEqual(s, self.published[0][x])
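test_nested_metadata above depends on the API "unwinding" dotted metadata keys before publishing ('nest.name1' becomes {'nest': {'name1': ...}}). The helper below is only a reference sketch of that transformation; Ceilometer's own implementation lives elsewhere and may differ.

def unwind_metadata_sketch(flat):
    # Expand dotted keys into nested dicts: {'a.b': 1} -> {'a': {'b': 1}}.
    nested = {}
    for key, value in flat.items():
        target = nested
        parts = key.split('.')
        for part in parts[:-1]:
            target = target.setdefault(part, {})
        target[parts[-1]] = value
    return nested

assert unwind_metadata_sketch(
    {'nest.name1': 'value1', 'name2': 'value2', 'nest.name2': 'value3'}
) == {'nest': {'name1': 'value1', 'name2': 'value3'}, 'name2': 'value2'}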
| 43.965812
| 77
| 0.535122
|
15fcd117778f71c5751ce72b2574af9e2850a778
| 3,309
|
py
|
Python
|
unit/api/spellcheck/test_spell_checker.py
|
flaxandteal/dp-conceptual-search
|
16c6383a61ba5b7069337c2626a0dc243bfe9d35
|
[
"MIT"
] | 3
|
2018-05-10T16:49:27.000Z
|
2022-03-29T15:23:04.000Z
|
unit/api/spellcheck/test_spell_checker.py
|
flaxandteal/dp-conceptual-search
|
16c6383a61ba5b7069337c2626a0dc243bfe9d35
|
[
"MIT"
] | 2
|
2018-09-20T06:37:27.000Z
|
2018-11-12T12:05:08.000Z
|
unit/api/spellcheck/test_spell_checker.py
|
flaxandteal/dp-conceptual-search
|
16c6383a61ba5b7069337c2626a0dc243bfe9d35
|
[
"MIT"
] | 3
|
2018-06-25T10:48:43.000Z
|
2021-04-11T08:01:27.000Z
|
"""
Tests the spell checker API (/spellcheck)
"""
from unit.utils.search_test_app import SearchTestApp
class SpellCheckTestCase(SearchTestApp):
@property
def sample_words(self) -> dict:
"""
Returns a set of words to be used for testing, with their corrections
:return:
"""
return {
"rpo": "rpi",
"roi": "rpi",
"cpl": "cpi",
"infltion": "inflation",
"economi": "economic"
}
def test_spell_check(self):
"""
Mimics the unit test in the ml package, but directs requests through the API
:return:
"""
expected_keys = ["input_token", "correction", "probability"]
sample_words = self.sample_words
for sample_word in sample_words:
params = {
"q": sample_word,
}
url_encoded_params = self.url_encode(params)
target = "/spellcheck?{0}".format(url_encoded_params)
# Make the request
request, response = self.get(target, 200)
# Check the response
self.assertTrue(hasattr(response, "json"), "response should have json data")
data = response.json
self.assertIsNotNone(data, "json data should not be none")
self.assertIsInstance(data, list, "expected list, got {0}".format(type(data)))
self.assertEqual(len(data), 1, "expected one result, got {0}".format(len(data)))
suggestion = data[0]
self.assertIsInstance(suggestion, dict, "expected dict, got {0}".format(type(suggestion)))
for key in expected_keys:
self.assertIn(key, suggestion, "suggestion should contain key '{0}'".format(key))
self.assertEqual(suggestion['input_token'], sample_word, "expected input token {expected}, got {actual}"
                             .format(expected=sample_word, actual=suggestion['input_token']))
self.assertEqual(suggestion['correction'], sample_words[sample_word],
"expected input token {expected}, got {actual}"
.format(expected=sample_words[sample_word], actual=suggestion['correction']))
self.assertGreater(suggestion['probability'], 0.0, "expected probability > 0, got {0}"
.format(suggestion['probability']))
def test_spell_check_empty_query(self):
"""
Tests that a 400 BAD_REQUEST is raised for an empty query
:return:
"""
params = {
"q": "",
}
url_encoded_params = self.url_encode(params)
target = "/spellcheck?{0}".format(url_encoded_params)
# Make the request and assert a 400 BAD_REQUEST response
request, response = self.get(target, 400)
def test_spell_check_no_tokens(self):
"""
        Tests that a 400 BAD_REQUEST is raised for a query with no input tokens (i.e. whitespace only)
:return:
"""
params = {
"q": " ",
}
url_encoded_params = self.url_encode(params)
target = "/spellcheck?{0}".format(url_encoded_params)
# Make the request and assert a 400 BAD_REQUEST response
request, response = self.get(target, 400)
| 35.967391
| 116
| 0.581142
|
c76e92cb66ff615baf6fa93f979987cb9cb2e371
| 6,932
|
py
|
Python
|
netbox_loadtest.py
|
jjmanzer/netbox-loadtest
|
a32eb7953dfd9936dfbddd9705127d1d9b69805a
|
[
"MIT"
] | null | null | null |
netbox_loadtest.py
|
jjmanzer/netbox-loadtest
|
a32eb7953dfd9936dfbddd9705127d1d9b69805a
|
[
"MIT"
] | null | null | null |
netbox_loadtest.py
|
jjmanzer/netbox-loadtest
|
a32eb7953dfd9936dfbddd9705127d1d9b69805a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Test the netbox API by running a set of scenarios designed to show how netbox responds to heavy load.
1. Allocate addresses using get next free logic, then deallocate those addresses.
2. Randomly grab addresses, then deallocate them in that order.
3. Use get next free address logic in a heavily fragmented prefix, then deallocate only those addresses.
After running each of these 3 tests, results are combined and a more general report is generated.
"""
import argparse
import ipaddress
import queue
import random
import threading
import time
import requests
from openpyxl import Workbook
from helpers import netbox
from helpers.excel import add_worker_data_to_sheet
parser = argparse.ArgumentParser(description="Test the Netbox API.")
parser.add_argument(
"parent_prefix",
type=str,
help="the prefix the worker should pull the child prefix from",
)
parser.add_argument(
"prefix_length", type=int, help="the size of the prefix to carve out"
)
parser.add_argument("workers", type=int, help="number of workers concurrenting working")
parser.add_argument("fqdn", type=str, help="FQDN of netbox")
parser.add_argument("token", type=str, help="Auth token for netbox API")
args = parser.parse_args()
report_queue = queue.Queue()
session = requests.Session()
session.headers = {
"accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Token { args.token }",
}
def test_get_next_free_address(prefix: dict) -> dict:
"""Use the get next free logic to allocate every address in prefix then deallocate."""
report = {
"prefix": prefix["prefix"],
"allocate": {"data": {}},
"deallocate": {"data": {}},
}
addresses_assigned = []
while True:
try:
start = time.time()
address = netbox.get_and_reserve_next_free_address(
prefix, session, args.fqdn
)
_address = address["address"].split("/", 1)[0]
report["allocate"]["data"][_address] = time.time() - start
addresses_assigned.append(address)
except RuntimeError:
break
for address in addresses_assigned:
_address = address["address"].split("/", 1)[0]
start = time.time()
if netbox.deallocate_address(address, session, args.fqdn):
report["deallocate"]["data"][_address] = time.time() - start
return report
def test_get_next_free_address_fragmented(prefix: dict) -> dict:
"""Use the get next free logic to allocate every other address in prefix then deallocate them."""
report = {
"prefix": prefix["prefix"],
"allocate": {"data": {}},
"deallocate": {"data": {}},
}
addresses_assigned = []
prefix_obj = ipaddress.IPv4Network(prefix["prefix"])
fragmentated_addresses = []
# create the fragmentation
for address_obj in prefix_obj.hosts():
if int(address_obj) % 2:
address = netbox.reserve_address(
str(address_obj), session, args.fqdn, "fragmentation for a test"
)
fragmentated_addresses.append((address_obj, address))
while True:
try:
start = time.time()
address = netbox.get_and_reserve_next_free_address(
prefix, session, args.fqdn
)
_address = address["address"].split("/", 1)[0]
report["allocate"]["data"][_address] = time.time() - start
addresses_assigned.append(address)
except RuntimeError:
break
for address in addresses_assigned:
_address = address["address"].split("/", 1)[0]
start = time.time()
netbox.deallocate_address(address, session, args.fqdn)
report["deallocate"]["data"][_address] = time.time() - start
# clean up fragmentation
for address_obj, address in fragmentated_addresses:
if int(address_obj) % 2:
netbox.deallocate_address(address, session, args.fqdn)
return report
def test_scattered_assignments(prefix: dict) -> dict:
"""Execute a non-linear pattern of allocating addresses and then deallocating them."""
addresses_to_unassign = []
report = {
"prefix": prefix["prefix"],
"allocate": {"data": {}},
"deallocate": {"data": {}},
}
addresses_to_assign = [
str(address) for address in ipaddress.ip_network(prefix["prefix"]).hosts()
]
random.shuffle(addresses_to_assign)
for address in addresses_to_assign:
start = time.time()
_address = netbox.reserve_address(address, session, args.fqdn)
if _address:
addresses_to_unassign.append(_address)
report["allocate"]["data"][address] = time.time() - start
for address in addresses_to_unassign:
start = time.time()
_address = address["address"].split("/", 1)[0]
if netbox.deallocate_address(address, session, args.fqdn):
report["deallocate"]["data"][_address] = time.time() - start
return report
def worker(prefix: dict):
"""Execute all 3 scenarios against prefix then save the report."""
print(" testing with {}".format(prefix["prefix"]))
report = {}
start = time.time()
report["test_get_next_free_address"] = test_get_next_free_address(prefix)
report[
"test_get_next_free_address_fragmented"
] = test_get_next_free_address_fragmented(prefix)
report["test_scattered_assignments"] = test_scattered_assignments(prefix)
report["total_duration"] = time.time() - start
report_queue.put(report)
print(" finished with {}".format(prefix["prefix"]))
def start():
""" Spawn some worker threads and load test the NetBox API and then make an excel spreadsheet about it."""
workbook = Workbook()
worker_data = {}
for worker_max in range(1, args.workers + 1):
threads = []
print(f"starting the { worker_max } worker scenario")
for worker_id in range(1, worker_max + 1):
prefix = netbox.carve_new_prefix(
args.parent_prefix, args.prefix_length, session, args.fqdn
)
_prefix = prefix["prefix"]
print(
f" starting worker thread { worker_id } of { worker_max } with { _prefix }"
)
thread = threading.Thread(target=worker, args=(prefix,))
thread.start()
threads.append((thread, prefix))
for thread, prefix in threads:
thread.join()
netbox.delete_prefix(prefix, session, args.fqdn)
worker_data[prefix["prefix"]] = report_queue.get()
sheet = workbook.create_sheet(f"{ worker_max } workers")
add_worker_data_to_sheet(worker_data, sheet)
workbook.save(
filename="netbox_load_test_report_{}.xlsx".format(
args.parent_prefix.replace("/", "_")
)
)
| 33.167464
| 110
| 0.641085
|
cebbc235f7e121066802d55952bc32c0962500fc
| 2,024
|
py
|
Python
|
pbx_gs_python_utils/utils/Save_To_ELK.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 3
|
2018-12-14T15:43:46.000Z
|
2019-04-25T07:44:58.000Z
|
pbx_gs_python_utils/utils/Save_To_ELK.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 1
|
2019-05-11T14:19:37.000Z
|
2019-05-11T14:51:04.000Z
|
pbx_gs_python_utils/utils/Save_To_ELK.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 4
|
2018-12-27T04:54:14.000Z
|
2019-05-11T14:07:47.000Z
|
import datetime
from pbx_gs_python_utils.utils.Elastic_Search import Elastic_Search
class Save_To_ELK():
def __init__(self, index = None):
if index is None:
index = 'save_to_elk'
self.secret_id = 'elastic-logs-server-1'
self.elastic = self.setup(index)
def add_document(self, doc_type, doc_data):
return self.add_document_with_id(doc_type, None, doc_data)
def add_document_with_id(self, doc_type, doc_id, doc_data):
if isinstance(doc_data, str):
doc_data = { "str" : doc_data } # data doc_data to be an object and not a string (since once there is a string in the data field in ELK , string values will throw an exception)
item = { 'doc_type' : doc_type ,
'doc_data' : doc_data ,
'date' : datetime.datetime.utcnow()}
return self.elastic.add(item, doc_id)
def get_most_recent_version_of_document(self, lucene_query):
values = self.elastic.search_using_lucene_index_by_id(lucene_query, 1, "date:desc").values()
if values and len(values) == 1:
return list(values).pop().get('doc_data')
return None
def find_documents(self, lucene_query):
return self.elastic.search_using_lucene_index_by_id(lucene_query)
    def find_documents_of_type(self, doc_type):
        return self.find_documents("doc_type:{0}".format(doc_type))
def delete_documents_with_id(self, doc_id):
return self.elastic.delete_data_by_id(doc_id)
def delete_documents_with_type(self, doc_type):
keys = self.find_documents_of_type(doc_type).keys()
for key in keys:
self.elastic.delete_data_by_id(key)
def create(self):
if self.elastic.exists() is False:
self.elastic.create_index().create_index_pattern()
return self.elastic.exists()
def setup(self, index):
return Elastic_Search()._setup_Elastic_on_cloud_via_AWS_Secret(index, self.secret_id)
| 38.188679
| 200
| 0.666996
|
bce3c004c49ce78496a716705c106be8f2c98a68
| 729
|
py
|
Python
|
pelote/classes/incremental_id_register.py
|
medialab/pelote
|
cef80daeb19ef2fef73f8a1fcfc8477aa11bfb9a
|
[
"MIT"
] | 2
|
2022-03-07T20:00:10.000Z
|
2022-03-21T12:36:58.000Z
|
pelote/classes/incremental_id_register.py
|
medialab/pelote
|
cef80daeb19ef2fef73f8a1fcfc8477aa11bfb9a
|
[
"MIT"
] | 55
|
2022-03-02T16:19:30.000Z
|
2022-03-31T12:44:05.000Z
|
pelote/classes/incremental_id_register.py
|
medialab/pelote
|
cef80daeb19ef2fef73f8a1fcfc8477aa11bfb9a
|
[
"MIT"
] | null | null | null |
# =============================================================================
# Pelote Incremental Id Register Class
# =============================================================================
#
from typing import Generic, TypeVar, Dict
K = TypeVar("K")
class IncrementalIdRegister(Generic[K]):
"""
Helper class mapping incremental ids to arbitrary hashable keys.
"""
def __init__(self):
self.__counter = 0
self.__index: Dict[K, int] = {}
def __getitem__(self, item: K) -> int:
item_id = self.__index.get(item)
if item_id is None:
item_id = self.__counter
self.__counter += 1
self.__index[item] = item_id
return item_id
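# A minimal usage sketch (added for illustration, not part of the original module):
# ids are handed out in first-seen order, starting at 0, and repeated keys map back
# to the id they were first given.
if __name__ == "__main__":
    register: IncrementalIdRegister[str] = IncrementalIdRegister()
    assert register["alice"] == 0  # first key seen -> id 0
    assert register["bob"] == 1    # second key seen -> id 1
    assert register["alice"] == 0  # already registered, same id returned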
| 26.035714
| 79
| 0.475995
|
981799ed2c4a40961e9219ea4a833b390bc4b730
| 10,391
|
py
|
Python
|
trdg/data_generator.py
|
Kankroc/TextRecognitionDataGenerator
|
da8a40f6f0995975dd416f3e761cf5e8c48df1cb
|
[
"MIT"
] | 1
|
2017-07-17T03:57:06.000Z
|
2017-07-17T03:57:06.000Z
|
trdg/data_generator.py
|
Kankroc/TextRecognitionDataGenerator
|
da8a40f6f0995975dd416f3e761cf5e8c48df1cb
|
[
"MIT"
] | null | null | null |
trdg/data_generator.py
|
Kankroc/TextRecognitionDataGenerator
|
da8a40f6f0995975dd416f3e761cf5e8c48df1cb
|
[
"MIT"
] | null | null | null |
import os
import random as rnd
from PIL import Image, ImageFilter, ImageStat
from trdg import computer_text_generator, background_generator, distorsion_generator
from trdg.utils import mask_to_bboxes
try:
from trdg import handwritten_text_generator
except ImportError as e:
print("Missing modules for handwritten text generation.")
class FakeTextDataGenerator(object):
@classmethod
def generate_from_tuple(cls, t):
"""
Same as generate, but takes all parameters as one tuple
"""
cls.generate(*t)
@classmethod
def generate(
cls,
index,
text,
font,
out_dir,
size,
extension,
skewing_angle,
random_skew,
blur,
random_blur,
background_type,
distorsion_type,
distorsion_orientation,
is_handwritten,
name_format,
width,
alignment,
text_color,
orientation,
space_width,
character_spacing,
margins,
fit,
output_mask,
word_split,
image_dir,
stroke_width=0,
stroke_fill="#282828",
image_mode="RGB",
output_bboxes=0,
):
image = None
margin_top, margin_left, margin_bottom, margin_right = margins
horizontal_margin = margin_left + margin_right
vertical_margin = margin_top + margin_bottom
##########################
# Create picture of text #
##########################
if is_handwritten:
if orientation == 1:
raise ValueError("Vertical handwritten text is unavailable")
image, mask = handwritten_text_generator.generate(text, text_color)
else:
image, mask = computer_text_generator.generate(
text,
font,
text_color,
size,
orientation,
space_width,
character_spacing,
fit,
word_split,
stroke_width,
stroke_fill,
)
random_angle = rnd.randint(0 - skewing_angle, skewing_angle)
rotated_img = image.rotate(
skewing_angle if not random_skew else random_angle, expand=1
)
rotated_mask = mask.rotate(
skewing_angle if not random_skew else random_angle, expand=1
)
#############################
# Apply distorsion to image #
#############################
if distorsion_type == 0:
distorted_img = rotated_img # Mind = blown
distorted_mask = rotated_mask
elif distorsion_type == 1:
distorted_img, distorted_mask = distorsion_generator.sin(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
elif distorsion_type == 2:
distorted_img, distorted_mask = distorsion_generator.cos(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
else:
distorted_img, distorted_mask = distorsion_generator.random(
rotated_img,
rotated_mask,
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2),
)
##################################
# Resize image to desired format #
##################################
# Horizontal text
if orientation == 0:
new_width = int(
distorted_img.size[0]
* (float(size - vertical_margin) / float(distorted_img.size[1]))
)
resized_img = distorted_img.resize(
(new_width, size - vertical_margin), Image.ANTIALIAS
)
resized_mask = distorted_mask.resize((new_width, size - vertical_margin), Image.NEAREST)
background_width = width if width > 0 else new_width + horizontal_margin
background_height = size
# Vertical text
elif orientation == 1:
new_height = int(
float(distorted_img.size[1])
* (float(size - horizontal_margin) / float(distorted_img.size[0]))
)
resized_img = distorted_img.resize(
(size - horizontal_margin, new_height), Image.ANTIALIAS
)
resized_mask = distorted_mask.resize(
(size - horizontal_margin, new_height), Image.NEAREST
)
background_width = size
background_height = new_height + vertical_margin
else:
raise ValueError("Invalid orientation")
#############################
# Generate background image #
#############################
if background_type == 0:
background_img = background_generator.gaussian_noise(
background_height, background_width
)
elif background_type == 1:
background_img = background_generator.plain_white(
background_height, background_width
)
elif background_type == 2:
background_img = background_generator.quasicrystal(
background_height, background_width
)
else:
background_img = background_generator.image(
background_height, background_width, image_dir
)
background_mask = Image.new(
"RGB", (background_width, background_height), (0, 0, 0)
)
##############################################################
# Comparing average pixel value of text and background image #
##############################################################
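        # (Added note, not in the original source) The check below appears to skip
        # samples where the rendered text and the generated background have nearly the
        # same mean brightness, i.e. cases where the text would be barely legible.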
try:
resized_img_st = ImageStat.Stat(resized_img, resized_mask.split()[2])
background_img_st = ImageStat.Stat(background_img)
resized_img_px_mean = sum(resized_img_st.mean[:2]) / 3
background_img_px_mean = sum(background_img_st.mean) / 3
if abs(resized_img_px_mean - background_img_px_mean) < 15:
print("value of mean pixel is too similar. Ignore this image")
print("resized_img_st \n {}".format(resized_img_st.mean))
print("background_img_st \n {}".format(background_img_st.mean))
return
except Exception as err:
return
#############################
# Place text with alignment #
#############################
new_text_width, _ = resized_img.size
if alignment == 0 or width == -1:
background_img.paste(resized_img, (margin_left, margin_top), resized_img)
background_mask.paste(resized_mask, (margin_left, margin_top))
elif alignment == 1:
background_img.paste(
resized_img,
(int(background_width / 2 - new_text_width / 2), margin_top),
resized_img,
)
background_mask.paste(
resized_mask,
(int(background_width / 2 - new_text_width / 2), margin_top),
)
else:
background_img.paste(
resized_img,
(background_width - new_text_width - margin_right, margin_top),
resized_img,
)
background_mask.paste(
resized_mask,
(background_width - new_text_width - margin_right, margin_top),
)
############################################
# Change image mode (RGB, grayscale, etc.) #
############################################
background_img = background_img.convert(image_mode)
background_mask = background_mask.convert(image_mode)
#######################
# Apply gaussian blur #
#######################
gaussian_filter = ImageFilter.GaussianBlur(
radius=blur if not random_blur else rnd.randint(0, blur)
)
final_image = background_img.filter(gaussian_filter)
final_mask = background_mask.filter(gaussian_filter)
#####################################
# Generate name for resulting image #
#####################################
# We remove spaces if space_width == 0
if space_width == 0:
text = text.replace(" ", "")
if name_format == 0:
name = "{}_{}".format(text, str(index))
elif name_format == 1:
name = "{}_{}".format(str(index), text)
elif name_format == 2:
name = str(index)
else:
print("{} is not a valid name format. Using default.".format(name_format))
name = "{}_{}".format(text, str(index))
image_name = "{}.{}".format(name, extension)
mask_name = "{}_mask.png".format(name)
box_name = "{}_boxes.txt".format(name)
tess_box_name = "{}.box".format(name)
# Save the image
if out_dir is not None:
final_image.save(os.path.join(out_dir, image_name))
if output_mask == 1:
final_mask.save(os.path.join(out_dir, mask_name))
if output_bboxes == 1:
bboxes = mask_to_bboxes(final_mask)
with open(os.path.join(out_dir, box_name), "w") as f:
for bbox in bboxes:
f.write(" ".join([str(v) for v in bbox]) + "\n")
if output_bboxes == 2:
bboxes = mask_to_bboxes(final_mask, tess=True)
with open(os.path.join(out_dir, tess_box_name), "w") as f:
for bbox, char in zip(bboxes, text):
f.write(" ".join([char] + [str(v) for v in bbox] + ['0']) + "\n")
else:
if output_mask == 1:
return final_image, final_mask
return final_image
| 36.205575
| 100
| 0.523049
|
137f062e1360399bf7af091125db3d9e31a34170
| 1,219
|
py
|
Python
|
tests/molecular/writers/mdl_mol/conftest.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
tests/molecular/writers/mdl_mol/conftest.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
tests/molecular/writers/mdl_mol/conftest.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
import pytest
import stk
from .case_data import CaseData
@pytest.fixture(
scope='session',
params=(
lambda: CaseData(
molecule=stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
writer=stk.MolWriter(),
string=(
'\n RDKit 3D\n\n 0 0 0 0 0 0 0 0 '
' 0 0999 V3000\nM V30 BEGIN CTAB\nM V30 COUNTS 8 7 '
'0 0 0\nM V30 BEGIN ATOM\nM V30 1 Br -1.4238 1.5615 '
'0.3223 0\nM V30 2 C -0.7405 -0.2573 0.1280 0\nM V30'
' 3 C 0.7148 -0.1157 -0.3383 0\nM V30 4 Br 1.6267 0.8'
'896 1.0687 0\nM V30 5 H -1.3518 -0.8075 -0.5939 0\nM'
' V30 6 H -0.7769 -0.6964 1.1440 0\nM V30 7 H 0.7695'
' 0.5280 -1.2387 0\nM V30 8 H 1.1821 -1.1022 -0.4922 '
'0\nM V30 END ATOM\nM V30 BEGIN BOND\nM V30 1 1 1 2'
'\nM V30 2 1 2 3\nM V30 3 1 3 4\nM V30 4 1 2 5\nM '
'V30 5 1 2 6\nM V30 6 1 3 7\nM V30 7 1 3 8\nM V30 E'
'ND BOND\nM V30 END CTAB\nM END\n\n$$$$\n'
),
),
),
)
def case_data(request) -> CaseData:
return request.param()
| 35.852941
| 71
| 0.480722
|
ce241199e6818046b152b476b14447abddbab28a
| 1,431
|
py
|
Python
|
eutils/xmlfacades/esearchresult.py
|
pmartin23/eutils
|
9cd8f30a628b6d7a12b8b2a7b99c2a3e7531dd89
|
[
"Apache-2.0"
] | null | null | null |
eutils/xmlfacades/esearchresult.py
|
pmartin23/eutils
|
9cd8f30a628b6d7a12b8b2a7b99c2a3e7531dd89
|
[
"Apache-2.0"
] | null | null | null |
eutils/xmlfacades/esearchresult.py
|
pmartin23/eutils
|
9cd8f30a628b6d7a12b8b2a7b99c2a3e7531dd89
|
[
"Apache-2.0"
] | 1
|
2018-10-08T16:34:55.000Z
|
2018-10-08T16:34:55.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import lxml.etree
from eutils.exceptions import *
from eutils.xmlfacades.base import Base
class ESearchResult(Base):
_root_tag = 'eSearchResult'
@property
def count(self):
return int(self._xml_root.find('Count').text)
@property
def retmax(self):
return int(self._xml_root.find('RetMax').text)
@property
def retstart(self):
return int(self._xml_root.find('RetStart').text)
@property
def ids(self):
return [int(id) for id in self._xml_root.xpath('/eSearchResult/IdList/Id/text()')]
@property
def webenv(self):
try:
return self._xml_root.find('WebEnv').text
except AttributeError:
return None
# <LICENSE>
# Copyright 2015 eutils Committers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
# </LICENSE>
| 26.5
| 90
| 0.698812
|
7b37b9d0a70d7f7f924c4ed6459835fe0afb2fee
| 14,856
|
py
|
Python
|
Assignment3_regularization.py
|
Eni-H/Udacity-DeepLearning
|
e384ecae234ef7488adeb2b814a709a1bc22595f
|
[
"MIT"
] | null | null | null |
Assignment3_regularization.py
|
Eni-H/Udacity-DeepLearning
|
e384ecae234ef7488adeb2b814a709a1bc22595f
|
[
"MIT"
] | null | null | null |
Assignment3_regularization.py
|
Eni-H/Udacity-DeepLearning
|
e384ecae234ef7488adeb2b814a709a1bc22595f
|
[
"MIT"
] | null | null | null |
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from six.moves import cPickle as pickle
# Some personal imports
import matplotlib.pyplot as plt
tf.disable_eager_execution()
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
batch_size = 128
graph = tf.Graph()
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
# Problem 1
# Introduce and tune L2 regularization for both logistic and neural network models.
# Remember that L2 amounts to adding a penalty on the norm of the weights to the loss.
# In TensorFlow, you can compute the L2 loss for a tensor t using nn.l2_loss(t).
# The right amount of regularization should improve your validation / test accuracy.
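# Note added for clarity (not part of the original assignment): tf.nn.l2_loss(t)
# computes sum(t ** 2) / 2 over all elements of t.  For example, for t = [3.0, 4.0]
# it returns (9 + 16) / 2 = 12.5, so the l2_reg * tf.nn.l2_loss(weights) term below
# penalizes large weights while leaving the biases unregularized.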
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
l2_reg = tf.placeholder(tf.float32)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))+l2_reg*tf.nn.l2_loss(weights)
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, l2_reg : 0.0025}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
num_hidden_nodes = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
l2_reg = tf.placeholder(tf.float32)
# Variables.
weights1 = tf.Variable(
tf.truncated_normal([image_size * image_size, num_hidden_nodes]))
biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))
weights2 = tf.Variable(
tf.truncated_normal([num_hidden_nodes, num_labels]))
biases2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
layer1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
logits = tf.matmul(layer1_train, weights2) + biases2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels)) + l2_reg * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
layer1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset,weights1) + biases1)
valid_prediction = tf.nn.softmax(tf.matmul(layer1_valid, weights2) + biases2)
layer1_test = tf.nn.relu(tf.matmul(tf_test_dataset,weights1) + biases1)
test_prediction = tf.nn.softmax(tf.matmul(layer1_test, weights2) + biases2)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, l2_reg : 0.0025}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
l2_reg_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]
accuracy_val = []
for regul in l2_reg_val:
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, l2_reg : regul}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
accuracy_val.append(accuracy(test_prediction.eval(), test_labels))
plt.semilogx(l2_reg_val, accuracy_val)
plt.grid(True)
plt.title('Test accuracy by regularization (logistic)')
plt.show()
# Problem 2
# Let's demonstrate an extreme case of overfitting. Restrict your training data to just a few batches. What happens?
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
l2_reg = tf.placeholder(tf.float32)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))+l2_reg*tf.nn.l2_loss(weights)
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 101
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = ((step%3) * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, l2_reg : 0.0025}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
# Problem 3
# Introduce Dropout on the hidden layer of the neural network.
# Remember: Dropout should only be introduced during training, not evaluation,
# otherwise your evaluation results would be stochastic as well.
# TensorFlow provides nn.dropout() for that, but you have to make sure it's only inserted
# during training.
# What happens to our extreme overfitting case?
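# Sketch added for clarity (not what the code below does): a common way to keep dropout
# training-only in graph-mode TF1 is to feed the keep probability through a placeholder,
#     keep_prob = tf.placeholder(tf.float32)
#     layer_dropout = tf.nn.dropout(layer1_train, keep_prob)
# passing keep_prob=0.5 in the training feed_dict and keep_prob=1.0 at evaluation time.
# The code below instead hardcodes 0.5 on the training path and builds separate
# validation/test prediction paths that never apply dropout, which has the same effect.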
batch_size = 128
num_hidden_nodes = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights1 = tf.Variable(
tf.truncated_normal([image_size * image_size, num_hidden_nodes]))
biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))
weights2 = tf.Variable(
tf.truncated_normal([num_hidden_nodes, num_labels]))
biases2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
layer1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
layer_dropout = tf.nn.dropout(layer1_train,0.5)
    logits = tf.matmul(layer_dropout, weights2) + biases2  # feed the dropout output forward; otherwise the dropout op above is dead code
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
layer1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset,weights1) + biases1)
valid_prediction = tf.nn.softmax(tf.matmul(layer1_valid, weights2) + biases2)
layer1_test = tf.nn.relu(tf.matmul(tf_test_dataset,weights1) + biases1)
test_prediction = tf.nn.softmax(tf.matmul(layer1_test, weights2) + biases2)
num_steps = 101
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
# offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
offset = (step%3)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
| 46.425
| 147
| 0.713314
|
90e961770ff07c1af97730101c9faa1d692818f8
| 5,624
|
py
|
Python
|
tests/test_pass_defunctionalization.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | 16
|
2021-05-22T07:39:53.000Z
|
2022-02-23T14:50:38.000Z
|
tests/test_pass_defunctionalization.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pass_defunctionalization.py
|
anonymousWork000/TVMfuzz
|
0ccbb33af89758b8ead59a8c686645246ccd0545
|
[
"Apache-2.0"
] | 3
|
2021-05-28T07:12:14.000Z
|
2021-11-28T02:10:48.000Z
|
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import transform, ExprVisitor, TypeVisitor
from tvm.relay.testing import Prelude
# determine if type t is a FuncType or has a nested FuncType
def has_func_type(t):
class FuncTypeVisitor(TypeVisitor):
def __init__(self):
super().__init__()
self.has_func = False
def visit_func_type(self, ftt):
self.has_func = True
ftvisitor = FuncTypeVisitor()
ftvisitor.visit(t)
return ftvisitor.has_func
# determine whether a program has any higher order functions
# a higher order function is defined as one that:
# - has function type arguments
# - returns a function
def assert_no_higher_order_functions(expr, mod):
class CheckFirstOrderVisitor(ExprVisitor):
def __init__(self, mod):
super().__init__()
self.mod = mod
self.hof = []
self.visited_gv = set()
def visit_call(self, call):
is_higher_order = False
# check return type
if has_func_type(call.checked_type):
is_higher_order = True
# check argument types
for a in call.args:
if has_func_type(a.checked_type):
is_higher_order = True
# if it is higher order, save it for debugging later
if is_higher_order:
self.hof.append(call)
super().visit_call(call)
def visit_global_var(self, gv):
# visit global vars to visit entire program
if gv not in self.visited_gv:
self.visited_gv.add(gv)
self.visit(self.mod[gv])
mod = transform.InferType()(mod)
check_fo_visitor = CheckFirstOrderVisitor(mod)
check_fo_visitor.visit(expr)
nl = "\n--------\n"
errmsg = f"""found {len(check_fo_visitor.hof)} higher order functions:
{nl.join(expr.astext() for expr in check_fo_visitor.hof)}"""
assert len(check_fo_visitor.hof) == 0, errmsg
# assert that a program is defunctionalized and returns
# defunctionalized module
# assumes program starts from mod['main']
def defunctionalized(mod):
mod = transform.InferType()(mod)
mod["main"] = transform.Defunctionalization(mod["main"], mod)
mod = transform.InferType()(mod)
assert_no_higher_order_functions(mod["main"], mod)
return mod
# adt list to python list
def to_list(mod, l):
list = mod.get_global_type_var("List")
list_adt = mod[list]
cons = list_adt.constructors[0]
nil = list_adt.constructors[1]
assert isinstance(l, ConstructorValue)
val = l
ret = []
while True:
if val.tag == cons.tag:
ret.append(val.fields[0].asnumpy())
val = val.fields[1]
else:
assert val.tag == nil.tag
break
return ret
# list to adt list
def to_adt_list(mod, arr):
expr = mod["main"]
l = mod.get_global_type_var("List")
list_adt = mod[l]
cons = list_adt.constructors[0]
nil = list_adt.constructors[1]
li = nil()
for a in arr:
li = cons(relay.const(a), li)
ex = relay.create_executor(mod=mod)
adt = ex.evaluate(li)
mod["main"] = expr
return adt
def test_simple():
code = """
#[version = "0.0.5"]
def @simple[A, B](%f: fn(A) -> B, %xs: A) -> B {
%f(%xs)
}
def @main(%l: Tensor[(5, 5), float32]) -> Tensor[(5, 5), float32] {
%0 = fn[A](%x: A) -> A {
%x
};
@simple(%0, %l)
}
"""
mod = tvm.parser.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.rand(5, 5).astype("float32")
ex = relay.create_executor("debug", mod=mod)
defunc_ex = relay.create_executor("debug", mod=defunc_mod)
out = ex.evaluate()(input)
defunc_out = defunc_ex.evaluate()(input)
np.testing.assert_equal(out.asnumpy(), defunc_out.asnumpy())
def test_global_recursion():
code = """
#[version = "0.0.5"]
type List[A] {
Cons(A, List[A]),
Nil,
}
def @id[A](%x: A) -> A {
%x
}
def @map[A, B](%f: fn(A) -> B, %xs: List[A]) -> List[B] {
match (%xs) {
Cons(%x, %rest) => Cons(%f(%x), @map(%f, %rest)),
Nil => Nil,
}
}
def @main(%l: List[float32]) -> List[float32] {
@map(@id, %l)
}
"""
mod = tvm.parser.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.rand(10).astype("float32")
ex = relay.create_executor("debug", mod=mod)
defunc_ex = relay.create_executor("debug", mod=defunc_mod)
out = ex.evaluate(mod["main"])(to_adt_list(mod, input))
defunc_out = defunc_ex.evaluate()(to_adt_list(defunc_mod, input))
np.testing.assert_array_equal(to_list(mod, out), to_list(defunc_mod, defunc_out))
def test_recursive_datatype():
# CPS will create recursive datatype
code = """
#[version = "0.0.5"]
type List[A] {
Cons(A, List[A]),
Nil,
}
def @sum(%f: fn(int32) -> int32, %k: List[int32]) -> int32 {
match (%k) {
Cons(%x, %rest) => %0 = fn(%n) {
%x + %f(%n)
};
@sum(%0, %rest),
Nil => %f(0),
}
}
def @id[A](%x: A) -> A {
%x
}
def @main(%l: List[int32]) -> int32 {
@sum(@id, %l)
}
"""
mod = tvm.parser.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.randint(1, 100, 10)
ex = relay.create_executor("debug", mod=mod)
defunc_ex = relay.create_executor("debug", mod=defunc_mod)
out = ex.evaluate(mod["main"])(to_adt_list(mod, input))
defunc_out = defunc_ex.evaluate()(to_adt_list(defunc_mod, input))
tvm.testing.assert_allclose(out.asnumpy(), defunc_out.asnumpy())
| 26.403756
| 85
| 0.613087
|
fdef4e9e812f7db547f3f6942d0735f7f7aec774
| 1,134
|
py
|
Python
|
setup.py
|
radimsuckr/django-is-core
|
72edb572c24baef5049d29b99bafacf124bfccbe
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
radimsuckr/django-is-core
|
72edb572c24baef5049d29b99bafacf124bfccbe
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
radimsuckr/django-is-core
|
72edb572c24baef5049d29b99bafacf124bfccbe
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
from is_core.version import get_version
setup(
name='django-is-core',
version=get_version(),
description="Information systems core.",
keywords='django, admin, information systems, REST',
author='Lubos Matl',
author_email='matllubos@gmail.com',
url='https://github.com/matllubos/django-is-core',
license='BSD',
package_dir={'is_core': 'is_core'},
include_package_data=True,
packages=find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
install_requires=[
'django>=1.10',
'django-pyston~=2.9.5',
'django-block-snippets==2.0.1',
'django-chamber~=0.4.6',
'python-dateutil~=2.2',
'pytz',
'Unidecode>=0.04.16',
'python-mimeparse==0.1.4',
'django-ipware~=1.0.0'
],
zip_safe=False
)
| 29.076923
| 56
| 0.601411
|
f2d7c528b8daaa56c0375a39f2056d9accd50398
| 2,953
|
py
|
Python
|
tests/integration/test_vagrant.py
|
Nightfurex/build-magic
|
ed6c5e36cd98a760a9cc1939589833722126e088
|
[
"MIT"
] | 10
|
2020-12-11T07:33:32.000Z
|
2022-03-27T20:48:12.000Z
|
tests/integration/test_vagrant.py
|
Nightfurex/build-magic
|
ed6c5e36cd98a760a9cc1939589833722126e088
|
[
"MIT"
] | 89
|
2021-03-31T06:48:11.000Z
|
2022-03-23T02:17:17.000Z
|
tests/integration/test_vagrant.py
|
Nightfurex/build-magic
|
ed6c5e36cd98a760a9cc1939589833722126e088
|
[
"MIT"
] | 3
|
2021-06-14T20:15:04.000Z
|
2022-02-10T18:20:28.000Z
|
"""Integration tests for the Vagrant CommandRunner."""
from pathlib import Path
import shutil
import subprocess
import pytest
from build_magic.reference import ExitCode
@pytest.mark.vagrant
def test_wd(cli):
"""Verify setting the working directory works correctly."""
path = Path(__file__).parent
res = subprocess.run(
"python -m build_magic --verbose --plain "
"--runner vagrant "
f"--environment {path.parent}/files/Vagrantfile "
"--wd /app "
"-c execute 'pwd'",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
output = res.stdout.decode('utf-8')
assert res.returncode == ExitCode.PASSED
assert '[ INFO ] Starting Stage 1' in output
assert '[ DONE ] ( 1/1 ) EXECUTE : pwd' in output
assert '[ INFO ] OUTPUT: /app' in output
assert '[ INFO ] Stage 1 complete with result DONE' in output
@pytest.mark.vagrant
def test_isolation(cli, tmp_path_factory):
"""Verify copying files to a working directory in the vm works correctly."""
source = tmp_path_factory.mktemp('source')
target = tmp_path_factory.mktemp('target')
main = source / 'main.cpp'
plugins = source / 'plugins.cpp'
audio = source / 'audio.cpp'
main.touch()
plugins.touch()
audio.touch()
vagrantfile = Path(__file__).parent.parent / 'files' / 'Vagrantfile'
shutil.copy2(vagrantfile, target)
res = subprocess.run(
"python -m build_magic --verbose --plain "
"--runner vagrant "
f"--environment {target.resolve()}/Vagrantfile "
f"--copy {source} "
"--wd /app "
"-c execute 'pwd' "
"-c execute 'ls /app' "
"-c execute 'cat Vagrantfile' "
"audio.cpp main.cpp plugins.cpp",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
output = res.stdout.decode('utf-8')
assert res.returncode == ExitCode.PASSED
assert '[ INFO ] Starting Stage 1' in output
assert '[ DONE ] ( 1/3 ) EXECUTE : pwd' in output
assert '[ INFO ] OUTPUT: /app' in output
assert '[ INFO ] OUTPUT: audio.cpp' in output
assert 'main.cpp' in output
assert 'plugins.cpp' in output
assert '[ INFO ] Stage 1 complete with result DONE' in output
@pytest.mark.skip
def test_cleanup(cli, tmp_path):
"""Verify cleanup is working correctly."""
# TODO: As of 0.1, cleanup isn't implemented for the Vagrant runner.
path = Path(__file__).parent
res = subprocess.run(
"python -m build_magic --verbose --plain "
"--runner vagrant "
f"--environment {path.parent}/files/Vagrantfile "
"--action cleanup "
"--wd /vagrant "
"-c execute 'touch file1.txt file2.txt' "
"-c execute 'ls'",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
output = res.stdout.decode('utf-8')
print(output)
assert False
| 32.097826
| 80
| 0.623434
|
2f85a7bc112dbe465c144c44b4fab52fdfbee543
| 302
|
py
|
Python
|
301-400/390.elimination-game.py
|
guangxu-li/leetcode-in-python
|
8a5a373b32351500342705c141591a1a8f5f1cb1
|
[
"MIT"
] | null | null | null |
301-400/390.elimination-game.py
|
guangxu-li/leetcode-in-python
|
8a5a373b32351500342705c141591a1a8f5f1cb1
|
[
"MIT"
] | null | null | null |
301-400/390.elimination-game.py
|
guangxu-li/leetcode-in-python
|
8a5a373b32351500342705c141591a1a8f5f1cb1
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=390 lang=python3
#
# [390] Elimination Game
#
# @lc code=start
class Solution:
def lastRemaining(self, n: int) -> int:
        # After one left-to-right pass the even numbers 2, 4, ..., 2*(n//2) remain (n // 2 of them).
        # The next pass runs right-to-left over them, which mirrors the left-to-right problem on
        # n // 2 numbers, so the survivor is 2 * (1 + n // 2 - lastRemaining(n // 2)).
return 1 if n == 1 else 2 * (1 + n // 2 - self.lastRemaining(n // 2))
# @lc code=end
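# Quick sanity check (added, not part of the LeetCode submission): compare the O(log n)
# recurrence above against a brute-force simulation of the alternating elimination.
def _brute_force(n: int) -> int:
    nums = list(range(1, n + 1))
    left_to_right = True
    while len(nums) > 1:
        if left_to_right:
            nums = nums[1::2]              # remove every other number from the left
        else:
            nums = nums[-2::-2][::-1]      # remove every other number from the right
        left_to_right = not left_to_right
    return nums[0]
if __name__ == "__main__":
    solution = Solution()
    for n in range(1, 200):
        assert solution.lastRemaining(n) == _brute_force(n), n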
| 23.230769
| 85
| 0.576159
|
d56002dae3474e5ce5ba22cb1b443933f806b422
| 5,485
|
py
|
Python
|
control/base.py
|
ligulfzhou/PyBaseProject
|
47924bc35e23857f0d577809f433e4ac1ce3252d
|
[
"MIT"
] | 2
|
2018-12-31T06:07:48.000Z
|
2019-02-19T09:00:38.000Z
|
control/base.py
|
ligulfzhou/PyBaseProject
|
47924bc35e23857f0d577809f433e4ac1ce3252d
|
[
"MIT"
] | null | null | null |
control/base.py
|
ligulfzhou/PyBaseProject
|
47924bc35e23857f0d577809f433e4ac1ce3252d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pdb
import json
import random
import pickle
import datetime
import logging
import hashlib
from copy import deepcopy
from decimal import Decimal
from tornado import gen
from lib import utils
from settings import A_DAY
from tornado.gen import coroutine
from tornado.options import options
from tornado import httputil
from functools import partial
class BaseCtrl(object):
def __init__(self, ctrl):
self.ctrl = ctrl
self.api = ctrl.pdb.api
def __getattr__(self, name):
return getattr(self.api, name)
def get_model_key(self, model, model_id):
return '%s_%s' % (model.lower(), model_id)
def get_all_model_key(self, model):
return 'all_%ss' % model.lower()
def _put_ids_to_key(self, key, ids):
if not ids:
return
pl = self.ctrl.rs.pipeline(transaction=True)
pl.delete(key)
pl.rpush(key, *ids)
pl.execute()
def refactor_page(self, page, page_size=20):
mpage = page // 5 + 1
ipage = page % 5
offset, limit = (ipage - 1) * page_size, page_size
return mpage, ipage, offset, limit
# put multi items to redis
def _put_multi_items_to_redis(self, tb_name, items=[], get_item_key_func=None):
if not items:
return
if not get_item_key_func:
get_item_key_func = partial(self.get_model_key_ctl, model=tb_name.lower())
k_v_dict = {get_item_key_func(model_id=item['id']): pickle.dumps(item) for item in items}
pl = self.ctrl.rs.pipeline(transaction=True)
pl.mset(k_v_dict)
for k in k_v_dict:
pl.expire(k, A_DAY)
pl.execute()
# 🌰 merge more fields to items
def tpl_merge_more_fields(self, tpls):
[tpl.update({
'sentences': tpl.get('sentences', '').split(';')
}) for tpl in tpls if isinstance(tpl.get('sentences', ''), str)]
return tpls
def effi_merge_more_fields(self, effis):
if not effis:
return
effi_ids = [i['id'] for i in effis]
id_hot_dict = self.ctrl.api.get_effi_hot_cnts_ctl(effi_ids)
print("===============returned id_hot_dict================")
print(id_hot_dict)
[effi.update({
'hot': id_hot_dict.get(effi['id'], 0)
}) for effi in effis]
return effis
def company_merge_more_fields(self, companies):
cids = [c['id'] for c in companies]
refs = self.api.get_models('Ref', [{
'company_id': cids
}])
[company.update({'refs': []}) for company in companies]
id_company_dict = {c['id']: c for c in companies}
res = []
for ref in refs:
# company.setdefault('refs', []).append()
company = id_company_dict.get(ref['company_id'], {})
company.get('refs', []).append(ref)
return companies
def post_merge_more_fields(self, posts):
post_ids = [p['id'] for p in posts]
post_images = self.api.get_models('PostImage', [{'post_id': post_ids}])
pid_images_dict = {}
[pid_images_dict.setdefault(i['post_id'], []).append(i) for i in post_images]
for post in posts:
images = pid_images_dict.get(post['id'], [])
post.update({
'post_images': images
})
return posts
def _get_multi_items(self, tb_name, ids, get_item_key_func=None, merge_item_func=None, put_items_to_rs=None):
if not ids:
return []
if not get_item_key_func:
get_item_key_func = partial(self.get_model_key_ctl, model=tb_name.lower())
if not merge_item_func:
if hasattr(self, '%s_merge_more_fields_ctl' % tb_name.lower()):
merge_item_func = getattr(self, '%s_merge_more_fields_ctl' % tb_name.lower())
if not put_items_to_rs:
put_items_to_rs = partial(self._put_multi_items_to_redis_ctl, tb_name=tb_name.lower())
multi_key = [get_item_key_func(model_id=i) for i in ids]
cached = [pickle.loads(item) if item else None for item in self.ctrl.rs.mget(multi_key)]
multi_items = dict(zip(multi_key, cached))
miss_ids = [i for i in ids if not multi_items[get_item_key_func(model_id=i)]]
# if not miss_ids:
# return [multi_items[get_item_key_func(model_id=i)] for i in ids]
if miss_ids:
miss_items = self.api.get_models(tb_name.capitalize(), [{'id': miss_ids}])
miss_ids = [i['id'] for i in miss_items]
miss_multi_key = [get_item_key_func(model_id=i) for i in miss_ids]
miss_items = dict(zip(miss_multi_key, miss_items))
if miss_items and put_items_to_rs:
put_items_to_rs(items=list(miss_items.values()))
multi_items.update(miss_items)
items = [multi_items[get_item_key_func(model_id=i)] for i in ids if get_item_key_func(model_id=i) in multi_items]
if merge_item_func:
items = merge_item_func(items)
return items
def _rpush_multi_ids_to_key(self, key, ids):
pl = self.ctrl.rs.pipeline(transaction=True)
pl.delete(key)
pl.rpush(key, *ids)
pl.expire(key, A_DAY)
pl.execute()
def update_model(self, tb_name, pk, data):
self.api.update_model(tb_name, pk, data)
key = self.get_model_key_ctl(tb_name, pk)
self.ctrl.rs.delete(key)
| 33.858025
| 121
| 0.616591
|