Dataset schema (one row per source file):

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
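The schema is easiest to explore programmatically. A minimal sketch, assuming the rows are published as a Hugging Face-style dataset; the path `your/dataset` is a placeholder, not the real identifier of this dump:

```python
from datasets import load_dataset  # pip install datasets

# Placeholder path: substitute the actual dataset identifier or local data files.
ds = load_dataset("your/dataset", split="train", streaming=True)

# Inspect the first row's provenance and derived statistics.
row = next(iter(ds))
print(row["max_stars_repo_name"], row["max_stars_repo_path"])
print(row["size"], row["avg_line_length"], row["alphanum_fraction"])
print(row["content"][:200])
```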
Row 1:
- hexsha: 441bc16a94d564e780a8d9bd3badab0e2d7b518c
- size: 695
- ext: py
- lang: Python
- max_stars / max_issues / max_forks repo_path: src/imu_interface3/setup.py
- max_stars / max_issues / max_forks repo_name: crawlerufsc/crawler_ws
- max_stars / max_issues / max_forks repo_head_hexsha: 317294ee4f9cd517416e6e298cbb2cf92ef72eaa
- max_stars / max_issues / max_forks repo_licenses: ["MIT"]
- max_stars_count, max_issues_count, max_forks_count: null
- all stars/issues/forks event datetimes: null
- content (src/imu_interface3/setup.py):
from setuptools import setup

package_name = 'imu_interface3'

setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    data_files=[
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='bettanin',
    maintainer_email='gabrielnalinb@gmail.com',
    description='Node to interface a MPU6050 over i2c',
    license='TODO: License declaration',
    tests_require=['pytest'],
    entry_points={
        'console_scripts': [
            'imu_pub = imu_interface3.imu_pub:main',
        ],
    },
)
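Usage note (an inference from the `ament_index` data_files, not stated in the dump): this looks like a ROS 2 ament_python package, so after building the workspace with `colcon build` and sourcing `install/setup.bash`, the declared entry point would typically be launched with `ros2 run imu_interface3 imu_pub`.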
- avg_line_length: 25.740741
- max_line_length: 55
- alphanum_fraction: 0.630216
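The three trailing columns are simple statistics derived from `content`. A minimal sketch under one plausible reading of the column names; the exact counting rules of the original pipeline are not documented in this dump:

```python
def file_stats(content: str):
    """Recompute avg_line_length, max_line_length and alphanum_fraction for one file."""
    lines = content.splitlines() or ['']
    avg_line_length = len(content) / len(lines)          # characters per line, newlines included
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction
```

Applied to the 695-byte setup.py above, this lands near the recorded 25.74 / 55 / 0.63; exact agreement depends on how the original pipeline handled newlines and trailing whitespace.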
Row 2:
- hexsha: cb522bcdf09aa9e91332a294de3d2e212b165c63
- size: 82,597
- ext: py
- lang: Python
- max_stars / max_issues / max_forks repo_path: tensorflow/python/client/session_test.py
- max_stars / max_issues / max_forks repo_name: rsuderman/tensorflow
- max_stars / max_issues / max_forks repo_head_hexsha: 99a5672ef0611802e2524b49cd69a17bf07d202c
- max_stars / max_issues / max_forks repo_licenses: ["Apache-2.0"]
- max_stars_count: 3 (stars events from 2019-11-19T14:07:27.000Z to 2020-10-04T12:57:40.000Z)
- max_issues_count: 1 (issues events from 2020-08-28T18:17:58.000Z to 2020-08-28T18:17:58.000Z)
- max_forks_count: 4 (forks events from 2022-01-13T11:23:44.000Z to 2022-03-02T11:11:42.000Z)
- content (tensorflow/python/client/session_test.py):
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import sys
import threading
import time
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as framework_device_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
# Import gradients to resolve circular imports
from tensorflow.python.ops import gradients # pylint: disable=unused-import
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
try:
from frozendict import frozendict # pylint:disable=g-import-not-at-top
except ImportError:
frozendict = dict # pylint:disable=invalid-name
defaultdict = collections.defaultdict # pylint:disable=invalid-name
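# The SessionTest class below exercises session.Session end to end: graph reuse,
# fetching nested structures (lists, tuples, namedtuples, dicts, attrs classes),
# SparseTensor and IndexedSlices feeds/fetches, graph extension after run,
# threaded use of sessions, and the make_callable variants.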
class SessionTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SessionTest, self).setUp()
warnings.simplefilter('always')
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2, 'GPU': 0
})) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp, 10.0)
num_cpu_devices = 0
num_gpu_devices = 0
for device in sess.list_devices():
device_type = framework_device_lib.DeviceSpec.from_string(
device.name).device_type
if device_type == 'CPU':
num_cpu_devices += 1
elif device_type == 'GPU':
num_gpu_devices += 1
self.assertEqual(2, num_cpu_devices)
self.assertEqual(0, num_gpu_devices)
def testPerSessionThreads(self):
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp, 10.0)
def testSessionInterOpThreadPool(self):
config_pb = config_pb2.ConfigProto()
pool = config_pb.session_inter_op_thread_pool.add()
with session.Session(config=config_pb) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config_pb) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config_pb.session_inter_op_thread_pool) - 1)
with session.Session(config=config_pb) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
def exc_predicate(e):
return (e.op == c.op and e.op._original_op == b.op and
e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertIsNone(res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertIsNone(res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertIsNone(res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertIsInstance(res, list)
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertIsInstance(res, list)
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertIsInstance(res, tuple)
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertIsInstance(res, tuple)
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertIsInstance(res, ABC)
self.assertEqual(42.0, res.a)
self.assertIsNone(res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertIsInstance(res, ABC)
self.assertEqual(42.0, res.a)
self.assertIsNone(res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertIsInstance(res, dict)
self.assertEqual(42.0, res['a'])
self.assertIsNone(res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertIsInstance(res, collections.OrderedDict)
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertIsNone(res[2])
self.assertEqual(44.0, res[1])
@test_util.run_v1_only('b/120545219')
def testFetchAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
val1 = np.array([1.2, 3.4, 5.6])
val2 = np.array([[1, 2], [4, 3]])
val3 = np.array([10, 20, 30])
t1 = constant_op.constant(val1)
t2 = constant_op.constant(val2)
sample = SampleAttr(t1, t2)
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val1, result.field1)
self.assertAllEqual(val2, result.field2)
result = sess.run(sample, feed_dict={sample.field1: val3})
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val3, result.field1)
self.assertAllEqual(val2, result.field2)
@test_util.run_v1_only('b/120545219')
def testFetchNestedAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field0 = attr.ib()
field1 = attr.ib()
v1 = 10
v2 = 20
v3 = np.float32(1.2)
v4 = np.float32(3.4)
v5 = np.float64(100.001)
v6 = np.float64(-23.451)
arr1 = np.array([1.2, 6.7, 3.4])
arr2 = np.array([7, 11, 3])
sample = SampleAttr(
SampleAttr(
SampleAttr(constant_op.constant(v1), constant_op.constant(v2)),
SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))),
{'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)),
'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]})
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertIsInstance(result.field0, SampleAttr)
self.assertIsInstance(result.field0.field0, SampleAttr)
self.assertIsInstance(result.field0.field1, SampleAttr)
self.assertIsInstance(result.field0.field1.field0, np.ndarray)
self.assertAllEqual(arr1, result.field0.field1.field0)
self.assertIsInstance(result.field0.field1.field1, np.ndarray)
self.assertAllEqual(arr2, result.field0.field1.field1)
self.assertIsInstance(result.field1, dict)
self.assertIn('A', result.field1)
self.assertIn('B', result.field1)
self.assertIsInstance(result.field1['A'], SampleAttr)
self.assertAllEqual(
[v3, v4],
[result.field1['A'].field0, result.field1['A'].field1])
self.assertIsInstance(result.field1['B'], list)
self.assertEqual(1, len(result.field1['B']))
self.assertIsInstance(result.field1['B'][0], SampleAttr)
self.assertAllEqual(
[v5, v6],
[result.field1['B'][0].field0, result.field1['B'][0].field1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertIsInstance(res, list)
self.assertEqual(3, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(0, len(res[0]))
self.assertIsInstance(res[1], tuple)
self.assertEqual(0, len(res[1]))
self.assertIsInstance(res[2], dict)
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertIsInstance(res, list)
self.assertEqual(4, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(0, len(res[0]))
self.assertIsInstance(res[1], tuple)
self.assertEqual(0, len(res[1]))
self.assertIsInstance(res[2], dict)
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFGHI = collections.namedtuple('DEFGHI', ['d', 'e', 'f', 'g', 'h', 'i'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
test_dct = {'a': a.name, 'c': c, 'b': b}
test_dct_types = [dict, frozendict, defaultdict]
# List of lists, tuples, namedtuple, dict, frozendict, and defaultdict
res = sess.run([
[a, b, c],
(a, b, c),
ABC(a=a, b=b, c=c),
dict(test_dct),
frozendict(test_dct),
defaultdict(str, test_dct),
])
self.assertIsInstance(res, list)
self.assertEqual(6, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertIsInstance(res[1], tuple)
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertIsInstance(res[2], ABC)
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
for expected_type, r in zip(test_dct_types, res[3:]):
self.assertIsInstance(r, expected_type)
self.assertEqual(3, len(r))
self.assertEqual(a_val, r['a'])
self.assertEqual(b_val, r['b'])
self.assertEqual(c_val, r['c'])
self.assertEqual(res[5].default_factory, str)
# Tuple of lists, tuples, namedtuple, dict, frozendict, and defaultdict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b,
c=c), dict(test_dct),
frozendict(test_dct), defaultdict(str, test_dct)))
self.assertIsInstance(res, tuple)
self.assertEqual(6, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertIsInstance(res[1], tuple)
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertIsInstance(res[2], ABC)
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
for expected_type, r in zip(test_dct_types, res[3:]):
self.assertIsInstance(r, expected_type)
self.assertEqual(3, len(r))
self.assertEqual(a_val, r['a'])
self.assertEqual(b_val, r['b'])
self.assertEqual(c_val, r['c'])
self.assertEqual(res[5].default_factory, str)
# Namedtuple of lists, tuples, namedtuples, dict, frozendict, defaultdict
res = sess.run(
DEFGHI(
d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g=dict(test_dct),
h=frozendict(test_dct),
i=defaultdict(str, test_dct)))
self.assertIsInstance(res, DEFGHI)
self.assertIsInstance(res.d, list)
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertIsInstance(res.e, tuple)
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertIsInstance(res.f, ABC)
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertIsInstance(res.g, dict)
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
self.assertIsInstance(res.h, frozendict)
self.assertEqual(3, len(res.h))
self.assertEqual(a_val, res.h['a'])
self.assertEqual(b_val, res.h['b'])
self.assertEqual(c_val, res.h['c'])
self.assertIsInstance(res.i, defaultdict)
self.assertEqual(3, len(res.i))
self.assertEqual(a_val, res.i['a'])
self.assertEqual(b_val, res.i['b'])
self.assertEqual(c_val, res.i['c'])
self.assertEqual(res.i.default_factory, str)
# Dict of lists, tuples, namedtuples, dict, frozendict, defaultdict
res = sess.run({
'd': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': dict(test_dct),
'h': frozendict(test_dct),
'i': defaultdict(str, test_dct),
})
self.assertIsInstance(res, dict)
self.assertEqual(6, len(res))
self.assertIsInstance(res['d'], list)
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertIsInstance(res['e'], tuple)
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertIsInstance(res['f'], ABC)
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
for expected_type, r_key in zip(test_dct_types, ('g', 'h', 'i')):
r = res[r_key]
self.assertIsInstance(r, expected_type)
self.assertEqual(3, len(r))
self.assertEqual(a_val, r['a'])
self.assertEqual(b_val, r['b'])
self.assertEqual(c_val, r['c'])
self.assertEqual(res['i'].default_factory, str)
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices), constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
dtype=np.float32, shape=shape, name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: (values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: (values, indices)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
@test_util.run_v1_only('b/120545219')
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.VariableV1(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(
target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
@staticmethod
def _build_graph():
time.sleep(random.random() * 0.1)
# Do some graph construction. Try to exercise non-trivial paths.
graph = ops.get_default_graph()
gdef = None
for _ in range(10):
x = array_ops.placeholder(dtype=dtypes.float32)
with ops.colocate_with(x):
y = array_ops.placeholder(dtype=dtypes.float32)
with ops.device('/cpu:0'):
z = control_flow_ops.while_loop(
lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
gradients_impl.gradients(z, [x, y])
if gdef is None:
gdef = graph.as_graph_def()
else:
importer.import_graph_def(gdef, name='import')
@test_util.run_v1_only('b/120545219')
def testParallelRunAndSingleBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in threads:
t.start()
SessionTest._build_graph()
stop.set()
for t in threads:
t.join()
@test_util.run_v1_only('b/120545219')
def testParallelRunAndParallelBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in run_threads:
t.start()
build_threads = [self.checkedThread(target=SessionTest._build_graph)
for _ in range(10)]
for t in build_threads:
t.start()
for t in build_threads:
t.join()
# Let the run_threads run until the build threads are finished.
stop.set()
for t in run_threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
(versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEqual(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEqual(len(sess.graph_def.node), 2)
self.assertAllEqual(c, 5.0)
self.assertAllEqual(d, 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e, 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
sess.run({})
@test_util.run_v1_only('b/120545219')
def testNotEntered(self):
# pylint: disable=protected-access
self.assertIsNone(ops._default_session_stack.get_default())
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
@test_util.run_v1_only('b/120545219')
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e)
sess.close()
@test_util.run_v1_only('b/120545219')
def testMultipleInteractiveSessionsWarning(self):
# Reinitialize the global state to ensure that the expected warnings will
# be emitted.
session.InteractiveSession._active_session_count = 0 # pylint: disable=protected-access
sess = session.InteractiveSession()
sess.run(constant_op.constant(4.0)) # Run so that the session is "opened".
sess.close()
# Opening and closing interactive sessions serially should not warn.
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
sess.close()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess2 = session.InteractiveSession()
self.assertEqual(1, len(w))
self.assertIn('An interactive session is already active. This can cause '
'out-of-memory errors in some cases. You must explicitly '
'call `InteractiveSession.close()` to release resources '
'held by the other session(s).', str(w[0].message))
sess2.close()
sess.close()
@test_util.run_v1_only('b/120545219')
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
@test_util.run_v1_only('b/120545219')
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testMakeCallableOnTensorWithRunOptions(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
tensor_runner = sess.make_callable(a, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
res = tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(42.0, res)
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableOnOperationWithRunOptions(self):
with session.Session() as sess:
a = variables.Variable(42.0)
b = state_ops.assign_add(a, 1.0)
sess.run(a.initializer)
tensor_runner = sess.make_callable(b.op, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(43.0, sess.run(a))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableWithFeedListAndRunOptions(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
tensor_runner = sess.make_callable(
a, feed_list=[ph.name], accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
self.assertAllClose(42.0,
tensor_runner(
41.0,
options=run_options,
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testOptimizedMakeCallable(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
for _ in range(3):
callable_fn = sess._make_callable_from_options(callable_opts)
for _ in range(5):
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))
def testOptimizedMakeCallableWithRunMetadata(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
callable_fn = sess._make_callable_from_options(callable_opts)
run_metadata = config_pb2.RunMetadata()
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegex(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegex(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object_).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c, c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object_).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(
sess.run(feed_t, feed_dict={
feed_t: c_list
}), c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [
u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
u'\U0001f60e deal with it'
]
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object_)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegex(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertIsInstance(c.name, six.text_type)
self.assertIsInstance(d.name, six.text_type)
self.assertIsInstance(e.name, six.text_type)
self.assertIsInstance(f.name, six.text_type)
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegex(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.SOFTWARE_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.SOFTWARE_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(
constant_op.constant(1.0), options=None, run_metadata=run_metadata)
self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0), options=run_options, run_metadata=None)
self.assertFalse(run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegex(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Input to reshape is a tensor with 4 values, '
'but the requested shape has 21'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertNotIn('_output_shapes', sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertEqual(a, a)
def testInferShapesTrue(self):
config_pb = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config_pb)
self.assertIn('_output_shapes', sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertEqual(a, a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config_pb = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config_pb) as sess:
with ops.device('/device:GPU:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(
d,
feed_dict={a: 1.0},
options=run_options,
run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def runTestOutputPartitionGraphs(self, sess):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
a = constant_op.constant(1)
run_metadata = config_pb2.RunMetadata()
sess.run(a, options=run_options, run_metadata=run_metadata)
self.assertGreater(len(run_metadata.partition_graphs), 0)
sess.run(a, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 0)
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDirect(self):
self.runTestOutputPartitionGraphs(session.Session())
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDistributed(self):
server = server_lib.Server.create_local_server()
self.runTestOutputPartitionGraphs(session.Session(server.target))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegex(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
@test_util.run_v1_only('b/120545219')
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegex(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegex(TypeError,
'Argument `target` must be a string'):
session.Session(37)
with self.assertRaisesRegex(TypeError,
'Argument `config` must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegex(TypeError,
'Argument `graph` must be a tf.Graph'):
session.Session(graph=37)
@test_util.run_v1_only('b/120545219')
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config_pb) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
@test_util.run_v1_only('b/120545219')
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.cached_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(
squared_tensor, feed_dict={
squared_tensor: np1 * np1
})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
def testDefaultLogDevicePlacement(self):
class CaptureStderr(str):
"""Class to capture stderr from C++ shared library."""
def __enter__(self):
self._esc = compat.as_str('\b')
self._output = compat.as_str('')
self._stderr = sys.stderr
self._fd = self._stderr.fileno()
self._out_pipe, in_pipe = os.pipe()
# Save the original io stream.
self._dup_fd = os.dup(self._fd)
# Replace the original io stream with in pipe.
os.dup2(in_pipe, self._fd)
return self
def __exit__(self, *args):
self._stderr.write(self._esc)
self._stderr.flush()
self.read()
os.close(self._out_pipe)
# Restore the original io stream.
os.dup2(self._dup_fd, self._fd)
def read(self):
while True:
data = os.read(self._out_pipe, 1)
if not data or compat.as_str(data) == self._esc:
break
self._output += compat.as_str(data)
def __str__(self):
return self._output
context.set_log_device_placement(True)
if context.executing_eagerly():
with CaptureStderr() as log:
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
        # Ensure that when the same kernel is executed again with the same
        # arguments, its execution is also logged.
d = a + b
else:
# Passing the config to the server, but not the session should still
# result in logging device placement.
config_pb = config_pb2.ConfigProto(log_device_placement=True)
server = server_lib.Server.create_local_server(config=config_pb)
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
d = a + b
with session.Session(server.target) as sess:
with CaptureStderr() as log:
c, d = sess.run([c, d])
self.assertEqual(c, 3)
self.assertEqual(d, 3)
# Ensure that we did log device placement.
add_executions = [l for l in str(log).splitlines() if 'AddV2' in l]
self.assertEqual(len(add_executions), 2)
@def_function.function
def fn(a, b):
c = a + b
      # These two AddV2 ops cannot take the same arguments inside tf.function,
      # since an optimization pass would deduplicate them and run the add only
      # once.
d = a + c
return c, d
with CaptureStderr() as log:
c, d = self.evaluate(fn(constant_op.constant(1), constant_op.constant(2)))
self.assertEqual(c, 3)
self.assertEqual(d, 4)
# Ensure that we did log device placement.
add_executions = [l for l in str(log).splitlines() if 'AddV2' in l]
self.assertEqual(len(add_executions), 2)
@test_util.run_v1_only('b/120545219')
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config_pb) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
@test_util.run_v1_only('b/120545219')
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config_pb)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
# pylint: disable=protected-access
enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
# pylint: enable=protected-access
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
@test_util.run_v1_only('b/120545219')
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
@test_util.run_v1_only('b/120545219')
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
def testOpenAndCloseGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target):
pass
def testOpenAndCloseSession(self):
with session.Session():
pass
@test_util.run_v1_only('b/120545219')
def testAutoConvertAndCheckData(self):
with self.cached_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegex(
TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})
@test_util.run_v1_only('b/120545219')
def testOptimizerOptions(self):
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
with ops.Graph().as_default():
sess = session.Session()
self.assertEqual(
sess._config.graph_options.rewrite_options.min_graph_nodes, -1)
if __name__ == '__main__':
googletest.main()
| 39.23848 | 92 | 0.648559 |
13cdeee65c751f63ecf9dde2fb14d394e96fb84c | 9,006 | py | Python
| pymc/gp/FullRankCovariance.py | rsumner31/pymc3-23 | 539c0fc04c196679a1cdcbf4bc2dbea4dee10080 | ["Apache-2.0"] | 1 | 2019-03-01T02:47:20.000Z | 2019-03-01T02:47:20.000Z
| pymc/gp/FullRankCovariance.py | rsumner31/pymc3-23 | 539c0fc04c196679a1cdcbf4bc2dbea4dee10080 | ["Apache-2.0"] | 1 | 2019-08-17T06:58:38.000Z | 2019-08-17T06:58:38.000Z
| pymc/gp/FullRankCovariance.py | rsumner31/pymc3-23 | 539c0fc04c196679a1cdcbf4bc2dbea4dee10080 | ["Apache-2.0"] | null | null | null |
# Copyright (c) Anand Patil, 2007
__docformat__='reStructuredText'
__all__ = ['FullRankCovariance']
from numpy import *
from numpy.linalg import cholesky, LinAlgError
from GPutils import regularize_array, trisolve
from linalg_utils import dpotrf_wrap
from Covariance import Covariance
from incomplete_chol import ichol, ichol_continue
class FullRankCovariance(Covariance):
"""
C=FullRankCovariance(eval_fun, **params)
Valued as a GP covariance.
All linear algebra done with dense BLAS, so attempts to invert/ factorize
numerically singular covariance matrices will cause errors. On the other
hand, computations will be faster than with Covariance for full-rank
covariance matrices.
:Arguments:
- `eval_fun`: A function that takes either a single value x or two values x and y,
followed by an arbitrary number of keyword parameters. x and y will be of shape
(n,n_dim), where n is any integer and n_dim is the dimensionality of the space, or
shape (n). In the latter case n_dim should be assumed to be 1.
- `params`: Parameters to be passed to eval_fun.
:SeeAlso: Mean, BasisCovariance, SeparableBasisCovariance, Realization, observe
"""
def __init__(self, eval_fun, nugget=None, **params):
self.ndim = None
self.observed = False
self.obs_mesh = None
self.obs_V = None
self.Uo = None
self.obs_piv = None
self.obs_len = None
self.full_piv = None
self.full_obs_mesh = None
self.basiscov = False
self.eval_fun = eval_fun
self.params = params
self.nugget=nugget
# # Sorry... the diagonal calls are done using f2py for speed.
# def diag_cov_fun(xe):
# return self.eval_fun(xe,xe,**self.params)
#
# self.diag_cov_fun = diag_cov_fun
def cholesky(self, x, observed=True, nugget=None):
"""
U = C.cholesky(x[, observed=True, nugget=None])
Computes Cholesky factorization of self(x,x).
:Arguments:
- `x`: The input array on which to evaluate the covariance.
- `observed`: If 'True', any observations are taken into account
when computing the Cholesky factor. If not, the unobserved
version of self is used.
- `nugget`: The 'nugget' parameter, which will essentially be
added to the diagonal of C(x,x) before Cholesky factorizing.
"""
# Number of points in x.
N_new = x.shape[0]
U=self.__call__(x, x, regularize = False, observed = observed)
# print nugget, U
if nugget is not None:
for i in xrange(N_new):
U[i,i] += nugget[i]
# print self.params, x.shape, observed, nugget
info = dpotrf_wrap(U)
if info>0:
raise LinAlgError, "Matrix does not appear to be positive definite by row %i. Consider another Covariance subclass, such as NearlyFullRankCovariance." % info
return U
def continue_cholesky(self, x, x_old, U_old, observed=True, nugget=None):
"""
U = C.continue_cholesky(x, x_old, U_old[, observed=True, nugget=None])
        Computes the Cholesky factorization of self evaluated on the combined
        mesh of x_old and x, assuming the Cholesky factorization U_old of
        self(x_old, x_old) has already been computed.
:Arguments:
- `x`: The input array on which to evaluate the Cholesky factorization.
- `x_old`: The input array on which the Cholesky factorization has been
computed.
- `U_old`: The Cholesky factorization of C(x_old, x_old).
- `observed`: If 'True', any observations are taken into account
when computing the Cholesky factor. If not, the unobserved
version of self is used.
- `nugget`: The 'nugget' parameter, which will essentially be
added to the diagonal of C(x,x) before Cholesky factorizing.
"""
# Concatenation of the old points and new points.
xtot = vstack((x_old,x))
# Number of old points.
N_old = x_old.shape[0]
# Number of new points.
N_new = x.shape[0]
U_new = self.__call__(x, x, regularize=False, observed=observed)
# not really implemented yet.
if nugget is not None:
for i in xrange(N_new):
U_new[i,i] += nugget[i]
U = asmatrix(zeros((N_new + N_old, N_old + N_new), dtype=float, order='F'))
U[:N_old, :N_old] = U_old
offdiag = self.__call__(x=x_old, y=x, observed=observed, regularize=False)
trisolve(U_old,offdiag,uplo='U',transa='T', inplace=True)
U[:N_old, N_old:] = offdiag
U_new -= offdiag.T*offdiag
info = dpotrf_wrap(U_new)
if info>0:
raise LinAlgError, "Matrix does not appear to be positive definite by row %i. Consider another Covariance subclass, such as NearlyFullRankCovariance." %info
U[N_old:,N_old:] = U_new
return U
def __call__(self, x, y=None, observed=True, regularize=True):
out = Covariance.__call__(self,x,y,observed,regularize)
if self.nugget is None:
return out
if x is y:
for i in xrange(out.shape[0]):
out[i,i] += self.nugget
elif y is None:
out += self.nugget
return out
def observe(self, obs_mesh, obs_V, assume_full_rank=True):
"""
Observes self at obs_mesh with variance given by obs_V.
Returns an upper-triangular Cholesky factor of self's evaluation on obs_mesh
conditional on all previous observations.
"""
# print 'C.observe called'
# Number of spatial dimensions.
ndim = obs_mesh.shape[1]
if self.ndim is not None:
if not ndim==self.ndim:
raise ValueError, "Dimension of observation mesh is not equal to dimension of base mesh."
else:
self.ndim = ndim
# print ndim
# =====================================
# = If self hasn't been observed yet: =
# =====================================
if not self.observed:
# If self has not been observed, get the Cholesky factor of self(obs_mesh, obs_mesh)
# and the side information and store it.
# Number of observation points so far is 0.
N_old = 0
N_new = obs_mesh.shape[0]
U = self.cholesky(obs_mesh, nugget = obs_V, observed=False)
# Upper-triangular Cholesky factor of self(obs_mesh, obs_mesh)
self.full_Uo = U
self.Uo = U
# Pivots.
piv_new = arange(N_new)
self.full_piv = piv_new
self.obs_piv = piv_new
# Remember full observation mesh.
self.full_obs_mesh = obs_mesh
# relevant slice is the positive-definite indices, which get into obs_mesh_*. See documentation.
relevant_slice = self.obs_piv
self.obs_mesh = obs_mesh
self.obs_V = obs_V
self.obs_len = N_new
# =======================================
# = If self has been observed already: =
# =======================================
else:
# If self has been observed, get the Cholesky factor of the _full_ observation mesh (new
# and old observations) using continue_cholesky, along with side information, and store it.
# Number of observations so far.
N_old = self.full_obs_mesh.shape[0]
# Number of new observations.
N_new = obs_mesh.shape[0]
# Call to self.continue_cholesky.
U_new = self.continue_cholesky( x=obs_mesh,
x_old = self.full_obs_mesh,
U_old = self.full_Uo,
observed = False,
nugget = obs_V)
# Full Cholesky factor of self(obs_mesh, obs_mesh), where obs_mesh is the combined observation mesh.
self.full_Uo = U_new
# Square upper-triangular Cholesky factor of self(obs_mesh_*, obs_mesh_*). See documentation.
self.Uo=self.full_Uo
# Pivots.
piv_new = arange(N_old + N_new)
self.obs_piv = piv_new
self.full_piv = piv_new
# Concatenate old and new observation meshes.
self.full_obs_mesh = vstack((self.full_obs_mesh, obs_mesh))
self.obs_mesh = self.full_obs_mesh
self.obs_V = hstack((self.obs_V, obs_V))
# Length of obs_mesh_*.
self.obs_len = N_old + N_new
self.observed = True
return slice(None, None, None), obs_mesh, self.full_Uo[N_old:N_new+N_old, N_old:N_new+N_old]
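# Illustrative usage sketch (not part of the original pymc source; the covariance
# function and parameter names below are assumptions for the example only):
#
#     from pymc.gp import FullRankCovariance
#     from pymc.gp.cov_funs import matern
#     C = FullRankCovariance(matern.euclidean, diff_degree=1.4, amp=0.4, scale=1.0)
#     U = C.cholesky(obs_mesh, nugget=obs_V)   # dense upper-triangular factor
#
# Because all linear algebra is dense, a numerically singular C(obs_mesh, obs_mesh)
# raises LinAlgError instead of being handled by pivoting as in Covariance.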
| 33.857143 | 169 | 0.584832 |
24574c45216f9cde7ce8bc6e0d69818fac35cd11 | 11,334 | py | Python
| BOFdat/core/maintenance.py | jclachance/BOFdat | cf2de074c2d789ad47b890083cbbffadab40f177 | ["MIT"] | 21 | 2017-11-21T23:21:14.000Z | 2022-03-27T11:48:17.000Z
| BOFdat/core/maintenance.py | jclachance/BOFdat | cf2de074c2d789ad47b890083cbbffadab40f177 | ["MIT"] | 21 | 2017-11-21T22:16:44.000Z | 2021-11-02T06:24:33.000Z
| BOFdat/core/maintenance.py | jclachance/biomass | cf2de074c2d789ad47b890083cbbffadab40f177 | ["MIT"] | 3 | 2018-02-06T13:05:33.000Z | 2018-10-24T12:12:56.000Z |
"""
Maintenance
===========
Growth associated maintenance (GAM) is defined as the energy cost of growth, namely polymerization of macromolecules.
Non-Growth associated maintenance (NGAM) represents all the extra costs that the cell must overcome to operate.
This package offers two options for the user:
    1- Calculate GAM and NGAM from experimental data
    2- Estimate GAM and NGAM from theoretical values
"""
import cobra
from BOFdat.util.update import _get_biomass_objective_function
import pandas as pd
import numpy as np
import warnings
import seaborn as sns
import matplotlib.pyplot as plt
def _import_model(path_to_model):
extension = path_to_model.split('.')[-1]
if extension == 'json':
return cobra.io.load_json_model(path_to_model)
elif extension == 'xml':
return cobra.io.read_sbml_model(path_to_model)
else:
raise Exception('Model format not compatible, provide xml or json')
def _import_data(path_to_data):
data = pd.read_csv(path_to_data)
'''
#1- This file should have a header
for i in data.columns:
try:
float(i)
raise ValueError('Provide file header')
except:
pass
'''
return data
#From experimental data growth rate on a given carbon source
#Obtain best fit from model to experimental data
def _get_carbon_sources(data):
return [c for c in data.Source]
def _attribute_colors(data,carbon_sources):
# Attribute colors to carbon sources for ploting
# Set a color palette
import seaborn as sb
color_palette = sb.color_palette('deep',len(carbon_sources))
data['color'] = ''
    for i in range(len(carbon_sources)):
data.loc[data.Source == carbon_sources[i], 'color'] = color_palette[i]
return data
def _atp_cost(model):
solution = model.optimize().objective_value
    if solution is None:
solution = 0
return solution
def _calculate_gam(model,data,show_GAM):
#Build the output matrix
#Contains the ATP costs for each carbon source
raw_GAM = pd.DataFrame(index=[data.loc[i,'Source'] for i in data.index],
columns=['Growth_rate', 'ATP', 'ATP_min', 'ATP_max', 'ATP_err']
)
raw_GAM['Growth_rate'] = [data.loc[i,'GR'] for i in data.index]
carbon_sources = _get_carbon_sources(data)
if show_GAM:
data = _attribute_colors(data,carbon_sources)
else:
data = data
#Set parameters for all models
#Set lower bound of BOF to 0
#Get biomass objective function
biomass = _get_biomass_objective_function(model)
biomass.lower_bound = 0.
biomass.objective_coefficient = 0.
# remove GAM from biomass function
for key, value in biomass.metabolites.items():
if abs(value) > 50:
biomass.add_metabolites({key: -value})
#Set carbon sources to 0
'''
Will need to prepare the model to run the simulations
'''
#model.reactions.EX_glc_LPAREN_e_RPAREN_.lower_bound = 0
#Optimize for ATP maintenance
model.reactions.ATPM.lower_bound = 0
model.reactions.ATPM.objective_coefficient = 1.
#model.reactions.EX_o2_LPAREN_e_RPAREN_.lower_bound = -1000
atp, atp_min, atp_max, atp_err = [],[],[],[]
for i in data.index:
c_source = data.loc[i, 'Source']
#Get growth rate
growth_rate = data.loc[i, 'GR']
growth_rate_err = data.loc[i, 'GR_std']
#Get substrate uptake rate for the carbon source
# gur = Glucose uptake rate
substrate_uptake_rate = data.loc[i, 'SUR']
# sur - substrate uptake rate
substrate_uptake_rate_err = data.loc[i, 'SUR_std']
# Mean
#Excretion rates
#Requires that the data file is generated appropriately
for j in range(6,len(data.columns)):
#Force flux through excretion
if j % 2 == 0:
mean_excretion = data.loc[i,data.columns[j]]
model.reactions.get_by_id(data.columns[j]).lower_bound = mean_excretion
'''
Should return some errors if the file is not generated appropriately
'''
#Fix biomass at growth rate
biomass.lower_bound = growth_rate
# Iterate the Carbon source as %s
model.reactions.get_by_id('%s' %(c_source,)).lower_bound = -1 * substrate_uptake_rate
#Obtain solution and determine ATP cost
atp.append(_atp_cost(model))
#Include experimental error in calculation
# For both the excretion (max) and uptake (min) to get the max range
#Minimum
#Set biomass to growth rate
biomass.lower_bound = growth_rate + growth_rate_err
#Set uptake rate to the carbon source uptake rate
model.reactions.get_by_id('%s' % c_source).lower_bound = -1 * (substrate_uptake_rate - substrate_uptake_rate_err)
for j in range(6,len(data.columns)):
#Force flux through excretion
if j % 2 == 0:
mean_excretion = data.loc[i,data.columns[j]]
std_excretion = data.loc[i,data.columns[j+1]]
model.reactions.get_by_id(data.columns[j]).lower_bound = mean_excretion - std_excretion
atp_min.append(_atp_cost(model))
#Maximum
biomass.lower_bound = growth_rate - growth_rate_err
for j in range(6,len(data.columns)):
#Force flux through excretion
if j % 2 == 0:
mean_excretion = data.loc[i,data.columns[j]]
std_excretion = data.loc[i,data.columns[j+1]]
model.reactions.get_by_id(data.columns[j]).lower_bound = mean_excretion + std_excretion
atp_max.append(_atp_cost(model))
delta = atp_max[i] - atp_min[i]
atp_err.append(delta / 2)
#Reset uptake to 0
model.reactions.get_by_id('%s' % c_source).lower_bound = 0.
#Reset secretion to 0
for j in range(6,len(data.columns)):
#Force flux through excretion
if j % 2 == 0:
mean_excretion = 0
model.reactions.get_by_id(data.columns[j]).lower_bound = mean_excretion
'''
Will have to define whether the aerobic/anaerobic option should be included
'''
# model.reactions.EX_o2_LPAREN_e_RPAREN_.lower_bound = -1000
raw_GAM['ATP'] = atp
raw_GAM['ATP_min'] = atp_min
raw_GAM['ATP_max'] = atp_max
raw_GAM['ATP_err'] = atp_err
return raw_GAM
def _show_gam(raw_GAM):
sns.set_style('whitegrid')
x = raw_GAM['Growth_rate']
y = raw_GAM['ATP']
# Fit with np.polyfit
m, b = np.polyfit(x, y, 1)
print('m', m, 'b', b)
# Get correlation
x = raw_GAM['Growth_rate']
y = raw_GAM['ATP']
correlation = np.corrcoef(x, y)[0, 1]
print('R2=', correlation ** 2)
plt.scatter(raw_GAM['Growth_rate'], raw_GAM['ATP'])
# plt.scatter(filtered_data['GR'],filtered_data['ATP'], color=filtered_data['color'], marker=filtered_data['marker'].tolist())
plt.ylabel('ATP')
plt.xlabel('Growth rate')
plt.xlim([0, 1.1])
plt.ylim([0, 110])
plt.show()
# plt.savefig('all_data.png')
# plt.savefig('all_data.svg')
plt.close()
'''
plt.errorbar(filtered_data['GR'], filtered_data['ATP'], xerr=filtered_data['GR_std'],
yerr=filtered_data['ATP_err'], fmt='.', ecolor='black')
# plt.scatter(filtered_data['GR'],filtered_data['ATP'], color=filtered_data['color'])
plt.plot(x, m * x + b, '-')
plt.xlim([0, 1.1])
plt.ylim([0, 120])
plt.savefig('all_data_w_errors.png')
plt.savefig('all_data_w_errors.svg')
# embed()
plt.show()
plt.close()
# plt.plot(x, y, '.')
plt.scatter(filtered_data['GR'], filtered_data['ATP'], color=filtered_data['color'])
plt.plot(x, m * x + b, '-')
plt.xlim([0, 1.1])
plt.ylim([0, 90])
# plt.savefig('GAM_est.svg')
plt.savefig('GAM_all_data_fit.png')
plt.savefig('GAM_all_data_fit.svg')
plt.show()
plt.close()
# for CI calc: combine dataset with std erors
data2 = pd.DataFrame()
data2 = data2.append(pd.DataFrame({'GR': filtered_data['GR'].tolist(), 'ATP': filtered_data['ATP'].tolist()}))
# positive error
test1 = filtered_data['GR'] + filtered_data['GR_std']
test2 = filtered_data['ATP'] + filtered_data['ATP_err']
data2 = data2.append(pd.DataFrame({'GR': test1, 'ATP': test2}))
# negative error
test1 = filtered_data['GR'] - filtered_data['GR_std']
test2 = filtered_data['ATP'] - filtered_data['ATP_err']
data2 = data2.append(pd.DataFrame({'GR': test1, 'ATP': test2}))
# positive error
# test1=filtered_data['GR']+filtered_data['GR_std']
# test2=filtered_data['ATP']-filtered_data['ATP_err']
# data2=data2.append(pd.DataFrame({'GR':test1,'ATP':test2}))
# negative error
# test1=filtered_data['GR']-filtered_data['GR_std']
# test2=filtered_data['ATP']+filtered_data['ATP_err']
# data2=data2.append(pd.DataFrame({'GR':test1,'ATP':test2}))
sns.lmplot(x='GR', y='ATP', data=data2)
plt.savefig('GAM_all_data_fit_CI.png')
plt.savefig('GAM_all_data_fit_CI.svg')
x = data2['GR']
y = data2['ATP']
# fit with np.polyfit
m, b = np.polyfit(x, y, 1)
print m, b
plt.show()
'''
return {'GAM': m, 'NGAM': b}
def experimental_maintenance(path_to_data, path_to_model,show_GAM=False):
"""
Growth-associated maintenance (GAM) is the ATP cost of assembling macromolecules in the organism.
This function calculates GAM from provided path to experimental data. This data includes growth rates on
different carbon sources, the associated uptake rate for each carbon source and the secretion rates of metabolic
wastes. More information on the format in which to provide the experimental data is available on GitHub.
:param path_to_data: The data file is the outcome of the HPLC growth, uptake and secretion rate experiment.
:param path_to_model: The path to the model, json or sbml formats supported
:param show_GAM: bool, will associate colors with carbon sources for easier display later
:return: a dictionary {GAM:value, NGAM:value}
"""
#1- Import model
model = _import_model(path_to_model)
#2- Import experimental data
data = _import_data(path_to_data)
#3- Calculate GAM
raw_GAM = _calculate_gam(model, data,show_GAM)
#4-
gams= _show_gam(raw_GAM)
#Grs = growth rates
#new_gams_calc_grs(gams)
return gams
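# Illustrative call (not part of the original module; the file paths below are
# hypothetical placeholders):
#
#     from BOFdat.core import maintenance
#     gams = maintenance.experimental_maintenance(path_to_data='maintenance_data.csv',
#                                                 path_to_model='model.json')
#     # gams is a dictionary of the form {'GAM': <slope>, 'NGAM': <intercept>}
#     # and can be passed to update_maintenance_costs below.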
def update_maintenance_costs(gams,model,RNA_atp):
"""
This function updates the cost of maintenance in the BOF and the ATPM function.
As of version 0.1.1 BOFdat assumes that the ATP maintenance reaction in the model is named ATPM and the
biomass is set as the first objective of the model.
:param gams: the dictionary generated by the experimental_maintenance function {GAM:x,NGAM:y}
:param model: the model to update
:param RNA_atp: the atp coefficient from the RNA
"""
#Add the atp consumption to the biomass
from BOFdat import update
    update.update_maintenance(gams, model, RNA_atp)
'''
Functions yet to be implemented
def theorical_GAM(protein_percent, rna_percent, dna_percent,CP = 4.324, CD = 1.365, CR = 0.406):
#From experimental data: % of dry weight for each category
'''
| 33.632047 | 130 | 0.655373 |
c8f888b742efb312e3222b86fa1661ba26647a07 | 6,807 | py | Python
| tests/api/test_variant.py | projectssimm/simmweb | 2935af248ee05e8795086d5bab12ff41632722ea | ["BSD-3-Clause"] | 9 | 2021-08-08T22:42:18.000Z | 2021-11-23T06:50:14.000Z
| tests/api/test_variant.py | projectssimm/simmweb | 2935af248ee05e8795086d5bab12ff41632722ea | ["BSD-3-Clause"] | 3 | 2020-03-24T16:21:02.000Z | 2021-02-02T21:57:49.000Z
| tests/api/test_variant.py | projectssimm/simmweb | 2935af248ee05e8795086d5bab12ff41632722ea | ["BSD-3-Clause"] | null | null | null |
import json
import graphene
import pytest
from django.shortcuts import reverse
from tests.utils import get_graphql_content
def test_fetch_variant(admin_api_client, product):
query = """
query ProductVariantDetails($id: ID!) {
productVariant(id: $id) {
id
attributes {
attribute {
id
name
slug
values {
id
name
slug
}
}
value {
id
name
slug
}
}
costPrice {
currency
amount
}
images {
edges {
node {
id
}
}
}
name
priceOverride {
currency
amount
}
product {
id
}
}
}
"""
variant = product.variants.first()
variant_id = graphene.Node.to_global_id('ProductVariant', variant.pk)
variables = json.dumps({ 'id': variant_id })
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['productVariant']
assert data['name'] == variant.name
def test_create_variant(admin_api_client, product, product_type):
query = """
mutation createVariant (
$productId: ID!,
$sku: String!,
$priceOverride: Decimal,
$costPrice: Decimal,
$quantity: Int!,
$attributes: [AttributeValueInput],
$trackInventory: Boolean!) {
productVariantCreate(
input: {
product: $productId,
sku: $sku,
priceOverride: $priceOverride,
costPrice: $costPrice,
quantity: $quantity,
attributes: $attributes,
trackInventory: $trackInventory
}) {
productVariant {
name
sku
attributes {
attribute {
slug
}
value {
slug
}
}
quantity
priceOverride {
currency
amount
localized
}
costPrice {
currency
amount
localized
}
}
}
}
"""
product_id = graphene.Node.to_global_id('Product', product.pk)
sku = "1"
price_override = 1
cost_price = 3
quantity = 10
variant_slug = product_type.variant_attributes.first().slug
variant_value = 'test-value'
variables = json.dumps({
'productId': product_id,
'sku': sku,
'quantity': quantity,
'costPrice': cost_price,
'priceOverride': price_override,
'attributes': [
{'slug': variant_slug, 'value': variant_value}],
'trackInventory': True})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['productVariantCreate']['productVariant']
assert data['name'] == ""
assert data['quantity'] == quantity
assert data['costPrice']['amount'] == cost_price
assert data['priceOverride']['amount'] == price_override
assert data['sku'] == sku
assert data['attributes'][0]['attribute']['slug'] == variant_slug
assert data['attributes'][0]['value']['slug'] == variant_value
def test_update_product_variant(admin_api_client, product):
query = """
mutation updateVariant (
$id: ID!,
$sku: String!,
$costPrice: Decimal,
$quantity: Int!,
$trackInventory: Boolean!) {
productVariantUpdate(
id: $id,
input: {
sku: $sku,
costPrice: $costPrice,
quantity: $quantity,
trackInventory: $trackInventory
}) {
productVariant {
name
sku
quantity
costPrice {
currency
amount
localized
}
}
}
}
"""
variant = product.variants.first()
variant_id = graphene.Node.to_global_id('ProductVariant', variant.pk)
sku = "test sku"
cost_price = 3
quantity = 123
variables = json.dumps({
'id': variant_id,
'sku': sku,
'quantity': quantity,
'costPrice': cost_price,
'trackInventory': True})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['productVariantUpdate']['productVariant']
assert data['name'] == ""
assert data['quantity'] == quantity
assert data['costPrice']['amount'] == cost_price
assert data['sku'] == sku
def test_delete_variant(admin_api_client, product):
query = """
mutation variantDelete($id: ID!) {
productVariantDelete(id: $id) {
productVariant {
sku
id
}
}
}
"""
variant = product.variants.first()
variant_id = graphene.Node.to_global_id('ProductVariant', variant.pk)
variables = json.dumps({'id': variant_id})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['productVariantDelete']
assert data['productVariant']['sku'] == variant.sku
with pytest.raises(variant._meta.model.DoesNotExist):
variant.refresh_from_db()
| 31.082192 | 73 | 0.456883 |
4e990cbe9d56c610c8d033d55136d4e750a702b9 | 1,002 | py | Python
| setup.py | Team-95/videolocale | 70b6ca9708838e0c8e0dcb4f56c486af712ab961 | ["MIT"] | 3 | 2016-04-25T19:39:00.000Z | 2016-12-13T01:27:41.000Z
| setup.py | Team-95/beats-by-region | 70b6ca9708838e0c8e0dcb4f56c486af712ab961 | ["MIT"] | 25 | 2016-04-27T19:15:06.000Z | 2016-05-25T00:46:55.000Z
| setup.py | Team-95/beats-by-region | 70b6ca9708838e0c8e0dcb4f56c486af712ab961 | ["MIT"] | null | null | null |
from setuptools import setup, find_packages
with open('requirements.txt') as f:
requirements = f.read().splitlines()
readme = ''
with open('README.md') as f:
readme = f.read()
setup(name='Video Locale',
author='Team 95',
url='https://github.com/Team-95/videolocale',
packages=find_packages(),
license='MIT',
description='A website that generates YouTube playlists based on selected geographical regions.',
long_description=readme,
include_package_data=True,
install_requires=requirements,
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet',
'Topic :: Multimedia :: Video :: Display'
]
)
| 31.3125 | 103 | 0.628743 |
d03233f7de45191007913011f75fbd3d7b8fc402 | 25,261 | py | Python
| fairseq/utils.py | jm-glowienke/fairseq | ca45353322f92776e34a7308bf3fab75af9c1d50 | ["MIT"] | null | null | null
| fairseq/utils.py | jm-glowienke/fairseq | ca45353322f92776e34a7308bf3fab75af9c1d50 | ["MIT"] | null | null | null
| fairseq/utils.py | jm-glowienke/fairseq | ca45353322f92776e34a7308bf3fab75af9c1d50 | ["MIT"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
class FileContentsAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(FileContentsAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
from fairseq.file_io import PathManager
if PathManager.isfile(values):
with PathManager.open(values) as f:
argument = f.read().strip()
else:
argument = values
setattr(namespace, self.dest, argument)
def split_paths(paths: str) -> List[str]:
return (
paths.split(os.pathsep)
if "://" not in paths
else paths.split(MANIFOLD_PATH_SEP)
)
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
"utils.load_ensemble_for_inference is deprecated. "
"Please use checkpoint_utils.load_model_ensemble instead."
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task
)
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.to(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def move_to_tpu(sample):
import torch_xla.core.xla_model as xm
device = xm.xla_device()
def _move_to_tpu(tensor):
return tensor.to(device)
return apply_to_sample(_move_to_tpu, sample)
def get_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0 and '@@' not in replace_unk:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
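# The alignment dictionary file read above is expected to contain one mapping per
# line, whitespace separated; e.g. a line "Haus house" yields
# align_dict["Haus"] = "house" (the example words are illustrative).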
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
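# Illustrative sketch (not part of the original module): combining parse_embedding
# and load_embedding to initialise an nn.Embedding from a word2vec/GloVe-style
# text file. `embed_path` and `vocab` (a fairseq Dictionary) are placeholders.
#
#     embed_dict = parse_embedding(embed_path)      # {word: 1-D FloatTensor}
#     print_embed_overlap(embed_dict, vocab)        # log vocabulary coverage
#     dim = next(iter(embed_dict.values())).numel()
#     emb = torch.nn.Embedding(len(vocab), dim, padding_idx=vocab.pad())
#     emb = load_embedding(embed_dict, vocab, emb)  # rows are overwritten in place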
def replace_unk(hypo_str, src_str, alignment, align_dict, unk, input_str):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_idx, tgt_index = alignment[i]
src_token = src_tokens[src_idx]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
            hypo_tokens[i] = align_dict.get(src_token, src_token)  # the first argument is looked up; the second is returned if it is not found
if hypo_tokens[i] == unk and input_str is not None:
input_tokens = tokenizer.tokenize_line(input_str) + ["<eos>"]
# replace unk token with corresponding word from raw input string
hypo_tokens[i] = input_tokens[src_idx]
return " ".join(hypo_tokens)
def post_process_prediction(
hypo_tokens,
src_str,
alignment,
align_dict,
tgt_dict,
remove_bpe=None,
extra_symbols_to_ignore=None,
input_str=None
):
hypo_str = tgt_dict.string(
hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
)
if align_dict is not None:
hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict,
tgt_dict.unk_string(), input_str)
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
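# Example (illustrative): with padding_idx = 1,
#   make_positions(torch.tensor([[1, 1, 7, 9]]), 1) -> tensor([[1, 1, 2, 3]])
# i.e. padding positions stay at padding_idx and real tokens count up from
# padding_idx + 1.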
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(
src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
buffered = torch.empty(0).long()
if max_len > 0:
torch.arange(max_len, out=buffered)
range = buffered.type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
def item(tensor):
# tpu-comment: making this a no-op for xla devices.
if torch.is_tensor(tensor) and tensor.device.type == 'xla':
return tensor.detach()
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(
chunk_size, has_inf, [cur_device_grads], False
)
norms.append(norm[0].to(torch.cuda.current_device()))
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [p.grad.detach() for p in params if grad_exists(p) and not hasattr(p, 'expert')]
expert_grads = [p.grad.detach() for p in params if grad_exists(p) and hasattr(p, 'expert')]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.0)
else:
return torch.tensor(0.0)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + expert_grads:
g.mul_(clip_coef)
return total_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
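# Example (illustrative):
#   _match_types(1024, (1024, 512)) -> ((1024, 1024), (1024, 512))
# so a scalar limit can then be compared element-wise against a tuple of limits.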
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
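# Example (illustrative): combining a task limit, a model limit and an unset
# command-line value:
#   resolve_max_positions((1024, 1024), (512, 2048), None) -> (512, 1024)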
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path) and not os.path.isfile(os.path.dirname(module_path)):
fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
else:
raise FileNotFoundError(module_path)
# ensure that user modules are only imported once
import_user_module.memo = getattr(import_user_module, "memo", set())
if module_path not in import_user_module.memo:
import_user_module.memo.add(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
else:
raise ImportError(
"Failed to import --user-dir={} because the corresponding module name "
"({}) is not globally unique. Please rename the directory to "
"something unique and try again.".format(module_path, module_name)
)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
from fairseq.logging.meters import safe_round
if loss is None:
return 0.0
try:
return safe_round(base ** loss, round)
except OverflowError:
return float("inf")
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
from fairseq.modules import gelu, gelu_accurate
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"tanh",
"linear",
]
@contextlib.contextmanager
def model_eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def get_rng_state():
state = {"torch_rng_state": torch.get_rng_state()}
if xm is not None:
state["xla_rng_state"] = xm.get_rng_state()
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(state):
torch.set_rng_state(state["torch_rng_state"])
if xm is not None:
xm.set_rng_state(state["xla_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = get_rng_state()
torch.manual_seed(seed)
if xm is not None:
xm.set_rng_state(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def __enter__(self):
return self
def __exit__(self, *exc):
set_rng_state(self.rng_state)
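# Example usage (illustrative, not part of the original module): seed every
# available RNG for a reproducible block and restore the previous RNG state
# on exit:
#
#   with set_torch_seed(42):
#       noise = torch.randn(4)  # deterministic given the seed
#   # afterwards the prior torch / CUDA / XLA RNG state is restored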
def parse_alignment(line):
"""
    Parses a single line from the alignment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split("-")
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
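# Example (illustrative, not part of the original module):
#   parse_alignment("0-0 1-2")  ->  torch.IntTensor([0, 0, 1, 2])
# i.e. the m source-target pairs are flattened into a tensor of length 2 * m.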
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = (
((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_invalid = (
((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1)
)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = (
((tgt_sent != pad)).nonzero(as_tuple=False)
)
src_valid = (
((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1)
)
alignment = []
if len(tgt_valid) != 0 and len(src_valid) != 0:
attn_valid = attn[tgt_valid, src_valid]
alignment = [
["{:.6f}".format(p) for p in src_probs.tolist()]
for src_probs in attn_valid
]
return alignment
def new_arange(x, *size):
"""
    Return a Tensor of shape `size` filled with a range over the last dimension, on the device of x.
    If `size` is empty, the size of the variable x is used.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
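# Example (illustrative, not part of the original module): for any tensor x,
#   new_arange(x, 2, 3)  ->  [[0, 1, 2],
#                             [0, 1, 2]]
# allocated on x.device, since arange(size[-1]) is expanded to the full shape.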
def get_tpu_device():
return xm.xla_device()
def tpu_data_loader(itr):
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
from fairseq.data import iterators
xm.rendezvous("tpu_data_loader") # wait for all workers
xm.mark_step()
device = xm.xla_device()
return iterators.CountingIterator(
pl.ParallelLoader(itr, [device]).per_device_loader(device),
start=getattr(itr, "n", 0),
total=len(itr),
)
def is_xla_tensor(tensor):
return torch.is_tensor(tensor) and tensor.device.type == 'xla'
def index_put(tensor, indices, value):
if is_xla_tensor(tensor):
for _ in range(indices.dim(), tensor.dim()):
indices = indices.unsqueeze(-1)
if indices.size(-1) < tensor.size(-1):
indices = indices.expand_as(tensor)
tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices)
else:
tensor[indices] = value
return tensor
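# Example (illustrative, not part of the original module): on XLA devices the
# masked assignment is rewritten as arithmetic so it stays traceable; on other
# devices it falls back to plain boolean indexing:
#   t = torch.zeros(3)
#   index_put(t, torch.tensor([True, False, True]), 1.0)  # -> tensor([1., 0., 1.])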
def xla_device_to_cpu(dat):
import torch_xla.core.xla_model as xm
return xm._maybe_convert_to_cpu(dat)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
        Given a list of CudaEnvironment objects, pretty print them
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
def csv_str_list(x):
return x.split(",")
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
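# Example (illustrative, not part of the original module): eval_str_list is
# typically fed CLI strings that hold Python literals, e.g.
#   eval_str_list("[0.9, 0.98]", type=float)  ->  [0.9, 0.98]
#   eval_str_list(0.5, type=float)            ->  [0.5]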
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
| 32.138677
| 127
| 0.637346
|
72a2c0afbb03430ca8ebe01b8f384084841fb28a
| 22,366
|
py
|
Python
|
lib/spack/spack/build_systems/autotools.py
|
oracleLee/spack
|
a4786c2043b505ac7d263d949865e77d2314e29c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
lib/spack/spack/build_systems/autotools.py
|
oracleLee/spack
|
a4786c2043b505ac7d263d949865e77d2314e29c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-10-19T02:33:37.000Z
|
2020-10-19T02:33:37.000Z
|
lib/spack/spack/build_systems/autotools.py
|
oracleLee/spack
|
a4786c2043b505ac7d263d949865e77d2314e29c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import fileinput
import os
import os.path
import shutil
import stat
import sys
import re
from subprocess import PIPE
from subprocess import check_call
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir, force_remove
from spack.package import PackageBase, run_after, run_before
from spack.util.executable import Executable
class AutotoolsPackage(PackageBase):
"""Specialized class for packages built using GNU Autotools.
This class provides four phases that can be overridden:
1. :py:meth:`~.AutotoolsPackage.autoreconf`
2. :py:meth:`~.AutotoolsPackage.configure`
3. :py:meth:`~.AutotoolsPackage.build`
4. :py:meth:`~.AutotoolsPackage.install`
They all have sensible defaults and for many packages the only thing
necessary will be to override the helper method
:py:meth:`~.AutotoolsPackage.configure_args`.
For a finer tuning you may also override:
+-----------------------------------------------+--------------------+
| **Method** | **Purpose** |
+===============================================+====================+
| :py:attr:`~.AutotoolsPackage.build_targets` | Specify ``make`` |
| | targets for the |
| | build phase |
+-----------------------------------------------+--------------------+
| :py:attr:`~.AutotoolsPackage.install_targets` | Specify ``make`` |
| | targets for the |
| | install phase |
+-----------------------------------------------+--------------------+
| :py:meth:`~.AutotoolsPackage.check` | Run build time |
| | tests if required |
+-----------------------------------------------+--------------------+
"""
#: Phases of a GNU Autotools package
phases = ['autoreconf', 'configure', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'AutotoolsPackage'
#: Whether or not to update ``config.guess`` and ``config.sub`` on old
#: architectures
patch_config_files = True
#: Whether or not to update ``libtool``
#: (currently only for Arm/Clang/Fujitsu compilers)
patch_libtool = True
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.build`
#: phase
build_targets = []
#: Targets for ``make`` during the :py:meth:`~.AutotoolsPackage.install`
#: phase
install_targets = ['install']
#: Callback names for build-time test
build_time_test_callbacks = ['check']
#: Callback names for install-time test
install_time_test_callbacks = ['installcheck']
#: Set to true to force the autoreconf step even if configure is present
force_autoreconf = False
#: Options to be passed to autoreconf when using the default implementation
autoreconf_extra_args = []
@property
def archive_files(self):
"""Files to archive for packages based on autotools"""
return [os.path.join(self.build_directory, 'config.log')]
@run_after('autoreconf')
def _do_patch_config_files(self):
"""Some packages ship with older config.guess/config.sub files and
need to have these updated when installed on a newer architecture.
        In particular, config.guess fails for PPC64LE for versions prior
to a 2013-06-10 build date (automake 1.13.4) and for ARM (aarch64)."""
if not self.patch_config_files or (
not self.spec.satisfies('target=ppc64le:') and
not self.spec.satisfies('target=aarch64:')
):
return
# TODO: Expand this to select the 'config.sub'-compatible architecture
# for each platform (e.g. 'config.sub' doesn't accept 'power9le', but
# does accept 'ppc64le').
if self.spec.satisfies('target=ppc64le:'):
config_arch = 'ppc64le'
elif self.spec.satisfies('target=aarch64:'):
config_arch = 'aarch64'
else:
config_arch = 'local'
my_config_files = {'guess': None, 'sub': None}
config_files = {'guess': None, 'sub': None}
config_args = {'guess': [], 'sub': [config_arch]}
for config_name in config_files.keys():
config_file = 'config.{0}'.format(config_name)
if os.path.exists(config_file):
# First search the top-level source directory
my_config_files[config_name] = config_file
else:
# Then search in all sub directories recursively.
# We would like to use AC_CONFIG_AUX_DIR, but not all packages
# ship with their configure.in or configure.ac.
config_path = next((os.path.join(r, f)
for r, ds, fs in os.walk('.') for f in fs
if f == config_file), None)
my_config_files[config_name] = config_path
if my_config_files[config_name] is not None:
try:
config_path = my_config_files[config_name]
check_call([config_path] + config_args[config_name],
stdout=PIPE, stderr=PIPE)
# The package's config file already runs OK, so just use it
continue
except Exception as e:
tty.debug(e)
else:
continue
# Look for a spack-installed automake package
if 'automake' in self.spec:
automake_dir = 'automake-' + str(self.spec['automake'].version)
automake_path = os.path.join(self.spec['automake'].prefix,
'share', automake_dir)
path = os.path.join(automake_path, config_file)
if os.path.exists(path):
config_files[config_name] = path
# Look for the system's config.guess
if (config_files[config_name] is None and
os.path.exists('/usr/share')):
automake_dir = [s for s in os.listdir('/usr/share') if
"automake" in s]
if automake_dir:
automake_path = os.path.join('/usr/share', automake_dir[0])
path = os.path.join(automake_path, config_file)
if os.path.exists(path):
config_files[config_name] = path
if config_files[config_name] is not None:
try:
config_path = config_files[config_name]
my_config_path = my_config_files[config_name]
check_call([config_path] + config_args[config_name],
stdout=PIPE, stderr=PIPE)
m = os.stat(my_config_path).st_mode & 0o777 | stat.S_IWUSR
os.chmod(my_config_path, m)
shutil.copyfile(config_path, my_config_path)
continue
except Exception as e:
tty.debug(e)
raise RuntimeError('Failed to find suitable ' + config_file)
@run_before('configure')
def _set_autotools_environment_variables(self):
"""Many autotools builds use a version of mknod.m4 that fails when
running as root unless FORCE_UNSAFE_CONFIGURE is set to 1.
We set this to 1 and expect the user to take responsibility if
they are running as root. They have to anyway, as this variable
doesn't actually prevent configure from doing bad things as root.
Without it, configure just fails halfway through, but it can
still run things *before* this check. Forcing this just removes a
nuisance -- this is not circumventing any real protection.
"""
os.environ["FORCE_UNSAFE_CONFIGURE"] = "1"
@run_after('configure')
def _do_patch_libtool(self):
"""If configure generates a "libtool" script that does not correctly
detect the compiler (and patch_libtool is set), patch in the correct
flags for the Arm, Clang/Flang, and Fujitsu compilers."""
libtool = os.path.join(self.build_directory, "libtool")
if self.patch_libtool and os.path.exists(libtool):
if self.spec.satisfies('%arm') or self.spec.satisfies('%clang') \
or self.spec.satisfies('%fj'):
for line in fileinput.input(libtool, inplace=True):
# Replace missing flags with those for Arm/Clang
if line == 'wl=""\n':
line = 'wl="-Wl,"\n'
if line == 'pic_flag=""\n':
line = 'pic_flag="{0}"\n'\
.format(self.compiler.cc_pic_flag)
if self.spec.satisfies('%fj') and 'fjhpctag.o' in line:
line = re.sub(r'/\S*/fjhpctag.o', '', line)
sys.stdout.write(line)
@property
def configure_directory(self):
"""Returns the directory where 'configure' resides.
:return: directory where to find configure
"""
return self.stage.source_path
@property
def configure_abs_path(self):
# Absolute path to configure
configure_abs_path = os.path.join(
os.path.abspath(self.configure_directory), 'configure'
)
return configure_abs_path
@property
def build_directory(self):
"""Override to provide another place to build the package"""
return self.configure_directory
@run_before('autoreconf')
def delete_configure_to_force_update(self):
if self.force_autoreconf:
force_remove(self.configure_abs_path)
def autoreconf(self, spec, prefix):
"""Not needed usually, configure should be already there"""
# If configure exists nothing needs to be done
if os.path.exists(self.configure_abs_path):
return
# Else try to regenerate it
autotools = ['m4', 'autoconf', 'automake', 'libtool']
missing = [x for x in autotools if x not in spec]
if missing:
msg = 'Cannot generate configure: missing dependencies {0}'
raise RuntimeError(msg.format(missing))
tty.msg('Configure script not found: trying to generate it')
tty.warn('*********************************************************')
tty.warn('* If the default procedure fails, consider implementing *')
tty.warn('* a custom AUTORECONF phase in the package *')
tty.warn('*********************************************************')
with working_dir(self.configure_directory):
m = inspect.getmodule(self)
# This line is what is needed most of the time
# --install, --verbose, --force
autoreconf_args = ['-ivf']
for dep in spec.dependencies(deptype='build'):
if os.path.exists(dep.prefix.share.aclocal):
autoreconf_args.extend([
'-I', dep.prefix.share.aclocal
])
autoreconf_args += self.autoreconf_extra_args
m.autoreconf(*autoreconf_args)
@run_after('autoreconf')
def set_configure_or_die(self):
"""Checks the presence of a ``configure`` file after the
        autoreconf phase. If it is found, sets a module attribute
        appropriately; otherwise raises an error.
:raises RuntimeError: if a configure script is not found in
:py:meth:`~AutotoolsPackage.configure_directory`
"""
# Check if a configure script is there. If not raise a RuntimeError.
if not os.path.exists(self.configure_abs_path):
msg = 'configure script not found in {0}'
raise RuntimeError(msg.format(self.configure_directory))
# Monkey-patch the configure script in the corresponding module
inspect.getmodule(self).configure = Executable(
self.configure_abs_path
)
def configure_args(self):
"""Produces a list containing all the arguments that must be passed to
configure, except ``--prefix`` which will be pre-pended to the list.
:return: list of arguments for configure
"""
return []
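    # Example override (illustrative; 'mpi' is a hypothetical variant, not part
    # of this base class): a package subclass typically returns its configure
    # switches here, e.g.
    #
    #   def configure_args(self):
    #       return ['--enable-shared'] + self.with_or_without('mpi')
    #
    # '--prefix' is added automatically by the configure() phase below.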
def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass specified
compiler flags to configure."""
# Has to be dynamic attribute due to caching.
setattr(self, 'configure_flag_args', [])
for flag, values in flags.items():
if values:
values_str = '{0}={1}'.format(flag.upper(), ' '.join(values))
self.configure_flag_args.append(values_str)
# Spack's fflags are meant for both F77 and FC, therefore we
            # additionally set FCFLAGS if required.
values = flags.get('fflags', None)
if values:
values_str = 'FCFLAGS={0}'.format(' '.join(values))
self.configure_flag_args.append(values_str)
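    # Example (illustrative, not part of the original class): flags such as
    # {'cflags': ['-O2', '-g']} become the configure argument 'CFLAGS=-O2 -g',
    # and an 'fflags' entry additionally yields a matching 'FCFLAGS=...' value.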
def configure(self, spec, prefix):
"""Runs configure with the arguments specified in
:py:meth:`~.AutotoolsPackage.configure_args`
and an appropriately set prefix.
"""
options = getattr(self, 'configure_flag_args', [])
options += ['--prefix={0}'.format(prefix)]
options += self.configure_args()
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).configure(*options)
def build(self, spec, prefix):
"""Makes the build targets specified by
        :py:attr:`~.AutotoolsPackage.build_targets`
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.build_targets)
def install(self, spec, prefix):
"""Makes the install targets specified by
        :py:attr:`~.AutotoolsPackage.install_targets`
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.install_targets)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the Makefile for targets ``test`` and ``check``
and runs them if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('test')
self._if_make_target_execute('check')
def _activate_or_not(
self,
name,
activation_word,
deactivation_word,
activation_value=None
):
"""This function contains the current implementation details of
:py:meth:`~.AutotoolsPackage.with_or_without` and
:py:meth:`~.AutotoolsPackage.enable_or_disable`.
Args:
name (str): name of the variant that is being processed
activation_word (str): the default activation word ('with' in the
case of ``with_or_without``)
deactivation_word (str): the default deactivation word ('without'
in the case of ``with_or_without``)
activation_value (callable): callable that accepts a single
value. This value is either one of the allowed values for a
multi-valued variant or the name of a bool-valued variant.
Returns the parameter to be used when the value is activated.
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Examples:
Given a package with:
.. code-block:: python
variant('foo', values=('x', 'y'), description='')
variant('bar', default=True, description='')
calling this function like:
.. code-block:: python
_activate_or_not(
'foo', 'with', 'without', activation_value='prefix'
)
_activate_or_not('bar', 'with', 'without')
will generate the following configuration options:
.. code-block:: console
--with-x=<prefix-to-x> --without-y --with-bar
for ``<spec-name> foo=x +bar``
Returns:
list of strings that corresponds to the activation/deactivation
of the variant that has been processed
Raises:
KeyError: if name is not among known variants
"""
spec = self.spec
args = []
if activation_value == 'prefix':
activation_value = lambda x: spec[x].prefix
# Defensively look that the name passed as argument is among
# variants
if name not in self.variants:
msg = '"{0}" is not a variant of "{1}"'
raise KeyError(msg.format(name, self.name))
# Create a list of pairs. Each pair includes a configuration
# option and whether or not that option is activated
if set(self.variants[name].values) == set((True, False)):
# BoolValuedVariant carry information about a single option.
# Nonetheless, for uniformity of treatment we'll package them
# in an iterable of one element.
condition = '+{name}'.format(name=name)
options = [(name, condition in spec)]
else:
condition = '{name}={value}'
# "feature_values" is used to track values which correspond to
# features which can be enabled or disabled as understood by the
# package's build system. It excludes values which have special
# meanings and do not correspond to features (e.g. "none")
feature_values = getattr(
self.variants[name].values, 'feature_values', None
) or self.variants[name].values
options = [
(value, condition.format(name=name, value=value) in spec)
for value in feature_values
]
# For each allowed value in the list of values
for option_value, activated in options:
# Search for an override in the package for this value
override_name = '{0}_or_{1}_{2}'.format(
activation_word, deactivation_word, option_value
)
line_generator = getattr(self, override_name, None)
# If not available use a sensible default
if line_generator is None:
def _default_generator(is_activated):
if is_activated:
line = '--{0}-{1}'.format(
activation_word, option_value
)
if activation_value is not None and activation_value(option_value): # NOQA=ignore=E501
line += '={0}'.format(
activation_value(option_value)
)
return line
return '--{0}-{1}'.format(deactivation_word, option_value)
line_generator = _default_generator
args.append(line_generator(activated))
return args
def with_or_without(self, name, activation_value=None):
"""Inspects a variant and returns the arguments that activate
or deactivate the selected feature(s) for the configure options.
This function works on all type of variants. For bool-valued variants
it will return by default ``--with-{name}`` or ``--without-{name}``.
For other kinds of variants it will cycle over the allowed values and
return either ``--with-{value}`` or ``--without-{value}``.
If activation_value is given, then for each possible value of the
variant, the option ``--with-{value}=activation_value(value)`` or
``--without-{value}`` will be added depending on whether or not
``variant=value`` is in the spec.
Args:
name (str): name of a valid multi-valued variant
activation_value (callable): callable that accepts a single
value and returns the parameter to be used leading to an entry
of the type ``--with-{name}={parameter}``.
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Returns:
list of arguments to configure
"""
return self._activate_or_not(name, 'with', 'without', activation_value)
def enable_or_disable(self, name, activation_value=None):
"""Same as :py:meth:`~.AutotoolsPackage.with_or_without` but substitute
``with`` with ``enable`` and ``without`` with ``disable``.
Args:
name (str): name of a valid multi-valued variant
activation_value (callable): if present accepts a single value
and returns the parameter to be used leading to an entry of the
type ``--enable-{name}={parameter}``
The special value 'prefix' can also be assigned and will return
``spec[name].prefix`` as activation parameter.
Returns:
list of arguments to configure
"""
return self._activate_or_not(
name, 'enable', 'disable', activation_value
)
run_after('install')(PackageBase._run_default_install_time_test_callbacks)
def installcheck(self):
"""Searches the Makefile for an ``installcheck`` target
and runs it if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('installcheck')
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
| 42.683206
| 111
| 0.576589
|
e2f03b08d5e19a133ff13b690f92a06feb9657d6
| 3,792
|
py
|
Python
|
pytype/tests/test_protocols1.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 3,882
|
2015-03-22T12:17:15.000Z
|
2022-03-31T17:13:20.000Z
|
pytype/tests/test_protocols1.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 638
|
2015-11-03T06:34:44.000Z
|
2022-03-31T23:41:48.000Z
|
pytype/tests/test_protocols1.py
|
Jrryy/pytype
|
2d2855dc97d5ccee22ad233a83524616c17c44c9
|
[
"Apache-2.0"
] | 301
|
2015-08-14T10:21:17.000Z
|
2022-03-08T11:03:40.000Z
|
"""Tests for matching against protocols.
Based on PEP 544 https://www.python.org/dev/peps/pep-0544/.
"""
from pytype import file_utils
from pytype.pytd import pytd_utils
from pytype.tests import test_base
class ProtocolTest(test_base.BaseTest):
"""Tests for protocol implementation."""
def test_use_iterable(self):
ty = self.Infer("""
class A:
def __iter__(self):
return iter(__any_object__)
v = list(A())
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import Any
class A:
def __iter__(self) -> Any: ...
v = ... # type: list
""")
def test_generic(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Generic, Protocol, TypeVar
T = TypeVar("T")
class Foo(Protocol[T]): ...
""")
self.Check("""
import foo
""", pythonpath=[d.path])
def test_generic_py(self):
ty = self.Infer("""
from typing import Protocol, TypeVar
T = TypeVar("T")
class Foo(Protocol[T]):
pass
""")
self.assertTypesMatchPytd(ty, """
from typing import Generic, Protocol, TypeVar
T = TypeVar("T")
class Foo(Protocol, Generic[T]): ...
""")
def test_generic_alias(self):
foo_ty = self.Infer("""
from typing import Protocol, TypeVar
T = TypeVar("T")
Foo = Protocol[T]
class Bar(Foo[T]):
pass
""")
self.assertTypesMatchPytd(foo_ty, """
from typing import Generic, Protocol, TypeVar
T = TypeVar("T")
Foo = Protocol[T]
class Bar(Protocol, Generic[T]): ...
""")
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
ty = self.Infer("""
import foo
from typing import TypeVar
T = TypeVar('T')
class Baz(foo.Foo[T]):
pass
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo: module
from typing import Generic, Protocol, TypeVar
T = TypeVar('T')
class Baz(Protocol, Generic[T]): ...
""")
def test_self_referential_protocol(self):
# Some protocols use methods that return instances of the protocol, e.g.
# Iterator's __next__ returns Iterator. Make sure that doesn't crash pytype.
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Generic, TypeVar
_TElem = TypeVar("_TElem")
_TIter = TypeVar("_TIter", bound=Iter)
class Iter(Generic[_TElem]):
def __init__(self): ...
def next(self) -> _TElem: ...
def __next__(self) -> _TElem: ...
def __iter__(self) -> _TIter: ...
""")
self.Check("""
import foo
i = foo.Iter[int]()
next(i)
""", pythonpath=[d.path])
def test_attribute(self):
self.CheckWithErrors("""
from typing import Protocol
class Foo(Protocol):
x = 0
class Bar:
x = 1
class Baz:
x = '2'
def f(foo):
# type: (Foo) -> None
pass
f(Bar())
f(Baz()) # wrong-arg-types
""")
def test_pyi_protocol_in_typevar(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Generic, TypeVar
from typing_extensions import Protocol
T = TypeVar('T', bound=SupportsClose)
class SupportsClose(Protocol):
def close(self) -> object: ...
class Foo(Generic[T]):
def __init__(self, x: T) -> None: ...
""")
self.Check("""
import foo
class Bar:
def close(self) -> None:
pass
foo.Foo(Bar())
""", pythonpath=[d.path])
if __name__ == "__main__":
test_base.main()
| 26.333333
| 80
| 0.564082
|
cbe4e20aacdfcbc5fcbfc627d75fa3f0d4fff98e
| 886
|
py
|
Python
|
shopping_web/focus/forms.py
|
polegithub/shopping_web_python
|
364effc838d9bf39c060065cb8c982efaae2e34c
|
[
"MIT"
] | null | null | null |
shopping_web/focus/forms.py
|
polegithub/shopping_web_python
|
364effc838d9bf39c060065cb8c982efaae2e34c
|
[
"MIT"
] | null | null | null |
shopping_web/focus/forms.py
|
polegithub/shopping_web_python
|
364effc838d9bf39c060065cb8c982efaae2e34c
|
[
"MIT"
] | null | null | null |
from django import forms
class LoginForm(forms.Form):
uid = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control' ,'id':'uid', 'placeholder': 'Username'}))
pwd = forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control' ,'id':'pwd', 'placeholder': 'Password'}))
class RegisterForm(forms.Form):
username = forms.CharField(label='username', max_length=100,
widget=forms.TextInput(attrs={'id':'username', 'onblur': 'authentication()'}))
email = forms.EmailField()
password1 = forms.CharField(widget=forms.PasswordInput)
password2 = forms.CharField(widget=forms.PasswordInput)
class SetInfoForm(forms.Form):
username = forms.CharField()
class CommmentForm(forms.Form):
comment = forms.CharField(label='', widget=forms.Textarea(attrs={'cols': '60', 'rows': '6'}))
class SearchForm(forms.Form):
keyword = forms.CharField(widget=forms.TextInput)
| 40.272727
| 121
| 0.738149
|
4f5b35ccbba0317dd17acdd57ef6201da95c3384
| 798
|
py
|
Python
|
project/apps/profiles/models.py
|
iguanaus/FPG_Website_Backtest
|
d597dcd291344d7edf97c2db6fdc535d623d47fc
|
[
"Apache-2.0"
] | null | null | null |
project/apps/profiles/models.py
|
iguanaus/FPG_Website_Backtest
|
d597dcd291344d7edf97c2db6fdc535d623d47fc
|
[
"Apache-2.0"
] | 1
|
2019-01-05T14:32:40.000Z
|
2019-01-05T14:32:40.000Z
|
project/apps/profiles/models.py
|
iguanaus/FPG_Website_Backtest
|
d597dcd291344d7edf97c2db6fdc535d623d47fc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser, UserManager
from core.utils import GenerateRandomFilename
image_dir = "profiles"
allowed_ext = ['png', 'jpeg', 'jpg', 'gif', 'bmp']
generate_random_filename = GenerateRandomFilename(image_dir, allowed_ext=allowed_ext)
class User(AbstractUser):
# common
profile_img = models.ImageField(upload_to=generate_random_filename, blank=True, null=True)
def __unicode__(self):
return self.username
@property
def profile_img_url(self):
if self.profile_img and self.profile_img.url:
return self.profile_img.url
return settings.MEDIA_URL + "%s/empty_user_profile_img.png" % (image_dir,)
| 28.5
| 94
| 0.746867
|
6a1c2528ee175d4729b6f619b790a4cb81158e20
| 899
|
py
|
Python
|
pi/ch_14_charlieplexing.py
|
qiang-cynthia/Electronics_Cookbook
|
05966b5194068249054471b08393e39a41eeab15
|
[
"MIT"
] | 42
|
2016-11-02T09:06:06.000Z
|
2022-03-20T08:26:57.000Z
|
pi/ch_14_charlieplexing.py
|
qiang-cynthia/Electronics_Cookbook
|
05966b5194068249054471b08393e39a41eeab15
|
[
"MIT"
] | null | null | null |
pi/ch_14_charlieplexing.py
|
qiang-cynthia/Electronics_Cookbook
|
05966b5194068249054471b08393e39a41eeab15
|
[
"MIT"
] | 14
|
2018-02-02T20:01:45.000Z
|
2021-07-14T11:00:37.000Z
|
import RPi.GPIO as GPIO
import thread, time
GPIO.setmode(GPIO.BCM)
pins = [18, 23, 24]
pin_led_states = [
[1, 0, -1], # LED1
[0, 1, -1], # LED2
[-1, 1, 0], # LED3
[-1, 0, 1], # LED4
[1, -1, 0], # LED5
[0, -1, 1] # LED6
]
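# How to read each row (explanatory comment, not in the original script):
# 1 drives the pin HIGH, 0 drives it LOW, and -1 leaves the pin as a
# high-impedance input. For example LED1 lights when pin 18 is HIGH, pin 23 is
# LOW and pin 24 floats; set_pins() below applies exactly this mapping.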
led_states = [0, 0, 0, 0, 0, 0]
def set_pins(led):
for pin in range(0, 3):
if pin_led_states[led][pin] == -1:
GPIO.setup(pins[pin], GPIO.IN)
else:
GPIO.setup(pins[pin], GPIO.OUT)
GPIO.output(pins[pin], pin_led_states[led][pin])
def clear_pins():
for pin in range(0, 3):
GPIO.setup(pins[pin], GPIO.IN)
def refresh():
while True:
for led in range(0, 6):
clear_pins()
if led_states[led]:
set_pins(led)
else:
clear_pins()
time.sleep(0.001)
thread.start_new_thread(refresh, ())
while True:
x = int(raw_input("Pin (0 to 5) :"))
led_states[x] = not led_states[x]
| 19.977778
| 58
| 0.559511
|
da32cc7de2c0fd2a5f3f8c7da511e4a164fea9b1
| 2,172
|
py
|
Python
|
wye/regions/migrations/0001_initial.py
|
salmanulfarzy/wye
|
a52c15725f44688243c4b63ff7375553c7002d7b
|
[
"MIT"
] | 75
|
2015-08-27T04:16:17.000Z
|
2022-01-05T13:59:46.000Z
|
wye/regions/migrations/0001_initial.py
|
salmanulfarzy/wye
|
a52c15725f44688243c4b63ff7375553c7002d7b
|
[
"MIT"
] | 396
|
2015-09-13T04:50:58.000Z
|
2022-03-11T23:25:50.000Z
|
wye/regions/migrations/0001_initial.py
|
taranjeet/wye
|
ac4cc23d38cf2e72f87a0c1d26fff0316645c1ea
|
[
"MIT"
] | 112
|
2015-08-30T12:58:50.000Z
|
2021-01-31T17:02:31.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('modified_at', models.DateTimeField(verbose_name='Last Modified At', auto_now=True)),
('name', models.CharField(max_length=300, unique=True)),
],
options={
'db_table': 'locations',
},
),
migrations.CreateModel(
name='RegionalLead',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('leads', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
('location', models.ForeignKey(to='regions.Location')),
],
options={
'db_table': 'regional_lead',
'verbose_name': 'RegionalLead',
'verbose_name_plural': 'RegionalLeads',
},
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')),
('modified_at', models.DateTimeField(verbose_name='Last Modified At', auto_now=True)),
('name', models.CharField(max_length=300, unique=True)),
],
options={
'db_table': 'states',
},
),
migrations.AddField(
model_name='location',
name='state',
field=models.ForeignKey(to='regions.State'),
),
]
| 37.448276
| 114
| 0.559392
|
6957eff5193da15fe8780511af41d2f58ce45f27
| 3,947
|
py
|
Python
|
keeper/v2api/builds.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 5
|
2016-05-16T18:46:26.000Z
|
2019-07-08T15:16:41.000Z
|
keeper/v2api/builds.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 46
|
2016-02-18T16:54:36.000Z
|
2022-03-25T19:43:45.000Z
|
keeper/v2api/builds.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 4
|
2016-08-20T23:10:07.000Z
|
2022-03-25T19:52:09.000Z
|
"""Handers for v2 build endpoints."""
from __future__ import annotations
from typing import Dict, Tuple
from flask import request
from flask_accept import accept_fallback
from keeper.auth import token_auth
from keeper.logutils import log_route
from keeper.models import Build, Organization, Product, db
from keeper.services.createbuild import (
create_build,
create_presigned_post_urls,
)
from keeper.services.updatebuild import update_build
from keeper.taskrunner import launch_tasks
from keeper.v2api import v2api
from ._models import (
BuildPatchRequest,
BuildPostRequest,
BuildResponse,
BuildsResponse,
)
from ._urls import url_for_build
__all__ = ["get_builds", "get_build", "post_build", "patch_build"]
@v2api.route("/orgs/<org>/projects/<project>/builds", methods=["GET"])
@accept_fallback
@log_route()
@token_auth.login_required
def get_builds(org: str, project: str) -> str:
builds = (
Build.query.join(Product, Product.id == Build.product_id)
.join(Organization, Organization.id == Product.organization_id)
.filter(Organization.slug == org)
.filter(Product.slug == project)
.all()
)
response = BuildsResponse.from_builds(builds)
return response.json()
@v2api.route("/orgs/<org>/projects/<project>/builds/<id>", methods=["GET"])
@accept_fallback
@log_route()
@token_auth.login_required
def get_build(org: str, project: str, id: str) -> str:
build = (
Build.query.join(Product, Product.id == Build.product_id)
.join(Organization, Organization.id == Product.organization_id)
.filter(Organization.slug == org)
.filter(Product.slug == project)
.filter(Build.slug == id)
.first_or_404()
)
response = BuildResponse.from_build(build)
return response.json()
@v2api.route("/orgs/<org>/projects/<project>/builds", methods=["POST"])
@accept_fallback
@log_route()
@token_auth.login_required
def post_build(org: str, project: str) -> Tuple[str, int, Dict[str, str]]:
product = (
Product.query.join(
Organization, Organization.id == Product.organization_id
)
.filter(Organization.slug == org)
.filter(Product.slug == project)
.first_or_404()
)
request_data = BuildPostRequest.parse_obj(request.json)
try:
build, edition = create_build(
product=product,
git_ref=request_data.git_ref,
github_requester=None,
slug=request_data.slug,
)
except Exception:
db.session.rollback()
raise
presigned_prefix_urls, presigned_dir_urls = create_presigned_post_urls(
build=build, directories=request_data.directories
)
build_response = BuildResponse.from_build(
build,
post_prefix_urls=presigned_prefix_urls,
post_dir_urls=presigned_dir_urls,
)
build_url = url_for_build(build)
return build_response.json(), 201, {"Location": build_url}
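# Illustrative request (field names taken from the handler above; the exact
# schema and example values are assumptions, defined by BuildPostRequest):
#
#   POST /orgs/<org>/projects/<project>/builds
#   {"git_ref": "main", "slug": "1", "directories": ["/"]}
#
# The 201 response carries presigned upload URLs for the build and a Location
# header pointing at the new build resource.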
@v2api.route("/orgs/<org>/projects/<project>/builds/<id>", methods=["PATCH"])
@accept_fallback
@log_route()
@token_auth.login_required
def patch_build(
org: str, project: str, id: str
) -> Tuple[str, int, Dict[str, str]]:
build = (
Build.query.join(Product, Product.id == Build.product_id)
.join(Organization, Organization.id == Product.organization_id)
.filter(Organization.slug == org)
.filter(Product.slug == project)
.filter(Build.slug == id)
.first_or_404()
)
request_data = BuildPatchRequest.parse_obj(request.json)
try:
build = update_build(build=build, uploaded=request_data.uploaded)
except Exception:
db.session.rollback()
# Run the task queue
task = launch_tasks()
build_url = url_for_build(build)
response = BuildResponse.from_build(build, task=task)
return (
response.json(),
202,
{"Location": build_url},
)
| 28.395683
| 77
| 0.67697
|
17265972fd7688b56dc5c749c7f733e5ba2f75d9
| 1,519
|
py
|
Python
|
Tools/compute_difference_percentiles.py
|
nhsx-mirror/riskscore-swift-public
|
95b2bbda68112f9f9d912c5faf2dccf9960bf82e
|
[
"MIT"
] | 1
|
2020-11-23T07:55:21.000Z
|
2020-11-23T07:55:21.000Z
|
Tools/compute_difference_percentiles.py
|
nhsx-mirror/riskscore-swift-public
|
95b2bbda68112f9f9d912c5faf2dccf9960bf82e
|
[
"MIT"
] | null | null | null |
Tools/compute_difference_percentiles.py
|
nhsx-mirror/riskscore-swift-public
|
95b2bbda68112f9f9d912c5faf2dccf9960bf82e
|
[
"MIT"
] | 1
|
2020-10-29T15:01:23.000Z
|
2020-10-29T15:01:23.000Z
|
#!/usr/bin/env python3
import subprocess
import numpy as np
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-s", "--scan-instances-dir", dest="scanInstancesDir",
help="The directory containing ScanInstances to compare", metavar="DIR_PATH", required=True)
parser.add_argument("-r", "--risk-score-data-file", dest="riskScoreDataFile",
help="The file containing the Python riskscore calculations", metavar="FILE_PATH", required=True)
parser.add_argument("outputFilePath", nargs='?', default=None, help="if present, store percentiles at <filepath>.csv and histogram at <filepath>.png, otherwise write percentiles to standard out")
args = parser.parse_args()
results = subprocess.run(["swift", "run", "--configuration", "release", "risk-score-checker", args.scanInstancesDir, args.riskScoreDataFile], capture_output=True, text=True)
differences = [ float(r.split(" ")[-1]) for r in results.stdout.strip().split("\n")[1:] ]
percentiles = [80, 85, 90, 95, 100]
percentileResults = zip(percentiles, np.percentile([abs(d) for d in differences], percentiles))
percentileString = "percentile value\n" + "\n".join(["{} {}".format(p, v) for p, v in percentileResults])
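# Illustrative output (values are made up): percentileString is a small
# space-separated table, e.g.
#   percentile value
#   80 0.0123
#   95 0.0345
#   100 0.0456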
if args.outputFilePath is None:
print(percentileString)
else:
with open(args.outputFilePath + ".csv", "w", encoding="utf8") as f:
f.write(percentileString)
import matplotlib.pyplot as plt
plt.hist(differences)
plt.savefig(args.outputFilePath + ".png")
| 46.030303
| 195
| 0.712969
|
f4755714a3e31714bb3e96ad82ae05648a978ec7
| 22,613
|
py
|
Python
|
playbooks/library/repoquery.py
|
jstuever/oa-testing
|
943fcbf13cd48cec87ae95c491b88629be4ea57c
|
[
"Apache-2.0"
] | null | null | null |
playbooks/library/repoquery.py
|
jstuever/oa-testing
|
943fcbf13cd48cec87ae95c491b88629be4ea57c
|
[
"Apache-2.0"
] | null | null | null |
playbooks/library/repoquery.py
|
jstuever/oa-testing
|
943fcbf13cd48cec87ae95c491b88629be4ea57c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
# pylint: disable=wrong-import-order,wrong-import-position,unused-import
from __future__ import print_function # noqa: F401
import copy # noqa: F401
import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
except ImportError:
import yaml # noqa: F401
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/repoquery -*- -*- -*-
DOCUMENTATION = '''
---
module: repoquery
short_description: Query package information from Yum repositories
description:
- Query package information from Yum repositories.
options:
state:
description:
- The expected state. Currently only supports list.
required: false
default: list
choices: ["list"]
aliases: []
name:
description:
- The name of the package to query
required: true
default: None
aliases: []
query_type:
description:
- Narrows the packages queried based off of this value.
- If repos, it narrows the query to repositories defined on the machine.
- If installed, it narrows the query to only packages installed on the machine.
- If available, it narrows the query to packages that are available to be installed.
- If recent, it narrows the query to only recently edited packages.
- If updates, it narrows the query to only packages that are updates to existing installed packages.
- If extras, it narrows the query to packages that are not present in any of the available repositories.
- If all, it queries all of the above.
required: false
default: repos
aliases: []
verbose:
description:
- Shows more detail for the requested query.
required: false
default: false
aliases: []
show_duplicates:
description:
- Shows multiple versions of a package.
required: false
default: false
aliases: []
match_version:
description:
- Match the specific version given to the package.
required: false
default: None
aliases: []
author:
- "Matt Woodson <mwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
# Example 1: Get bash versions
- name: Get bash version
repoquery:
name: bash
show_duplicates: True
register: bash_out
# Results:
# ok: [localhost] => {
# "bash_out": {
# "changed": false,
# "results": {
# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
# "package_found": true,
# "package_name": "bash",
# "returncode": 0,
# "versions": {
# "available_versions": [
# "4.2.45",
# "4.2.45",
# "4.2.45",
# "4.2.46",
# "4.2.46",
# "4.2.46",
# "4.2.46"
# ],
# "available_versions_full": [
# "4.2.45-5.el7",
# "4.2.45-5.el7_0.2",
# "4.2.45-5.el7_0.4",
# "4.2.46-12.el7",
# "4.2.46-19.el7",
# "4.2.46-20.el7_2",
# "4.2.46-21.el7_3"
# ],
# "latest": "4.2.46",
# "latest_full": "4.2.46-21.el7_3"
# }
# },
# "state": "present"
# }
# }
# Example 2: Get bash versions verbosely
- name: Get bash versions verbosely
repoquery:
name: bash
show_duplicates: True
verbose: True
register: bash_out
# Results:
# ok: [localhost] => {
# "bash_out": {
# "changed": false,
# "results": {
# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
# "package_found": true,
# "package_name": "bash",
# "raw_versions": {
# "4.2.45-5.el7": {
# "arch": "x86_64",
# "release": "5.el7",
# "repo": "rhel-7-server-rpms",
# "version": "4.2.45",
# "version_release": "4.2.45-5.el7"
# },
# "4.2.45-5.el7_0.2": {
# "arch": "x86_64",
# "release": "5.el7_0.2",
# "repo": "rhel-7-server-rpms",
# "version": "4.2.45",
# "version_release": "4.2.45-5.el7_0.2"
# },
# "4.2.45-5.el7_0.4": {
# "arch": "x86_64",
# "release": "5.el7_0.4",
# "repo": "rhel-7-server-rpms",
# "version": "4.2.45",
# "version_release": "4.2.45-5.el7_0.4"
# },
# "4.2.46-12.el7": {
# "arch": "x86_64",
# "release": "12.el7",
# "repo": "rhel-7-server-rpms",
# "version": "4.2.46",
# "version_release": "4.2.46-12.el7"
# },
# "4.2.46-19.el7": {
# "arch": "x86_64",
# "release": "19.el7",
# "repo": "rhel-7-server-rpms",
# "version": "4.2.46",
# "version_release": "4.2.46-19.el7"
# },
# "4.2.46-20.el7_2": {
# "arch": "x86_64",
# "release": "20.el7_2",
# "repo": "rhel-7-server-rpms",
# "version": "4.2.46",
# "version_release": "4.2.46-20.el7_2"
# },
# "4.2.46-21.el7_3": {
# "arch": "x86_64",
# "release": "21.el7_3",
# "repo": "rhel-7-server-rpms",
# "version": "4.2.46",
# "version_release": "4.2.46-21.el7_3"
# }
# },
# "results": "4.2.45|5.el7|x86_64|rhel-7-server-rpms|4.2.45-5.el7\n4.2.45|5.el7_0.2|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.2\n4.2.45|5.el7_0.4|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.4\n4.2.46|12.el7|x86_64|rhel-7-server-rpms|4.2.46-12.el7\n4.2.46|19.el7|x86_64|rhel-7-server-rpms|4.2.46-19.el7\n4.2.46|20.el7_2|x86_64|rhel-7-server-rpms|4.2.46-20.el7_2\n4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3\n",
# "returncode": 0,
# "versions": {
# "available_versions": [
# "4.2.45",
# "4.2.45",
# "4.2.45",
# "4.2.46",
# "4.2.46",
# "4.2.46",
# "4.2.46"
# ],
# "available_versions_full": [
# "4.2.45-5.el7",
# "4.2.45-5.el7_0.2",
# "4.2.45-5.el7_0.4",
# "4.2.46-12.el7",
# "4.2.46-19.el7",
# "4.2.46-20.el7_2",
# "4.2.46-21.el7_3"
# ],
# "latest": "4.2.46",
# "latest_full": "4.2.46-21.el7_3"
# }
# },
# "state": "present"
# }
# }
# Example 3: Match a specific version
- name: matched versions repoquery test
repoquery:
name: atomic-openshift
show_duplicates: True
match_version: 3.3
register: openshift_out
# Result:
# ok: [localhost] => {
# "openshift_out": {
# "changed": false,
# "results": {
# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates atomic-openshift",
# "package_found": true,
# "package_name": "atomic-openshift",
# "returncode": 0,
# "versions": {
# "available_versions": [
# "3.2.0.43",
# "3.2.1.23",
# "3.3.0.32",
# "3.3.0.34",
# "3.3.0.35",
# "3.3.1.3",
# "3.3.1.4",
# "3.3.1.5",
# "3.3.1.7",
# "3.4.0.39"
# ],
# "available_versions_full": [
# "3.2.0.43-1.git.0.672599f.el7",
# "3.2.1.23-1.git.0.88a7a1d.el7",
# "3.3.0.32-1.git.0.37bd7ea.el7",
# "3.3.0.34-1.git.0.83f306f.el7",
# "3.3.0.35-1.git.0.d7bd9b6.el7",
# "3.3.1.3-1.git.0.86dc49a.el7",
# "3.3.1.4-1.git.0.7c8657c.el7",
# "3.3.1.5-1.git.0.62700af.el7",
# "3.3.1.7-1.git.0.0988966.el7",
# "3.4.0.39-1.git.0.5f32f06.el7"
# ],
# "latest": "3.4.0.39",
# "latest_full": "3.4.0.39-1.git.0.5f32f06.el7",
# "matched_version_found": true,
# "matched_version_full_latest": "3.3.1.7-1.git.0.0988966.el7",
# "matched_version_latest": "3.3.1.7",
# "matched_versions": [
# "3.3.0.32",
# "3.3.0.34",
# "3.3.0.35",
# "3.3.1.3",
# "3.3.1.4",
# "3.3.1.5",
# "3.3.1.7"
# ],
# "matched_versions_full": [
# "3.3.0.32-1.git.0.37bd7ea.el7",
# "3.3.0.34-1.git.0.83f306f.el7",
# "3.3.0.35-1.git.0.d7bd9b6.el7",
# "3.3.1.3-1.git.0.86dc49a.el7",
# "3.3.1.4-1.git.0.7c8657c.el7",
# "3.3.1.5-1.git.0.62700af.el7",
# "3.3.1.7-1.git.0.0988966.el7"
# ],
# "requested_match_version": "3.3"
# }
# },
# "state": "present"
# }
# }
'''
# -*- -*- -*- End included fragment: doc/repoquery -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/repoquery.py -*- -*- -*-
'''
class that wraps the repoquery commands in a subprocess
'''
# pylint: disable=too-many-lines,wrong-import-position,wrong-import-order
from collections import defaultdict # noqa: E402
# pylint: disable=no-name-in-module,import-error
# Reason: pylint errors with "No name 'version' in module 'distutils'".
# This is a bug: https://github.com/PyCQA/pylint/issues/73
from distutils.version import LooseVersion # noqa: E402
import subprocess # noqa: E402
class RepoqueryCLIError(Exception):
'''Exception class for repoquerycli'''
pass
def _run(cmds):
''' Actually executes the command. This makes mocking easier. '''
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return proc.returncode, stdout, stderr
# pylint: disable=too-few-public-methods
class RepoqueryCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
verbose=False):
''' Constructor for RepoqueryCLI '''
self.verbose = verbose
self.verbose = True
def _repoquery_cmd(self, cmd, output=False, output_type='json'):
'''Base command for repoquery '''
cmds = ['/usr/bin/repoquery', '--plugins', '--quiet']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
returncode, stdout, stderr = _run(cmds)
rval = {
"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if returncode == 0:
if output:
if output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print(stdout)
print(stderr)
if err:
rval.update({
"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({
"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
# -*- -*- -*- End included fragment: lib/repoquery.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/repoquery.py -*- -*- -*-
class Repoquery(RepoqueryCLI):
''' Class to wrap the repoquery
'''
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, name, query_type, show_duplicates,
match_version, ignore_excluders, verbose):
''' Constructor for YumList '''
super(Repoquery, self).__init__(None)
self.name = name
self.query_type = query_type
self.show_duplicates = show_duplicates
self.match_version = match_version
self.ignore_excluders = ignore_excluders
self.verbose = verbose
if self.match_version:
self.show_duplicates = True
self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
self.tmp_file = None
def build_cmd(self):
''' build the repoquery cmd options '''
repo_cmd = []
repo_cmd.append("--pkgnarrow=" + self.query_type)
repo_cmd.append("--queryformat=" + self.query_format)
if self.show_duplicates:
repo_cmd.append('--show-duplicates')
if self.ignore_excluders:
repo_cmd.append('--config=' + self.tmp_file.name)
repo_cmd.append(self.name)
return repo_cmd
@staticmethod
def process_versions(query_output):
''' format the package data into something that can be presented '''
version_dict = defaultdict(dict)
for version in query_output.decode().split('\n'):
pkg_info = version.split("|")
pkg_version = {}
pkg_version['version'] = pkg_info[0]
pkg_version['release'] = pkg_info[1]
pkg_version['arch'] = pkg_info[2]
pkg_version['repo'] = pkg_info[3]
pkg_version['version_release'] = pkg_info[4]
version_dict[pkg_info[4]] = pkg_version
return version_dict
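    # Example (illustrative, not part of the original module): with the query
    # format used above, a raw line such as
    #   4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3
    # becomes an entry keyed by '4.2.46-21.el7_3' holding its version, release,
    # arch, repo and version_release fields.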
def format_versions(self, formatted_versions):
''' Gather and present the versions of each package '''
versions_dict = {}
versions_dict['available_versions_full'] = list(formatted_versions.keys())
# set the match version, if called
if self.match_version:
versions_dict['matched_versions_full'] = []
versions_dict['requested_match_version'] = self.match_version
versions_dict['matched_versions'] = []
# get the "full version (version - release)
versions_dict['available_versions_full'].sort(key=LooseVersion)
versions_dict['latest_full'] = versions_dict['available_versions_full'][-1]
# get the "short version (version)
versions_dict['available_versions'] = []
for version in versions_dict['available_versions_full']:
versions_dict['available_versions'].append(formatted_versions[version]['version'])
if self.match_version:
if version.startswith(self.match_version):
versions_dict['matched_versions_full'].append(version)
versions_dict['matched_versions'].append(formatted_versions[version]['version'])
versions_dict['available_versions'].sort(key=LooseVersion)
versions_dict['latest'] = versions_dict['available_versions'][-1]
# finish up the matched version
if self.match_version:
if versions_dict['matched_versions_full']:
versions_dict['matched_version_found'] = True
versions_dict['matched_versions'].sort(key=LooseVersion)
versions_dict['matched_version_latest'] = versions_dict['matched_versions'][-1]
versions_dict['matched_version_full_latest'] = versions_dict['matched_versions_full'][-1]
else:
versions_dict['matched_version_found'] = False
versions_dict['matched_versions'] = []
versions_dict['matched_version_latest'] = ""
versions_dict['matched_version_full_latest'] = ""
return versions_dict
def repoquery(self):
'''perform a repoquery '''
if self.ignore_excluders:
# Duplicate yum.conf and reset exclude= line to an empty string
# to clear a list of all excluded packages
self.tmp_file = tempfile.NamedTemporaryFile()
with open("/etc/yum.conf", "r") as file_handler:
yum_conf_lines = file_handler.readlines()
yum_conf_lines = ["exclude=" if l.startswith("exclude=") else l for l in yum_conf_lines]
with open(self.tmp_file.name, "w") as file_handler:
file_handler.writelines(yum_conf_lines)
file_handler.flush()
repoquery_cmd = self.build_cmd()
rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
# check to see if there are actual results
if rval['results']:
processed_versions = Repoquery.process_versions(rval['results'].strip())
formatted_versions = self.format_versions(processed_versions)
rval['package_found'] = True
rval['versions'] = formatted_versions
rval['package_name'] = self.name
if self.verbose:
rval['raw_versions'] = processed_versions
else:
del rval['results']
# No packages found
else:
rval['package_found'] = False
if self.ignore_excluders:
self.tmp_file.close()
return rval
@staticmethod
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
repoquery = Repoquery(
params['name'],
params['query_type'],
params['show_duplicates'],
params['match_version'],
params['ignore_excluders'],
params['verbose'],
)
state = params['state']
if state == 'list':
results = repoquery.repoquery()
if results['returncode'] != 0:
return {'failed': True,
'msg': results}
return {'changed': False, 'module_results': results, 'state': 'list', 'check_mode': check_mode}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/repoquery.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/repoquery.py -*- -*- -*-
def main():
'''
ansible repoquery module
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='list', type='str', choices=['list']),
name=dict(default=None, required=True, type='str'),
query_type=dict(default='repos', required=False, type='str',
choices=[
'installed', 'available', 'recent',
'updates', 'extras', 'all', 'repos'
]),
verbose=dict(default=False, required=False, type='bool'),
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
ignore_excluders=dict(default=False, required=False, type='bool'),
retries=dict(default=4, required=False, type='int'),
retry_interval=dict(default=5, required=False, type='int'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
)
tries = 1
while True:
rval = Repoquery.run_ansible(module.params, module.check_mode)
if 'failed' not in rval:
module.exit_json(**rval)
elif tries > module.params['retries']:
module.fail_json(**rval)
tries += 1
time.sleep(module.params['retry_interval'])
if __name__ == "__main__":
main()
# -*- -*- -*- End included fragment: ansible/repoquery.py -*- -*- -*-
| 35.113354
| 434
| 0.499049
|
09941d0c2502be1b7585470863a26458c59d0322
| 25,067
|
py
|
Python
|
blind_walking/envs/sensors/environment_sensors.py
|
mcx-lab/rl-baselines3-zoo
|
f89938be3b4d9769d9562e7b3d6d1342461adc5c
|
[
"MIT"
] | null | null | null |
blind_walking/envs/sensors/environment_sensors.py
|
mcx-lab/rl-baselines3-zoo
|
f89938be3b4d9769d9562e7b3d6d1342461adc5c
|
[
"MIT"
] | 19
|
2021-08-23T03:21:10.000Z
|
2021-11-26T07:36:42.000Z
|
blind_walking/envs/sensors/environment_sensors.py
|
mcx-lab/rl-baselines3-zoo
|
f89938be3b4d9769d9562e7b3d6d1342461adc5c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sensors related to the environment."""
import csv
import typing
import numpy as np
from blind_walking.envs.sensors import sensor
from numpy.lib.function_base import _angle_dispatcher
_ARRAY = typing.Iterable[float] # pylint:disable=invalid-name
_FLOAT_OR_ARRAY = typing.Union[float, _ARRAY] # pylint:disable=invalid-name
_DATATYPE_LIST = typing.Iterable[typing.Any] # pylint:disable=invalid-name
class LastActionSensor(sensor.BoxSpaceSensor):
"""A sensor that reports the last action taken."""
def __init__(
self,
num_actions: int,
lower_bound: _FLOAT_OR_ARRAY = -np.pi,
upper_bound: _FLOAT_OR_ARRAY = np.pi,
name: typing.Text = "LastAction",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs LastActionSensor.
Args:
num_actions: the number of actions to read
lower_bound: the lower bound of the actions
upper_bound: the upper bound of the actions
name: the name of the sensor
dtype: data type of sensor value
"""
self._num_actions = num_actions
self._env = None
super(LastActionSensor, self).__init__(
name=name,
shape=(self._num_actions,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the last action of the environment."""
return self._env.last_action
class ControllerKpSensor(sensor.BoxSpaceSensor):
"""
A sensor that reports the Kp coefficients
used in the PD controller that converts angles to torques
"""
def __init__(
self,
num_motors: int,
lower_bound: _FLOAT_OR_ARRAY = 0,
upper_bound: _FLOAT_OR_ARRAY = 100,
name: typing.Text = "ControllerKp",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs ControllerKpSensor.
Args:
lower_bound: the lower bound of the gains
upper_bound: the upper bound of the gains
name: the name of the sensor
dtype: data type of sensor value
"""
self._env = None
super(ControllerKpSensor, self).__init__(
name=name,
shape=(num_motors,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the Kp coefficients."""
return self._env.robot.GetMotorPositionGains()
class ControllerKdSensor(sensor.BoxSpaceSensor):
"""
A sensor that reports the Kd coefficients
used in the PD controller that converts angles to torques
"""
def __init__(
self,
num_motors: int,
lower_bound: _FLOAT_OR_ARRAY = 0.0,
upper_bound: _FLOAT_OR_ARRAY = 2.0,
name: typing.Text = "ControllerKd",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs ControllerKdSensor.
Args:
lower_bound: the lower bound of the gain
upper_bound: the upper bound of the gain
name: the name of the sensor
dtype: data type of sensor value
"""
self._env = None
super(ControllerKdSensor, self).__init__(
name=name,
shape=(num_motors,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the Kd coefficients."""
return self._env._robot.GetMotorVelocityGains()
class MotorStrengthSensor(sensor.BoxSpaceSensor):
"""
A sensor that reports the relative motor strength for each joint
"""
def __init__(
self,
num_motors: int,
lower_bound: _FLOAT_OR_ARRAY = 0.0,
upper_bound: _FLOAT_OR_ARRAY = 1.5,
name: typing.Text = "MotorStrength",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs MotorStrengthSensor.
Args:
lower_bound: the lower bound of the gains
upper_bound: the upper bound of the gains
name: the name of the sensor
dtype: data type of sensor value
"""
self._env = None
super(MotorStrengthSensor, self).__init__(
name=name,
shape=(num_motors,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the relative motor strength (1 = full strength)."""
return self._env._robot.GetMotorStrengthRatios()
class FootFrictionSensor(sensor.BoxSpaceSensor):
def __init__(
self,
num_legs: int = 4,
lower_bound: _FLOAT_OR_ARRAY = 0.0,
upper_bound: _FLOAT_OR_ARRAY = 5.0,
name: typing.Text = "FootFriction",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs FootFrictionSensor.
Args:
lower_bound: the lower bound of the foot friction
upper_bound: the upper bound of the foot friction
name: the name of the sensor
dtype: data type of sensor value
"""
self._env = None
super(FootFrictionSensor, self).__init__(
name=name,
shape=(num_legs,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the friction for each foot."""
return self._env._robot.GetFootFriction()
class TargetPositionSensor(sensor.BoxSpaceSensor):
"""A sensor that reports the relative target position."""
def __init__(
self,
max_distance: float = 0.022,
lower_bound: _FLOAT_OR_ARRAY = -1.0,
upper_bound: _FLOAT_OR_ARRAY = 1.0,
name: typing.Text = "TargetPosition",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs TargetPositionSensor.
Args:
lower_bound: the lower bound of the target position
upper_bound: the upper bound of the target position
name: the name of the sensor
dtype: data type of sensor value
"""
self._env = None
# Get data from file
filepath = "blind_walking/envs/tasks/target_positions.csv"
with open(filepath, newline="") as f:
reader = csv.reader(f, delimiter=",", quoting=csv.QUOTE_NONNUMERIC)
self._data = list(reader)
super(TargetPositionSensor, self).__init__(
name=name,
shape=(2,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
self._max_distance = max_distance
self._distance = self._max_distance
self._last_base_pos = np.zeros(3)
self._current_base_pos = np.zeros(3)
self._last_yaw = 0
self._current_yaw = 0
def on_step(self, env):
self._last_base_pos = self._current_base_pos
self._current_base_pos = self._env._robot.GetBasePosition()
self._last_yaw = self._current_yaw
self._current_yaw = self._env._robot.GetTrueBaseRollPitchYaw()[2]
# # Hardcoded, for better training of speed change
# speed_timestep_signals = [1900, 1600, 1300, 1000]
# target_speeds = [0.0, 0.014, 0.016, 0.018]
# for i, t in enumerate(speed_timestep_signals):
# if env._env_step_counter > t:
# self._distance = target_speeds[i]
# break
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
self._distance = self._max_distance
self._current_base_pos = self._env._robot.GetBasePosition()
self._last_base_pos = self._current_base_pos
self._current_yaw = self._env._robot.GetTrueBaseRollPitchYaw()[2]
self._last_yaw = self._current_yaw
def _get_observation(self) -> _ARRAY:
target_pos = self._data[self._env._env_step_counter]
dx_target = target_pos[0] - self._current_base_pos[0]
dy_target = target_pos[1] - self._current_base_pos[1]
# Transform to local frame
dx_target_local, dy_target_local = self.to_local_frame(dx_target, dy_target, self._current_yaw)
target_distance = np.linalg.norm([dx_target_local, dy_target_local])
# If target is too far, scale down to maximum possible
if target_distance and abs(target_distance) > self._distance:
scale_ratio = self._distance / target_distance
dx_target_local = dx_target_local * scale_ratio
dy_target_local = dy_target_local * scale_ratio
return [dx_target_local, dy_target_local]
@staticmethod
def to_local_frame(dx, dy, yaw):
# Transform the x and y direction distances to the robot's local frame
dx_local = np.cos(yaw) * dx + np.sin(yaw) * dy
dy_local = -np.sin(yaw) * dx + np.cos(yaw) * dy
return dx_local, dy_local
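# Illustrative sketch (added for clarity, not part of the original module): a quick numerical
# check of to_local_frame. With yaw = pi/2 the robot's local x-axis points along the world
# y-axis, so a world-frame displacement of (0, 1) maps to roughly (1, 0) in the local frame.
def _example_to_local_frame():
    dx_local, dy_local = TargetPositionSensor.to_local_frame(dx=0.0, dy=1.0, yaw=np.pi / 2)
    # dx_local ~ 1.0, dy_local ~ 0.0 (up to floating point error)
    return dx_local, dy_local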
class ForwardTargetPositionSensor(sensor.BoxSpaceSensor):
"""A sensor that reports the relative target position."""
def __init__(
self,
max_distance: float = 0.022,
lower_bound: _FLOAT_OR_ARRAY = -1.0,
upper_bound: _FLOAT_OR_ARRAY = 1.0,
name: typing.Text = "TargetPosition",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs ForwardTargetPositionSensor.
Args:
lower_bound: the lower bound of the target position
upper_bound: the upper bound of the target position
name: the name of the sensor
dtype: data type of sensor value
"""
self._env = None
super(ForwardTargetPositionSensor, self).__init__(
name=name,
shape=(2,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
self._max_distance = max_distance
self._last_base_pos = np.zeros(3)
self._current_base_pos = np.zeros(3)
self._last_yaw = 0
self._current_yaw = 0
def on_step(self, env):
self._last_base_pos = self._current_base_pos
self._current_base_pos = self._env._robot.GetBasePosition()
self._last_yaw = self._current_yaw
self._current_yaw = self._env._robot.GetTrueBaseRollPitchYaw()[2]
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
self._current_base_pos = self._env._robot.GetBasePosition()
self._last_base_pos = self._current_base_pos
self._current_yaw = self._env._robot.GetTrueBaseRollPitchYaw()[2]
self._last_yaw = self._current_yaw
def _get_observation(self) -> _ARRAY:
# target y position is always zero
dy_target = 0 - self._current_base_pos[1]
# give some leeway for the robot to walk forward
dy_target = max(min(dy_target, self._max_distance / 2), -self._max_distance / 2)
# target x position is always forward
dx_target = np.sqrt(pow(self._max_distance, 2) - pow(dy_target, 2))
# Transform to local frame
dx_target_local, dy_target_local = self.to_local_frame(dx_target, dy_target, self._current_yaw)
return [dx_target_local, dy_target_local]
@staticmethod
def to_local_frame(dx, dy, yaw):
# Transform the x and y direction distances to the robot's local frame
dx_local = np.cos(yaw) * dx + np.sin(yaw) * dy
dy_local = -np.sin(yaw) * dx + np.cos(yaw) * dy
return dx_local, dy_local
class LocalDistancesToGroundSensor(sensor.BoxSpaceSensor):
"""A sensor that detects the local terrain height around the robot"""
def __init__(
self,
grid_unit: float = 0.05,
grid_size: int = 16,
lower_bound: _FLOAT_OR_ARRAY = -100,
upper_bound: _FLOAT_OR_ARRAY = 100,
name: typing.Text = "LocalDistancesToGround",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs LocalDistancesToGroundSensor.
Args:
grid_unit: Side length of one square in the grid
grid_size: Number of squares along one side of grid
lower_bound: the lower bound of the distance to ground.
upper_bound: the upper bound of the distance to ground.
name: the name of the sensor.
dtype: data type of sensor value.
"""
self._env = None
self.grid_unit = grid_unit
self.grid_size = grid_size
super(LocalDistancesToGroundSensor, self).__init__(
name=name,
shape=(grid_size ** 2,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the local distances to ground"""
return self._env.robot.GetLocalDistancesToGround(grid_unit=self.grid_unit, grid_size=self.grid_size).reshape(-1)
class LocalTerrainViewSensor(sensor.BoxSpaceSensor):
"""A sensor that gets a view of the local terrain around the robot"""
def __init__(
self,
grid_unit: float = 0.1,
grid_size: typing.Tuple[int] = (10, 10),
transform: typing.Tuple[float] = (0, 0),
eachfoot: bool = False,
lower_bound: _FLOAT_OR_ARRAY = 0,
upper_bound: _FLOAT_OR_ARRAY = 1,
name: typing.Text = "LocalTerrainView",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs LocalTerrainViewSensor.
Args:
grid_unit: Side length of one square in the grid
grid_size: Number of squares along one side of grid
lower_bound: the lower bound of the terrain view.
upper_bound: the upper bound of the terrain view.
name: the name of the sensor.
dtype: data type of sensor value.
"""
self._env = None
self.grid_unit = grid_unit
self.grid_size = grid_size
self.transform = transform
self.eachfoot = eachfoot
shape = (grid_size[0] * grid_size[1] * 4,) if self.eachfoot else (1, grid_size[0], grid_size[1])
super(LocalTerrainViewSensor, self).__init__(
name=name,
shape=shape,
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the local distances to ground"""
if self.eachfoot:
foot_positions = self._env.robot.GetFootPositionsInBaseFrame()
heightmap = []
for foot_pos in foot_positions:
transform = np.array(self.transform) + np.array(foot_pos[:2])
local_heightmap = self._env.robot.GetLocalTerrainView(
grid_unit=self.grid_unit, grid_size=self.grid_size, transform=transform
)
heightmap = np.concatenate((heightmap, local_heightmap), axis=None)
return heightmap
else:
return self._env.robot.GetLocalTerrainView(
grid_unit=self.grid_unit, grid_size=self.grid_size, transform=self.transform
).reshape(1, self.grid_size[0], self.grid_size[1])
class LocalTerrainDepthSensor(sensor.BoxSpaceSensor):
"""A sensor that gets the depth from the robot to the ground"""
def __init__(
self,
noisy_reading: bool = True,
grid_unit: typing.Tuple[float] = (0.1, 0.1),
grid_size: typing.Tuple[int] = (10, 10),
transform: typing.Tuple[float] = (0, 0),
ray_origin: typing.Text = "body",
lower_bound: _FLOAT_OR_ARRAY = 0.0,
upper_bound: _FLOAT_OR_ARRAY = 8.0,
name: typing.Text = "LocalTerrainDepth",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs LocalTerrainDepthSensor.
Args:
grid_unit: Side length of one square in the grid
grid_size: Number of squares along one side of grid
lower_bound: the lower bound of the terrain view.
upper_bound: the upper bound of the terrain view.
name: the name of the sensor.
dtype: data type of sensor value.
"""
self._env = None
self._noisy_reading = noisy_reading
self.grid_unit = grid_unit
self.grid_size = grid_size
self.transform = transform
self.ray_origin = ray_origin
shape = (1, grid_size[0], grid_size[1])
super(LocalTerrainDepthSensor, self).__init__(
name=name,
shape=shape,
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the local distances to ground"""
heightmap = self._env.robot.GetLocalTerrainDepth(
grid_unit=self.grid_unit,
grid_size=self.grid_size,
transform=self.transform,
ray_origin=self.ray_origin,
).reshape(1, self.grid_size[0], self.grid_size[1])
# Add noise
if self._noisy_reading:
heightmap = heightmap + np.random.normal(scale=0.01, size=heightmap.shape)
# Clip readings
heightmap = np.minimum(np.maximum(heightmap, 0.1), 8.0)
return heightmap
class LocalTerrainDepthByAngleSensor(sensor.BoxSpaceSensor):
"""A sensor that gets the depth from the robot to the ground"""
def __init__(
self,
noisy_reading: bool = True,
grid_angle: typing.Tuple[float] = (0.1, 0.1),
grid_size: typing.Tuple[int] = (10, 10),
transform_angle: typing.Tuple[float] = (0, 0),
ray_origin: typing.Text = "body",
lower_bound: _FLOAT_OR_ARRAY = 0.0,
upper_bound: _FLOAT_OR_ARRAY = 8.0,
name: typing.Text = "LocalTerrainDepthByAngle",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs LocalTerrainDepthByAngleSensor.
Args:
          grid_angle: Angular spacing between sample directions in the grid
          grid_size: Number of samples along each side of the grid
lower_bound: the lower bound of the terrain view.
upper_bound: the upper bound of the terrain view.
name: the name of the sensor.
dtype: data type of sensor value.
"""
self._env = None
self._noisy_reading = noisy_reading
self.grid_angle = grid_angle
self.grid_size = grid_size
self.transform_angle = transform_angle
self.ray_origin = ray_origin
shape = (1, grid_size[0], grid_size[1])
super(LocalTerrainDepthByAngleSensor, self).__init__(
name=name,
shape=shape,
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
def _get_observation(self) -> _ARRAY:
"""Returns the local distances to ground"""
heightmap = self._env.robot.GetLocalTerrainDepthByAngle(
grid_angle=self.grid_angle,
grid_size=self.grid_size,
transform_angle=self.transform_angle,
ray_origin=self.ray_origin,
).reshape(1, self.grid_size[0], self.grid_size[1])
# Add noise
if self._noisy_reading:
heightmap = heightmap + np.random.normal(scale=0.01, size=heightmap.shape)
# Clip readings
heightmap = np.minimum(np.maximum(heightmap, 0.1), 8.0)
return heightmap
class PhaseSensor(sensor.BoxSpaceSensor):
"""
A sensor that returns a 2D unit vector corresponding to a point in a gait cycle
"""
def __init__(
self,
init_angle: float = 0,
frequency: float = 1.0, # Hertz
lower_bound: _FLOAT_OR_ARRAY = -1.0,
upper_bound: _FLOAT_OR_ARRAY = 1.0,
name: typing.Text = "Phase",
enc_name: typing.Text = "flatten",
dtype: typing.Type[typing.Any] = np.float64,
) -> None:
"""Constructs PhaseSensor.
Args:
          init_angle: Initial phase angle at environment step 0
frequency: Number of cycles per second
"""
self._env = None
self.init_angle = init_angle
self.frequency = frequency
super(PhaseSensor, self).__init__(
name=name,
shape=(2,),
enc_name=enc_name,
lower_bound=lower_bound,
upper_bound=upper_bound,
dtype=dtype,
)
def on_reset(self, env):
"""From the callback, the sensor remembers the environment.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
@property
def cycle_delta(self):
"""Return the fraction of a cycle traversed after 1 time step"""
return self.frequency * self._env.env_time_step
@staticmethod
def angle_to_vector(angle):
"""Convert a 1D angle into the corresponding 2D unit vector"""
return np.array([np.cos(angle), np.sin(angle)])
def _get_observation(self) -> _ARRAY:
"""Returns the current phase value"""
cycle = self._env.env_step_counter * self.cycle_delta
# Get the angle corresponding to the cycle
angle = cycle * 2 * np.pi + self.init_angle
return self.angle_to_vector(angle)
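# Illustrative sketch (not part of the original module): the phase observation is a point on
# the unit circle. The frequency and time step below are hypothetical; with a 1 Hz gait and a
# 0.25 s environment step, one full cycle spans four environment steps.
def _example_phase_vector(step_counter=1, frequency=1.0, env_time_step=0.25, init_angle=0.0):
    cycle = step_counter * frequency * env_time_step  # fraction of a cycle elapsed
    angle = cycle * 2 * np.pi + init_angle
    return PhaseSensor.angle_to_vector(angle)  # step 1 -> approximately [0.0, 1.0]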
| 34.8637
| 120
| 0.617744
|
ab4e2b73c071f0e59c01ef8c8a28b731a9a0a4f3
| 10,906
|
py
|
Python
|
SurfaceBrightness/SBCalc_MERGED.py
|
crhea93/AstronomyTools
|
b63a6902b36d992ea46e7889f8dd0b59124f4361
|
[
"MIT"
] | 8
|
2020-01-08T16:39:06.000Z
|
2021-12-19T01:55:04.000Z
|
SurfaceBrightness/SBCalc_MERGED.py
|
crhea93/AstronomyTools
|
b63a6902b36d992ea46e7889f8dd0b59124f4361
|
[
"MIT"
] | 13
|
2018-09-21T17:17:03.000Z
|
2022-03-11T23:38:09.000Z
|
SurfaceBrightness/SBCalc_MERGED.py
|
crhea93/AstronomyTools
|
b63a6902b36d992ea46e7889f8dd0b59124f4361
|
[
"MIT"
] | null | null | null |
'''
Calculate Surface Brightness from Scratch for MERGED Images
This involves creating merged folders for each region and energy range
PLEASE RUN SPECEXTRACT ON EACH OBSERVATION FOR EACH REGION BEFORE RUNNING
INPUTS:
chandra_dir -- full path to data directory (e.g. '/home/user/Documents/Data')
evt_file -- name of event file without extension (e.g. 'acisf#####_repro_evt2')
Also used to calculate on the fly exposure map
energy_range -- energy range in electron volts (e.g. '500:2000')
region -- name of region file of interest without .reg extension (e.g. 'simple')
background -- name of background region file without .reg extension (e.g. 'simple_background')
confidence -- confidence level (e.g. 0.9)
exposure -- Boolean determining method to calculate Net Energy Flux. See
Documentation for more information. (e.g. True)
OUTPUTS:
    .par file containing the aprates solutions, i.e. all counts/rates/flux info (e.g. aprates_<region>.par)
'''
import os
from shutil import copyfile
from astropy.io import fits
from ciao_contrib.runtool import *
#------------------INPUTS------------------------------------------------------#
chandra_dir = '/media/carterrhea/1895813a-f52b-4ccc-9bab-1ee15fee024b/carterrhea/Pipeline-Clusters/Data/SPT-CLJ0000-5748/SPT-CLJ0000-5748/SurfaceBrightness'
output_dir = ''
obs_to_merge = ['9335']
repro_dir = 'repro'
evt_file = 'merged_evt'
energy_range = '500:2000' #in electron volts
regions = ['400kpc'] #set to None for the entire image
background = 'bkg'
exposure = False
#------------------------------------------------------------------------------#
#-------------------------------------------------#
#-------------------------------------------------#
'''
calculate effective monochromatic energy
parameter:
region - region of interest (e.g. 'simple')
energy_range2 - energy range in kiloelectron volts (e.g. '0.5:2.0')
'''
def calc_effenergy(region,energy_range2):
dmtcalc.infile = region+'.arf'
dmtcalc.outfile = "arf_weights"+str(region)
dmtcalc.expression = "mid_energy=(energ_lo+energ_hi)/2.0;weights=(mid_energy*specresp)"
dmtcalc.clobber =True
dmtcalc()
dmstat.infile = "arf_weights"+str(region)+"[mid_energy="+str(energy_range2)+"][cols weights]"
dmstat.verbose = True
dmstat()
weight_sum = float(dmstat.out_sum)
dmstat.infile = "arf_weights"+str(region)+"[mid_energy="+str(energy_range2)+"][cols specresp]"
dmstat.verbose = True
dmstat()
specresp_sum = float(dmstat.out_sum)
eff_energy = weight_sum/specresp_sum
print("Our effective energy is: "+str(eff_energy))
return eff_energy
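# Illustrative sketch (added for clarity, not part of the original script): calc_effenergy
# computes a response-weighted mean photon energy, sum(mid_energy*specresp)/sum(specresp),
# via dmtcalc/dmstat. For an in-memory ARF the same quantity looks like this
# (the arrays below are hypothetical):
def _example_effective_energy():
    import numpy as np
    mid_energy = np.array([0.75, 1.25, 1.75])   # keV, bin mid-points
    specresp = np.array([100.0, 250.0, 150.0])  # cm^2, effective area per bin
    return np.sum(mid_energy * specresp) / np.sum(specresp)  # = 1.30 keV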
#-------------------------------------------------#
#-------------------------------------------------#
'''
Calculate various quantities considered surface brightness such as:
- net counts
- net count Rate
- net photon flux
- net energy flux (two options)
see further documentation
parameters:
evt_file - classic event fits file (e.g. 'acsif_#####_repro_evt2')
if merged ('merged_evt')
energy_range - energy range in electron volts (e.g. 500:2000)
region - region of interest (e.g. 'simple')
background - background .reg file without extension (e.g. 'simple_bkg')
exposure - boolean to use exposure fluxes (e.g. True) (See documentation)
    merged_obs - list of observation IDs that were merged (e.g. ['9335'])
outputs:
.par file containing all calculated quantities (.e.g. 'aprates_'+region+'.par')
Notes:
Usually we use the region name along with the arf files to calculate the monochromatic
energy, but if the data set is merged then we must use the evt_file name (see documentation).
This is handled in the code but be sure to name things appropriately!
'''
def calc_flux(evt_file,energy_range,region,background,exposure = False,merged_obs = ['']):
#Rearrange energy ranges
energies = [float(x) for x in energy_range.split(':')]
energy_range2 = str(energies[0]/1000)+':'+str(energies[1]/1000) #for effective energy (eV)
energy_range3 = str(energies[0]/1000)+'-'+str(energies[1]/1000) #For average effective exposures (eV)
#Get counts for region and background
print("Calculating all data needed to calculate flux")
dmextract.infile = evt_file+".fits[energy="+energy_range+"][bin sky=region("+region+".reg)]"
dmextract.outfile = region+'_counts.fits'
dmextract.opt = 'generic'
dmextract.bkg = evt_file+".fits[energy="+energy_range+"][bin sky=region("+background+".reg)]"
dmextract.clobber = True
dmextract()
dmstat.infile = region+'_counts.fits[cols counts]'
dmstat()
counts = float(dmstat.out_sum)
dmstat.infile = region+'_counts.fits[cols area]'
dmstat()
area = float(dmstat.out_sum)
dmstat.infile = region+'_counts.fits[cols bg_counts]'
dmstat()
bg_counts = float(dmstat.out_sum)
dmstat.infile = region+'_counts.fits[cols bg_area]'
dmstat()
bg_area = float(dmstat.out_sum)
#Set PSF elements
    alpha = 1 #PSF fraction in source aperture; 1 = perfect
    beta = 0 #PSF fraction in background aperture; 0 = perfect
#Exposure Time
T_s = 0
T_b = 0
for obsid in merged_obs:
hdu = fits.open(obsid+'.fits')
hdr = hdu[0].header
T_s += hdr['TSTOP']-hdr['TSTART']
T_b += T_s
hdu.close()
#Calculate average effective exposures
dmstat.punlearn()
dmstat.infile = energy_range3+'_thresh.expmap[sky=region('+region+'.reg)]'
dmstat.centroid = False
dmstat()
E_s = dmstat.out_mean
dmstat.punlearn()
dmstat.infile = energy_range3+'_thresh.expmap[sky=region('+background+'.reg)]'
dmstat.centroid = False
dmstat()
E_b = dmstat.out_mean
    #Calculate average photon energies in source and background aperture
if exposure == False:
dmtcalc.punlearn()
dmtcalc.infile = evt_file+".fits[energy="+energy_range+",sky=region("+region+".reg)]"
dmtcalc.outfile = region+"_source_energy.fits"
dmtcalc.expression = 'energy=1.6e-12*energy' #Convert to ergs
dmtcalc.clobber = True
dmtcalc()
dmstat.punlearn()
dmstat.infile = region+'_source_energy.fits[cols energy]'
dmstat()
eng_s = dmstat.out_mean
dmtcalc.punlearn()
dmtcalc.infile = evt_file+".fits[energy="+energy_range+",sky=region("+background+".reg)]"
dmtcalc.outfile = region+"_background_energy.fits"
dmtcalc.expression = 'energy=1.6e-12*energy' #Convert to ergs
dmtcalc.clobber = True
dmtcalc()
dmstat.punlearn()
dmstat.infile = region+'_background_energy.fits[cols energy]'
dmstat()
eng_b = dmstat.out_mean
        #set flux_s,flux_b to one (a neutral value) so exposure is effectively ignored
flux_s = 1; flux_b = 1
if exposure == True:
eff2evt.punlearn()
eff2evt.infile = evt_file+".fits[energy="+energy_range+"][sky=region("+region+".reg)]"
eff2evt.outfile = region+"_source_effexp.fits"
eff2evt.clobber = True
eff2evt()
dmstat.punlearn()
dmstat.infile = region+'_source_effexp.fits[cols flux]'
dmstat()
flux_s = dmstat.out_mean
eff2evt.punlearn()
eff2evt.infile = evt_file+".fits[energy="+energy_range+"][sky=region("+background+".reg)]"
eff2evt.outfile = region+"_background_effexp.fits"
eff2evt.clobber = True
eff2evt()
dmstat.punlearn()
dmstat.infile = region+'_background_effexp.fits[cols flux]'
dmstat()
flux_b = dmstat.out_mean
#Conversely set eng_s,eng_b to one to signify we are using effective exposure
eng_s = 1; eng_b = 1
#Calculate energy flux and bounds
print("Setting aprates values")
aprates.punlearn()
aprates.conf = 0.90
aprates.n = counts
aprates.m = bg_counts
aprates.A_s = area
aprates.A_b = bg_area
aprates.alpha = alpha
aprates.beta = beta
aprates.T_s = T_s
aprates.T_b = T_b
aprates.E_s = E_s
aprates.E_b = E_b
aprates.eng_s = eng_s
aprates.eng_b = eng_b
aprates.flux_s = flux_s
aprates.flux_b = flux_b
aprates.outfile = 'aprates_'+region+'.par'
aprates.clobber = True
aprates.pdf = 'alternate'
print("Running aprates for flux value")
aprates()
return None
def create_arf(obs_to_merge,region,repro_dir):
#Create arf files
arf_files = ''
pi_files = ''
for obsid in obs_to_merge:
arf_files += obsid+'/'+repro_dir+'/'+region+'.arf,'
pi_files += obsid+'/'+repro_dir+'/'+region+'.pi,'
arf_files = arf_files[:-1]#get rid of final comma
pi_files = pi_files[:-1]
addresp.punlearn()
addresp.infile = ''
addresp.arffile = arf_files
addresp.phafile = pi_files
addresp.outfile = ''
addresp.outarf = region+'_merged.arf'
addresp.clobber = True
addresp()
def merge_observations(obs_to_merge,output_dir,repro_dir,energy_range2,mono_energy):
#Merge individual region files
merging_files = ''
for obsid in obs_to_merge:
merging_files += obsid+'/'+repro_dir+'/acisf'+obsid+'_repro_evt2.fits,'
merging_files = merging_files[:-1]
merge_obs.punlearn()
merge_obs.infile = merging_files
merge_obs.outroot = output_dir+'/'
merge_obs.bands = energy_range2+":"+str(mono_energy)
merge_obs.clobber = True
merge_obs()
def main():
os.chdir(chandra_dir)
arfs = input('Do we need to create merged ARF files: ')
if arfs.lower() == 'yes' or arfs == '':
print("Combining ARF files")
for region in regions:
create_arf(obs_to_merge,region,repro_dir)
if arfs.lower() != 'yes' and arfs != '':
print("Combined ARFs not being created")
energies = [float(x) for x in energy_range.split(':')]
energy_range2 = str(energies[0]/1000)+':'+str(energies[1]/1000)
#mono_energy = calc_effenergy(region+'_merged',energy_range2)
print("")
print("We must now created a merged observation file for this energy band...")
merge_observations(obs_to_merge,output_dir,repro_dir,energy_range2,1.5)
#We need to copy the region files over AND each individual event file
for region in regions:
copyfile(chandra_dir+'/'+obs_to_merge[0]+'/repro/'+region+'.reg',chandra_dir+'/'+output_dir+'/'+region+'.reg')
copyfile(chandra_dir+'/'+obs_to_merge[0]+'/repro/'+background+'.reg',chandra_dir+'/'+output_dir+'/'+background+'.reg')
for obser in obs_to_merge:
copyfile(chandra_dir+'/'+obser+'/repro/acisf'+obser+'_repro_evt2.fits',chandra_dir+'/'+output_dir+'/'+obser+'.fits')
os.chdir(chandra_dir+'/'+output_dir)
for region in regions:
print("Calculating flux for "+region)
calc_flux(evt_file,energy_range,region,background,exposure,obs_to_merge)
main()
| 41.154717
| 156
| 0.648725
|
2d2b947113f02b5ef1cd0346e5a1e4a18d8a3d5a
| 769
|
py
|
Python
|
09-Hot_or_Not/solution.py
|
alvarogzp/badoo-challenge-2015
|
f4e1d8b1837c7cc5ae31bb3fa808a24b60513214
|
[
"MIT"
] | 1
|
2016-01-10T16:59:00.000Z
|
2016-01-10T16:59:00.000Z
|
09-Hot_or_Not/solution.py
|
alvarogzp/badoo-challenge-2015
|
f4e1d8b1837c7cc5ae31bb3fa808a24b60513214
|
[
"MIT"
] | null | null | null |
09-Hot_or_Not/solution.py
|
alvarogzp/badoo-challenge-2015
|
f4e1d8b1837c7cc5ae31bb3fa808a24b60513214
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import png # PyPNG: https://github.com/drj11/pypng doc: https://pythonhosted.org/pypng/index.html
png_reader = png.Reader("HoN.png")
width, height, pixels, metadata = png_reader.read()
out_pixels = []
for row in pixels:
out_row = []
for pixel_start_index in range(0, len(row), 3):
r, g, b = row[pixel_start_index:pixel_start_index+3]
# only use value if odd
r = r if r % 2 == 1 else 0
g = g if g % 2 == 1 else 0
b = b if b % 2 == 1 else 0
out_row.append(r)
out_row.append(g)
out_row.append(b)
out_pixels.append(out_row)
png_writer = png.Writer(width=width, height=height)
out_file = open("out.png", "wb")
png_writer.write(out_file, out_pixels)
out_file.close()
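# Illustrative sketch (not part of the original solution): the loop above keeps a channel value
# only when it is odd, i.e. it masks on the least significant bit of each sample. The same
# parity mask expressed with numpy (hypothetical helper, unused by the script):
def _parity_mask(row):
    import numpy as np
    values = np.asarray(row, dtype=np.uint8)
    return np.where(values % 2 == 1, values, 0)  # keep odd values, zero out even ones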
| 26.517241
| 99
| 0.63329
|
78a9f849eccea2d5252fa82c9d9fa7773236da4f
| 5,228
|
py
|
Python
|
fasterai/modules.py
|
ranihorev/DeOldify
|
334030a0983333293f969664f9147a11e8dfd0d4
|
[
"MIT"
] | 54
|
2019-05-13T02:36:25.000Z
|
2022-03-07T08:48:04.000Z
|
fasterai/modules.py
|
wangroot/DeOldify-1
|
bc9d4562bf2014f5268f5c616ae31873577d9fde
|
[
"MIT"
] | 2
|
2021-06-08T23:55:11.000Z
|
2021-09-08T03:06:38.000Z
|
fasterai/modules.py
|
wangroot/DeOldify-1
|
bc9d4562bf2014f5268f5c616ae31873577d9fde
|
[
"MIT"
] | 10
|
2019-10-15T06:16:02.000Z
|
2021-07-06T04:59:52.000Z
|
from fastai.torch_imports import *
from fastai.conv_learner import *
from torch.nn.utils.spectral_norm import spectral_norm
class ConvBlock(nn.Module):
def __init__(self, ni:int, no:int, ks:int=3, stride:int=1, pad:int=None, actn:bool=True,
bn:bool=True, bias:bool=True, sn:bool=False, leakyReLu:bool=False, self_attention:bool=False,
inplace_relu:bool=True):
super().__init__()
if pad is None: pad = ks//2//stride
if sn:
layers = [spectral_norm(nn.Conv2d(ni, no, ks, stride, padding=pad, bias=bias))]
else:
layers = [nn.Conv2d(ni, no, ks, stride, padding=pad, bias=bias)]
if actn:
layers.append(nn.LeakyReLU(0.2, inplace=inplace_relu)) if leakyReLu else layers.append(nn.ReLU(inplace=inplace_relu))
if bn:
layers.append(nn.BatchNorm2d(no))
if self_attention:
layers.append(SelfAttention(no, 1))
self.seq = nn.Sequential(*layers)
def forward(self, x):
return self.seq(x)
class UpSampleBlock(nn.Module):
@staticmethod
def _conv(ni:int, nf:int, ks:int=3, bn:bool=True, sn:bool=False, leakyReLu:bool=False):
layers = [ConvBlock(ni, nf, ks=ks, sn=sn, bn=bn, actn=False, leakyReLu=leakyReLu)]
return nn.Sequential(*layers)
@staticmethod
def _icnr(x:torch.Tensor, scale:int=2):
init=nn.init.kaiming_normal_
new_shape = [int(x.shape[0] / (scale ** 2))] + list(x.shape[1:])
subkernel = torch.zeros(new_shape)
subkernel = init(subkernel)
subkernel = subkernel.transpose(0, 1)
subkernel = subkernel.contiguous().view(subkernel.shape[0],
subkernel.shape[1], -1)
kernel = subkernel.repeat(1, 1, scale ** 2)
transposed_shape = [x.shape[1]] + [x.shape[0]] + list(x.shape[2:])
kernel = kernel.contiguous().view(transposed_shape)
kernel = kernel.transpose(0, 1)
return kernel
def __init__(self, ni:int, nf:int, scale:int=2, ks:int=3, bn:bool=True, sn:bool=False, leakyReLu:bool=False):
super().__init__()
layers = []
assert (math.log(scale,2)).is_integer()
for i in range(int(math.log(scale,2))):
layers += [UpSampleBlock._conv(ni, nf*4,ks=ks, bn=bn, sn=sn, leakyReLu=leakyReLu),
nn.PixelShuffle(2)]
if bn:
layers += [nn.BatchNorm2d(nf)]
ni = nf
self.sequence = nn.Sequential(*layers)
self._icnr_init()
def _icnr_init(self):
conv_shuffle = self.sequence[0][0].seq[0]
kernel = UpSampleBlock._icnr(conv_shuffle.weight)
conv_shuffle.weight.data.copy_(kernel)
def forward(self, x):
return self.sequence(x)
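# Illustrative sketch (not part of the original module): ICNR fills the conv that feeds
# PixelShuffle so that each group of scale**2 output channels starts out identical, which makes
# the initial upsampling behave like nearest-neighbour and avoids checkerboard artifacts.
# The sizes below are hypothetical.
def _example_icnr_shape():
    weight = torch.zeros(64 * 4, 32, 3, 3)  # conv weight feeding PixelShuffle(2): (nf*4, ni, ks, ks)
    kernel = UpSampleBlock._icnr(weight, scale=2)
    return kernel.shape  # torch.Size([256, 32, 3, 3]), same shape as the input weight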
class UnetBlock(nn.Module):
def __init__(self, up_in:int , x_in:int , n_out:int, bn:bool=True, sn:bool=False, leakyReLu:bool=False,
self_attention:bool=False, inplace_relu:bool=True):
super().__init__()
up_out = x_out = n_out//2
self.x_conv = ConvBlock(x_in, x_out, ks=1, bn=False, actn=False, sn=sn, inplace_relu=inplace_relu)
self.tr_conv = UpSampleBlock(up_in, up_out, 2, bn=bn, sn=sn, leakyReLu=leakyReLu)
self.relu = nn.LeakyReLU(0.2, inplace=inplace_relu) if leakyReLu else nn.ReLU(inplace=inplace_relu)
out_layers = []
if bn:
out_layers.append(nn.BatchNorm2d(n_out))
if self_attention:
out_layers.append(SelfAttention(n_out))
self.out = nn.Sequential(*out_layers)
def forward(self, up_p:int, x_p:int):
up_p = self.tr_conv(up_p)
x_p = self.x_conv(x_p)
x = torch.cat([up_p,x_p], dim=1)
x = self.relu(x)
return self.out(x)
class SaveFeatures():
features=None
def __init__(self, m:nn.Module):
self.hook = m.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.features = output
def remove(self):
self.hook.remove()
class SelfAttention(nn.Module):
def __init__(self, in_channel:int, gain:int=1):
super().__init__()
self.query = self._spectral_init(nn.Conv1d(in_channel, in_channel // 8, 1),gain=gain)
self.key = self._spectral_init(nn.Conv1d(in_channel, in_channel // 8, 1),gain=gain)
self.value = self._spectral_init(nn.Conv1d(in_channel, in_channel, 1), gain=gain)
self.gamma = nn.Parameter(torch.tensor(0.0))
def _spectral_init(self, module:nn.Module, gain:int=1):
nn.init.kaiming_uniform_(module.weight, gain)
if module.bias is not None:
module.bias.data.zero_()
return spectral_norm(module)
def forward(self, input:torch.Tensor):
shape = input.shape
flatten = input.view(shape[0], shape[1], -1)
query = self.query(flatten).permute(0, 2, 1)
key = self.key(flatten)
value = self.value(flatten)
query_key = torch.bmm(query, key)
attn = F.softmax(query_key, 1)
attn = torch.bmm(value, attn)
attn = attn.view(*shape)
out = self.gamma * attn + input
return out
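# Illustrative sketch (not part of the original module): SelfAttention preserves the shape of
# its input feature map, so it can be appended to a conv block without changing the layers
# that follow. The sizes below are hypothetical.
def _example_self_attention_shape():
    attention = SelfAttention(in_channel=64)
    features = torch.randn(2, 64, 16, 16)  # batch of 2 feature maps, 64 channels, 16x16
    return attention(features).shape  # torch.Size([2, 64, 16, 16])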
| 39.308271
| 130
| 0.609602
|
688962353130fbb80f7d61ef655a7b7d69ac032e
| 794
|
py
|
Python
|
ex39_sql_create/test.py
|
techieguy007/learn-more-python-the-hard-way-solutions
|
7886c860f69d69739a41d6490b8dc3fa777f227b
|
[
"Zed",
"Unlicense"
] | 466
|
2016-11-01T19:40:59.000Z
|
2022-03-23T16:34:13.000Z
|
ex39_sql_create/test.py
|
Desperaaado/learn-more-python-the-hard-way-solutions
|
7886c860f69d69739a41d6490b8dc3fa777f227b
|
[
"Zed",
"Unlicense"
] | 2
|
2017-09-20T09:01:53.000Z
|
2017-09-21T15:03:56.000Z
|
ex39_sql_create/test.py
|
Desperaaado/learn-more-python-the-hard-way-solutions
|
7886c860f69d69739a41d6490b8dc3fa777f227b
|
[
"Zed",
"Unlicense"
] | 241
|
2017-06-17T08:02:26.000Z
|
2022-03-30T09:09:39.000Z
|
class Person(object):
def __init__(self, first_name,
last_name, age, pets):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.pets = pets
class Pet(object):
def __init__(self, name, breed,
age, dead):
self.name = name
self.breed = breed
self.age = age
self.dead = dead
self.owners = []
# simulate insert
fluffy = Pet('Fluffy', 'Unicorn', 12, False)
gigantor = Pet('Gigantor', 'Robot', 2, False)
pete = Person("Zed", "Shaw", 43, [fluffy, gigantor])
fluffy.owners.append(pete)
gigantor.owners.append(pete)
DB = {
'person': [ pete ],
'pet': [fluffy, gigantor],
}
dead_pets = [pet for pet in DB['pet'] if pet.dead == False]  # note: this filter actually keeps pets that are NOT dead
print(dead_pets)
| 22.685714
| 59
| 0.583123
|
9ed017592afdcf6608833458eba192f616c9249d
| 3,514
|
py
|
Python
|
tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops_test.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input_pipeline_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InputPipelineOpsTest(test.TestCase):
def testObtainNext(self):
with self.test_session():
var = state_ops.variable_op([], dtypes.int64)
state_ops.assign(var, -1).op.run()
c = constant_op.constant(["a", "b"])
sample1 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"a", sample1.eval())
self.assertEqual(0, var.eval())
sample2 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"b", sample2.eval())
self.assertEqual(1, var.eval())
sample3 = input_pipeline_ops.obtain_next(c, var)
self.assertEqual(b"a", sample3.eval())
self.assertEqual(0, var.eval())
def testSeekNext(self):
string_list = ["a", "b", "c"]
with self.test_session() as session:
elem = input_pipeline_ops.seek_next(string_list)
session.run([variables.global_variables_initializer()])
self.assertEqual(b"a", session.run(elem))
self.assertEqual(b"b", session.run(elem))
self.assertEqual(b"c", session.run(elem))
# Make sure we loop.
self.assertEqual(b"a", session.run(elem))
# Helper method that runs the op len(expected_list) number of times, asserts
# that the results are elements of the expected_list and then throws an
# OutOfRangeError.
def _assert_output(self, expected_list, session, op):
for element in expected_list:
self.assertEqual(element, session.run(op))
with self.assertRaises(errors.OutOfRangeError):
session.run(op)
def testSeekNextLimitEpochs(self):
string_list = ["a", "b", "c"]
with self.test_session() as session:
elem = input_pipeline_ops.seek_next(string_list, num_epochs=1)
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
self._assert_output([b"a", b"b", b"c"], session, elem)
def testSeekNextLimitEpochsThree(self):
string_list = ["a", "b", "c"]
with self.test_session() as session:
elem = input_pipeline_ops.seek_next(string_list, num_epochs=3)
session.run([
variables.local_variables_initializer(),
variables.global_variables_initializer()
])
# Expect to see [a, b, c] three times.
self._assert_output([b"a", b"b", b"c"] * 3, session, elem)
if __name__ == "__main__":
test.main()
| 39.044444
| 80
| 0.69778
|
5f6e7a738313a4f240ac0bce257385fe5c41ecca
| 2,923
|
py
|
Python
|
eval/generate_visual.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 138
|
2021-01-12T03:02:04.000Z
|
2022-03-30T07:14:15.000Z
|
eval/generate_visual.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 12
|
2021-02-02T14:19:30.000Z
|
2022-03-28T01:23:44.000Z
|
eval/generate_visual.py
|
urasakikeisuke/rigidmask
|
4bb781102218dfd11efa767e2d0ba987d9949fd1
|
[
"MIT"
] | 14
|
2021-01-13T01:31:34.000Z
|
2022-01-30T14:48:06.000Z
|
import os
import sys
sys.path.insert(0,os.getcwd())
import cv2
import torch
import glob
import numpy as np
import pdb
import imageio
import detectron2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
from typing import Any, Dict, List, Tuple, Union
from utils.util_flow import readPFM
coco_metadata = MetadataCatalog.get("coco_2017_val")
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--datapath', default='',
help='dataset path')
parser.add_argument('--imgpath', default='',
help='dataset path')
args = parser.parse_args()
class Object(object):
def has(self, name: str) -> bool:
return name in self._fields
def __getattr__(self, name: str) -> Any:
if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self._fields[name]
frames=[]
for i,path in enumerate(sorted(glob.glob('%s/pm*'%args.datapath))):
print(path)
pred = readPFM(path)[0]
center_img = cv2.imread(path.replace('pm', 'mvis').replace('.pfm', '.jpg'))
img = cv2.imread('%s/%s.png'%(args.imgpath,path.split('/')[-1].split('pm-')[1].split('.pfm')[0]))
if img is None:
img = cv2.imread('%s/%s.jpg'%(args.imgpath,path.split('/')[-1].split('pm-')[1].split('.pfm')[0]))
shape = pred.shape[:2]
num_instances = int(pred.max())
# if no object detected
if num_instances==0:
_, pred =cv2.connectedComponentsWithAlgorithm((1-(pred==0).astype(np.uint8)),connectivity=8,ltype=cv2.CV_16U,ccltype=cv2.CCL_WU)
num_instances = pred.max()
if num_instances>0:
pred_masks = torch.zeros((num_instances,)+shape).bool()
for k in range(num_instances):
pred_masks[k] = torch.Tensor(pred==(k+1))
obj = Object()
obj.image_height = shape[0]
obj.image_width = shape[1]
obj._fields = {}
obj._fields["pred_masks"] = pred_masks
v = Visualizer(img, coco_metadata, scale=0.5, instance_mode=ColorMode.IMAGE_BW)
try:
vis = v.draw_instance_predictions(obj)
except:pdb.set_trace()
mask_result = vis.get_image()
else:
mask_result = cv2.resize(img,None,fx=0.5,fy=0.5)
# write results
cv2.imwrite(path.replace('pm-', 'vis-').replace('.pfm','.png'), mask_result)
try:
center_img = cv2.resize(center_img, mask_result.shape[:2][::-1])
blend = cv2.addWeighted(mask_result, 1, center_img, 1, 0)
cv2.imwrite(path.replace('pm-', 'bvis-').replace('.pfm','.png'), blend)
except:pass
frame = blend[:,:,::-1].copy()
frames.append(frame)
imageio.mimsave('./output-seg.gif', frames, duration=5./len(frames))
| 35.216867
| 136
| 0.64728
|
056b8515e46fab8a10ac9dca6110660df686dfd7
| 1,396
|
py
|
Python
|
Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/sqlparse/__init__.py
|
tchamabe1979/exareme
|
462983e4feec7808e1fd447d02901502588a8879
|
[
"MIT"
] | null | null | null |
Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/sqlparse/__init__.py
|
tchamabe1979/exareme
|
462983e4feec7808e1fd447d02901502588a8879
|
[
"MIT"
] | null | null | null |
Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/sqlparse/__init__.py
|
tchamabe1979/exareme
|
462983e4feec7808e1fd447d02901502588a8879
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""Parse SQL statements."""
__version__ = '0.1.1'
class SQLParseError(Exception):
"""Base class for exceptions in this module."""
# Setup namespace
from sqlparse import engine
from sqlparse import filters
from sqlparse import formatter
def parse(sql):
"""Parse sql and return a list of statements.
    *sql* is a single string containing one or more SQL statements.
Returns a tuple of :class:`~sqlparse.sql.Statement` instances.
"""
stack = engine.FilterStack()
stack.full_analyze()
return tuple(stack.run(sql))
def format(sql, **options):
"""Format *sql* according to *options*.
Available options are documented in :ref:`formatting`.
Returns the formatted SQL statement as string.
"""
stack = engine.FilterStack()
options = formatter.validate_options(options)
stack = formatter.build_filter_stack(stack, options)
stack.postprocess.append(filters.SerializerUnicode())
return ''.join(stack.run(sql))
def split(sql):
"""Split *sql* into single statements.
Returns a list of strings.
"""
stack = engine.FilterStack()
stack.split_statements = True
return [unicode(stmt) for stmt in stack.run(sql)]
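# Illustrative sketch (not part of the original module): typical use of the three public
# helpers above; the SQL text is hypothetical and reindent is one of the documented
# formatting options.
def _example_usage():
    raw = 'select * from foo; select * from bar;'
    statements = split(raw)              # list with one string per statement
    pretty = format(raw, reindent=True)  # re-indented SQL as a single string
    parsed = parse(raw)                  # tuple of sqlparse.sql.Statement objects
    return statements, pretty, parsed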
| 25.381818
| 70
| 0.704871
|
b3679abad6712667ca62ef99ad69ba40e363b492
| 18,836
|
py
|
Python
|
glide/extract.py
|
pombredanne/glide
|
5c3a6caa157da626281aa9562ef0cba9bc4919e9
|
[
"MIT"
] | null | null | null |
glide/extract.py
|
pombredanne/glide
|
5c3a6caa157da626281aa9562ef0cba9bc4919e9
|
[
"MIT"
] | null | null | null |
glide/extract.py
|
pombredanne/glide
|
5c3a6caa157da626281aa9562ef0cba9bc4919e9
|
[
"MIT"
] | null | null | null |
"""A home for common data extraction nodes"""
import codecs
from collections import OrderedDict
from copy import deepcopy
import csv
from email import parser, policy
from io import BytesIO
from imapclient import IMAPClient
from pandas.io.common import get_filepath_or_buffer
import requests
from tlbx import st, read_chunks, extract_email_payload
from glide.core import Node
from glide.sql_utils import build_table_select
from glide.sql import SQLNode
from glide.utils import (
dbg,
read_excel,
find_class_in_dict,
get_class_list_docstring,
not_none,
)
class CSVExtract(Node):
"""Extract data from a CSV"""
def run(
self,
f,
open_flags="r",
chunksize=None,
nrows=None,
reader=csv.DictReader,
**kwargs
):
"""Extract data for input file and push dict rows
Parameters
----------
f : file path or buffer
file path or buffer to read CSV
open_flags : str, optional
Flags to pass to open() if f is not already an opened buffer
chunksize : int, optional
Read data in chunks of this size
nrows : int, optional
Limit to reading this number of rows
reader : csv Reader, optional
The CSV reader class to use. Defaults to csv.DictReader
**kwargs
keyword arguments passed to the reader
"""
# Re-use pandas functionality utilized by read_csv
# TODO: this uses urlopen under the hood. It may be more efficient to use
# requests.get() with stream=True.
# https://stackoverflow.com/a/42979967/10682164
f, encoding, _, should_close = get_filepath_or_buffer(f)
close = False or should_close
decode = False
if isinstance(f, str):
f = open(f, open_flags)
close = True
elif isinstance(f, BytesIO) or encoding:
decode = True
try:
if decode:
reader = reader(codecs.iterdecode(f, encoding or "utf-8"), **kwargs)
else:
reader = reader(f, **kwargs)
if chunksize:
for chunk in read_chunks(reader, chunksize, limit=nrows):
self.push(chunk)
else:
rows = []
for i, row in enumerate(reader):
if nrows and i >= nrows:
break
rows.append(row)
self.push(rows)
finally:
if close:
try:
f.close()
except ValueError:
pass
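# Illustrative sketch (not part of the original module), assuming the Glider/consume pipeline
# API from glide's documentation; the node names, downstream node, and file path are
# hypothetical. CSVExtract pushes a list of dict rows, or successive chunks when chunksize
# is set:
#
#   glider = Glider(CSVExtract("extract") | MyLoadNode("load"))
#   glider.consume(["/path/to/input.csv"], extract=dict(chunksize=100, nrows=1000))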
class ExcelExtract(Node):
"""Extract data from an Excel file"""
def run(self, f, dict_rows=False, **kwargs):
"""Use pyexcel to read data from a file
Parameters
----------
f : str or buffer
The Excel file to read. Multiple excel formats supported.
dict_rows : bool, optional
If true the rows of each sheet will be converted to dicts with
column names as keys.
**kwargs
Keyword arguments passed to pyexcel
"""
data = read_excel(f, **kwargs)
if dict_rows:
for sheet_name in data.keys():
data[sheet_name] = [
OrderedDict(zip(data[sheet_name][0], data[sheet_name][i]))
for i in range(1, len(data[sheet_name]))
]
self.push(data)
class SQLExtract(SQLNode):
"""Generic SQL extract Node"""
def run(
self,
sql,
conn,
cursor=None,
cursor_type=None,
params=None,
chunksize=None,
**kwargs
):
"""Extract data for input query and push fetched rows.
Parameters
----------
sql : str
SQL query to run
conn
SQL connection object
cursor : optional
SQL connection cursor object
cursor_type : optional
SQL connection cursor type when creating a cursor is necessary
params : tuple or dict, optional
A tuple or dict of params to pass to the execute method
chunksize : int, optional
Fetch and push data in chunks of this size
**kwargs
Keyword arguments pushed to the execute method
"""
if not cursor:
cursor = self.get_sql_executor(conn, cursor_type=cursor_type)
params = params or ()
fetcher = self.execute(conn, cursor, sql, params=params, **kwargs)
self.do_push(fetcher, chunksize=chunksize)
class SQLParamExtract(SQLExtract):
"""Generic SQL extract node that expects SQL params as data instead of a query"""
def run(
self, params, sql, conn, cursor=None, cursor_type=None, chunksize=None, **kwargs
):
"""Extract data for input params and push fetched rows.
Parameters
----------
params : tuple or dict
A tuple or dict of params to pass to the execute method
sql : str
SQL query to run
conn
SQL connection object
cursor : optional
SQL connection cursor object
cursor_type : optional
SQL connection cursor type when creating a cursor is necessary
chunksize : int, optional
Fetch and push data in chunks of this size
**kwargs
Keyword arguments pushed to the execute method
"""
super().run(
sql,
conn,
cursor=cursor,
cursor_type=cursor_type,
params=params,
chunksize=chunksize,
**kwargs
)
class SQLTableExtract(SQLNode):
"""Generic SQL table extract node"""
def run(
self,
table,
conn,
cursor=None,
cursor_type=None,
where=None,
limit=None,
params=None,
chunksize=None,
**kwargs
):
"""Extract data for input table and push fetched rows
Parameters
----------
table : str
SQL table name
conn
SQL connection object
cursor : optional
SQL connection cursor object
cursor_type : optional
SQL connection cursor type when creating a cursor is necessary
where : str, optional
SQL where clause
limit : int, optional
Limit to put in SQL limit clause
params : tuple or dict, optional
A tuple or dict of params to pass to the execute method
chunksize : int, optional
Fetch and push data in chunks of this size
**kwargs
Keyword arguments passed to cursor.execute
"""
if not cursor:
cursor = self.get_sql_executor(conn, cursor_type=cursor_type)
sql = build_table_select(table, where=where, limit=limit)
params = params or ()
fetcher = self.execute(conn, cursor, sql, params=params, **kwargs)
self.do_push(fetcher, chunksize=chunksize)
class FileExtract(Node):
"""Extract raw data from a file"""
def run(self, f, open_flags="r", chunksize=None, push_lines=False, limit=None):
"""Extract raw data from a file or buffer and push contents
Parameters
----------
f : file path or buffer
File path or buffer to read
open_flags : str, optional
Flags to pass to open() if f is not already an opened buffer
chunksize : int, optional
Push lines in chunks of this size
push_lines : bool, optional
Push each line as it's read instead of reading entire file and pushing
limit : int, optional
Limit to first N lines
"""
assert not (
chunksize and push_lines
), "Only one of chunksize and push_lines may be specified"
f, _, _, should_close = get_filepath_or_buffer(f)
close = False or should_close
if isinstance(f, str):
f = open(f, open_flags)
close = True
try:
data = []
count = 0
for line in f:
count += 1
if push_lines:
self.push(line)
else:
data.append(line)
if chunksize and (count % chunksize == 0):
self.push("".join(data))
data = []
if limit and count >= limit:
break
if ((not push_lines) and data) or count == 0:
self.push("".join(data))
finally:
if close:
try:
f.close()
except ValueError:
pass
class URLExtract(Node):
"""Extract data from a URL with requests"""
def run(
self,
request,
data_type="content",
session=None,
skip_raise=False,
page_size=None,
page_size_param="size",
page_offset_param="offset",
page_request_param="params",
page_key=None,
page_len=len,
page_limit=None,
push_pages=False,
**kwargs
):
"""Extract data from a URL using requests and push
response.content. Input request may be a string (GET that url) or a
dictionary of args to requests.request:
http://2.python-requests.org/en/master/api/?highlight=get#requests.request
See the requests docs for information on authentication options:
https://requests.kennethreitz.org/en/master/user/authentication/
Parameters
----------
request : str or dict
If str, a URL to GET. If a dict, args to requests.request
data_type : str, optional
One of "content", "text", or "json" to control extraction of
data from requests response.
session : optional
A requests Session to use to make the request
skip_raise : bool, optional
if False, raise exceptions for bad response status
page_size : int, optional
If specified, request in pages of this size. Only supported with
data_type="json".
page_size_param : str, optional
The request parameter to put the page size in
page_offset_param : str, optional
The request parameter to put the page offset in
page_request_param : str, optional
Where to put the paging params when calling requests. Can either be
"params" or "data".
page_key : str or callable, optional
Where to pull the page data from the results. If None, assume the
entire json response is the page data.
page_len : callable
A callable that can determine the length of the page given the
json result. The default is just to use len(result).
page_limit : int, optional
If passed, use as a cap of the number of pages pulled
push_pages : bool, optional
If true, push each page individually.
**kwargs
Keyword arguments to pass to the request method. If a dict is
passed for the request parameter it overrides values of kwargs.
"""
requestor = requests
if session:
requestor = session
paging = False
if page_size or push_pages:
paging = True
assert not_none(
page_request_param,
page_size,
push_pages,
page_size_param,
page_offset_param,
), "Not all paging params specified"
assert page_request_param in ["data", "params"], (
"Invalid page_request_param: %s" % page_request_param
)
assert (
data_type == "json"
), "Paging is only supported with JSON-based results"
kwargs[page_request_param] = kwargs.get(page_request_param, {})
offset = 0
results = []
if isinstance(request, str):
request = dict(method="GET", url=request)
else:
assert isinstance(
request, dict
), "Request must be a str or dict type, got %s" % type(request)
count = 0
while True:
kwargs_copy = deepcopy(kwargs)
kwargs_copy.update(request)
if paging:
assert not (
page_size_param in kwargs_copy[page_request_param]
or page_offset_param in kwargs_copy[page_request_param]
), ("Params conflict with paging params: %s" % url)
kwargs_copy[page_request_param].update(
{page_size_param: page_size, page_offset_param: offset}
)
resp = requestor.request(**kwargs_copy)
count += 1
if not skip_raise:
resp.raise_for_status()
if data_type == "content":
data = resp.content
elif data_type == "text":
data = resp.text
elif data_type == "json":
data = resp.json()
else:
assert False, (
"Unrecognized data_type: %s, must be one of content, text, or json"
% data_type
)
if paging:
page = data
if page_key:
if isinstance(page_key, str):
page = data[page_key]
else:
page = page_key(data)
offset += page_len(page)
if push_pages:
self.push(page)
else:
results.extend(page)
if page_limit and count >= page_limit:
break
else:
results = data
break
if (not paging) or (not push_pages):
self.push(results)
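# Illustrative sketch (not part of this module's API): the helper below shows,
# with plain requests calls, the paging scheme URLExtract.run implements above.
# The parameter names "size"/"offset" and the "results" page key are
# assumptions that depend on the target API.
def _paged_get_sketch(url, page_size=100, page_key="results"):
    """Fetch all pages of a JSON endpoint by merging size/offset params."""
    import requests  # also available at module level for URLExtract above
    offset, results = 0, []
    while True:
        resp = requests.request("GET", url, params={"size": page_size, "offset": offset})
        resp.raise_for_status()
        page = resp.json()[page_key]
        results.extend(page)
        offset += len(page)
        if len(page) < page_size:  # a short (or empty) page means we are done
            break
    return results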
class EmailExtract(Node):
"""Extract data from an email inbox using IMAPClient: https://imapclient.readthedocs.io"""
def run(
self,
criteria,
sort=None,
folder="INBOX",
client=None,
host=None,
username=None,
password=None,
push_all=False,
push_type="message",
limit=None,
**kwargs
):
"""Extract data from an email inbox and push the data forward.
Note
----
Instances of IMAPClient are NOT thread safe. They should not be shared
and accessed concurrently from multiple threads.
Parameters
----------
criteria : str or list
Criteria argument passed to IMAPClient.search. See
https://tools.ietf.org/html/rfc3501.html#section-6.4.4.
sort : str or list, optional
Sort criteria passed to IMAPClient.sort. Note that SORT is an
extension to the IMAP4 standard so it may not be supported by all
IMAP servers. See https://tools.ietf.org/html/rfc5256.
folder : str, optional
Folder to read emails from
client : optional
An established IMAPClient connection. If not present, the
host/login information is required.
host : str, optional
The IMAP host to connect to
username : str, optional
The IMAP username for login
password : str, optional
The IMAP password for login
push_all : bool, optional
            When True, push all retrieved data/emails at once
push_type : str, optional
What type of data to extract and push from the emails. Options include:
* **message**: push email.message.EmailMessage objects
* **message_id**: push a list of message IDs that can be fetched
* **all**: push a list of dict(message=<email.message.EmailMessages>, payload=<extracted payload>)
* **body**: push a list of email bodies
* **attachment**: push a list of attachments (an email with multiple attachments will be grouped in a sublist)
limit : int, optional
Limit to N rows
**kwargs
            Keyword arguments to pass to IMAPClient if no client is passed
"""
data = []
logout = False
push_types = ["message_id", "message", "all", "body", "attachment"]
if not client:
assert (
host and username and password
), "Host/Username/Password required to create IMAPClient"
dbg("Logging into IMAPClient %s/%s" % (host, username))
logout = True
client = IMAPClient(host, **kwargs)
client.login(username, password)
try:
client.select_folder(folder)
if sort:
messages = client.sort(sort, criteria=criteria)
else:
messages = client.search(criteria)
dbg("Found %d email messages" % len(messages))
if push_type == "message_id":
if limit:
data = messages[:limit]
else:
data = messages
else:
assert (
push_type in push_types
), "Unrecognized push_type: %s, options: %s" % (push_type, push_types)
count = 0
for msg_id, msg_data in client.fetch(messages, ["RFC822"]).items():
raw = msg_data[b"RFC822"].decode("utf8")
msg = parser.Parser(policy=policy.default).parsestr(raw)
if push_type == "message":
data.append(msg)
else:
payload = extract_email_payload(msg)
if push_type == "body":
data.append(payload[0])
elif push_type == "attachment":
data.append(payload[1:])
elif push_type == "all":
data.append(dict(message=msg, payload=payload))
count += 1
if limit and count >= limit:
break
finally:
if logout:
client.logout()
if push_all:
self.push(data)
else:
for row in data:
self.push(row)
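# Illustrative sketch (not part of this module's API): the same message-fetching
# flow EmailExtract.run performs above, written directly against IMAPClient.
# The folder and search criteria below are placeholders; IMAPClient, parser and
# policy are the objects already imported by this module.
def _imap_fetch_sketch(host, username, password, criteria="UNSEEN", folder="INBOX"):
    """Return parsed email.message objects for messages matching criteria."""
    client = IMAPClient(host)
    client.login(username, password)
    try:
        client.select_folder(folder)
        messages = client.search(criteria)
        parsed = []
        for msg_id, msg_data in client.fetch(messages, ["RFC822"]).items():
            raw = msg_data[b"RFC822"].decode("utf8")
            parsed.append(parser.Parser(policy=policy.default).parsestr(raw))
        return parsed
    finally:
        client.logout()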
node_names = find_class_in_dict(Node, locals(), include="Extract")
if node_names:
__doc__ = __doc__ + get_class_list_docstring("Nodes", node_names)
| 32.419966
| 126
| 0.541463
|
e2e5ddd3a32140f7e6ad33ab4b2950d5d551c04b
| 53
|
py
|
Python
|
backend/app/views/extract_features/__init__.py
|
Edinburgh-Genome-Foundry/CUBA
|
d57565951ead619ef9263e8b356b451001fb910f
|
[
"MIT"
] | 15
|
2018-02-12T13:12:13.000Z
|
2021-08-15T11:37:59.000Z
|
backend/app/views/extract_features/__init__.py
|
Edinburgh-Genome-Foundry/CUBA
|
d57565951ead619ef9263e8b356b451001fb910f
|
[
"MIT"
] | 9
|
2020-06-05T17:54:54.000Z
|
2022-02-12T12:03:19.000Z
|
backend/app/views/extract_features/__init__.py
|
Edinburgh-Genome-Foundry/CUBA
|
d57565951ead619ef9263e8b356b451001fb910f
|
[
"MIT"
] | 3
|
2018-10-18T13:08:50.000Z
|
2020-08-17T14:09:46.000Z
|
from .ExtractFeaturesView import ExtractFeaturesView
| 26.5
| 52
| 0.90566
|
f13cbebc74266380adca3761f264869034f2cb8f
| 7,023
|
py
|
Python
|
focalplane/utils.py
|
Johannes-Sahlmann/focalplane
|
39dc5bc7ed817321247f652d4c5d3565efccb022
|
[
"BSD-3-Clause"
] | null | null | null |
focalplane/utils.py
|
Johannes-Sahlmann/focalplane
|
39dc5bc7ed817321247f652d4c5d3565efccb022
|
[
"BSD-3-Clause"
] | 1
|
2019-12-03T16:45:55.000Z
|
2019-12-03T16:47:07.000Z
|
focalplane/utils.py
|
Johannes-Sahlmann/focalplane
|
39dc5bc7ed817321247f652d4c5d3565efccb022
|
[
"BSD-3-Clause"
] | 4
|
2019-10-03T14:20:29.000Z
|
2021-12-27T19:07:16.000Z
|
import copy
from astropy.table import Table
import astropy.units as u
from astropy.time import Time
import numpy as np
from pyia.data import GaiaData
from pystrometry import pystrometry
def correct_for_proper_motion(gaia_table, target_epoch, verbose=False, ignore_parallax=True):
"""Apply proper motion correction to an input Gaia catalog.
Compute positions and uncertainties at an epoch other than the catalog epoch.
    Supports only the Gaia input catalog format, i.e. an astropy table with Gaia-named columns.
TODO:
-----
    Does corrected_values['ra_error'] need to be corrected for the cos(delta) effect?
Parameters
----------
    gaia_table : astropy.table.Table
        Input Gaia catalog with Gaia-named (DR2-style) columns.
    target_epoch : astropy time
        Epoch to which positions and their uncertainties are propagated.
    verbose : bool
        If True, print diagnostic output.
    ignore_parallax : bool
        If True, set parallax to zero to ignore its contribution to the offset
        (that offset is observer-dependent)

    Returns
    -------
    gaia_table : astropy.table.Table
        Copy of the input table in which ra/dec and their errors are replaced by
        the values propagated to target_epoch; the catalog values are preserved
        in additional *_original_<ref_epoch> columns.
"""
gaia_table = copy.deepcopy(gaia_table)
DR2_REF_EPOCH = gaia_table['ref_epoch'][0]
for attribute_name in 'ra dec ra_error dec_error'.split():
gaia_table[
'{}_original_{}'.format(attribute_name, DR2_REF_EPOCH)] = np.full(
len(gaia_table), np.nan)
gaia_table['{}_{:3.1f}'.format(attribute_name, target_epoch.jyear)] = np.full(
len(gaia_table), np.nan)
gaia_data = GaiaData(gaia_table)
for i in range(len(gaia_table)):
if (not np.isnan(gaia_table['parallax'][i])) and (not np.ma.is_masked(gaia_table['parallax'][i])):
gaia_star = gaia_data[i]
covariance_matrix_mas = gaia_star.get_cov(units=dict(ra=u.milliarcsecond,
dec=u.milliarcsecond,
parallax=u.milliarcsecond,
pm_ra=u.milliarcsecond/u.year,
pm_dec=u.milliarcsecond/u.year))
# remove radial velocity component
covariance_matrix_mas = np.squeeze(covariance_matrix_mas)[0:5, 0:5]
if verbose:
print(covariance_matrix_mas)
print(np.diag(covariance_matrix_mas))
tbl_names = ['ra', 'dec', 'parallax', 'pmra', 'pmdec']
for colname in tbl_names:
print('{} = {}'.format(colname, getattr(gaia_star, colname)))
err_colname = '{}_error'.format(colname)
print('{} = {}'.format(err_colname, getattr(gaia_star, err_colname)))
# helper object to get PPM coefficients
T = Table()
T['MJD'] = [target_epoch.utc.mjd]
T['frame'] = 1
T['OB'] = 1
iad = pystrometry.ImagingAstrometryData(T)
iad.RA_deg = gaia_star.ra.to(u.deg).value
iad.Dec_deg = gaia_star.dec.to(u.deg).value
# this step depends on the observer when computing parallax factors
# set reference epoch properly
# https://gea.esac.esa.int/archive/documentation/GDR2/Gaia_archive/chap_datamodel/sec_dm_main_tables/ssec_dm_gaia_source.html
# ref_epoch : Reference epoch (double, Time[Julian Years])
# Reference epoch to which the astrometric source parameters are referred, expressed as a Julian Year in TCB.
# At DR2 this reference epoch is always J2015.5 but in future releases this will be different and not necessarily the same for all sources.
iad.set_five_parameter_coefficients(verbose=False, overwrite=False,
reference_epoch_MJD=Time(gaia_star.ref_epoch[0], format='jyear', scale='tcb').utc.mjd)
if verbose:
print(iad.five_parameter_coefficients_table)
print(iad.five_parameter_coefficients_array)
if ignore_parallax:
gaia_star.parallax = 0. * u.arcsec
delta_ppm_array = np.array([0., 0.,
gaia_star.parallax.to(u.deg).value[0],
gaia_star.pmra.to(u.deg/u.year).value[0],
gaia_star.pmdec.to(u.deg/u.year).value[0]])
[delta_rastar_at_epoch_deg, delta_dec_at_epoch_deg] = np.dot(iad.five_parameter_coefficients_array.T, delta_ppm_array)
dec_at_epoch_deg = gaia_star.dec.to(u.deg).value + delta_dec_at_epoch_deg
if 0:
cos_delta_factor = np.cos(np.deg2rad(gaia_star.dec.to(u.deg).value))
else:
# this is the way simbad is doing it
cos_delta_factor = np.cos(np.deg2rad(dec_at_epoch_deg))
ra_at_epoch_deg = gaia_star.ra.to(u.deg).value + delta_rastar_at_epoch_deg/cos_delta_factor
corrected_values = {}
for ii, jj in enumerate(iad.observing_1D_xi):
prediction_vector = iad.five_parameter_coefficients_array.T[jj]
prediction_uncertainty_x = np.sqrt(
np.dot(np.dot(prediction_vector, covariance_matrix_mas), prediction_vector))
prediction_vector_y = iad.five_parameter_coefficients_array.T[jj + 1]
prediction_uncertainty_y = np.sqrt(
np.dot(np.dot(prediction_vector_y, covariance_matrix_mas), prediction_vector_y))
if verbose:
print(
'{}: (COV) offset and uncertainty in RA : {:3.12f} +/- {:3.12f} mas '.format(
target_epoch.utc.isot, ra_at_epoch_deg, prediction_uncertainty_x))
print(
'{}: (COV) offset and uncertainty in Dec: {:3.12f} +/- {:3.12f} mas '.format(
target_epoch.utc.isot, dec_at_epoch_deg, prediction_uncertainty_y))
corrected_values['ra'] = ra_at_epoch_deg
corrected_values['dec'] = dec_at_epoch_deg
corrected_values['ra_error'] = prediction_uncertainty_x
corrected_values['dec_error'] = prediction_uncertainty_y
for attribute_name in 'ra dec ra_error dec_error'.split():
gaia_table['{}_original_{}'.format(attribute_name, gaia_star.ref_epoch[0].value)][i] = \
gaia_table[attribute_name][i]
gaia_table['{}_{:3.1f}'.format(attribute_name, target_epoch.utc.jyear)][i] = \
corrected_values[attribute_name]
gaia_table['{}'.format(attribute_name)][i] = \
gaia_table['{}_{:3.1f}'.format(attribute_name, target_epoch.utc.jyear)][i]
if verbose:
print(
'Replacing {}={} by proper motion and parallax corrected value of {}'.format(
attribute_name,
                            gaia_table['{}_original_{}'.format(attribute_name, gaia_star.ref_epoch[0].value)][i],
gaia_table['{}'.format(attribute_name)][i]))
return gaia_table
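# Illustrative cross-check (an assumption, not part of this module): for a
# single star, the proper-motion-only propagation done above can be compared
# against astropy's SkyCoord.apply_space_motion. Parallax and radial velocity
# are left out, matching ignore_parallax=True; astropy may warn about the
# missing distance/RV and fall back to defaults.
def _apply_space_motion_sketch(ra_deg, dec_deg, pmra_masyr, pmdec_masyr,
                               ref_epoch=2015.5, target_epoch=2019.5):
    """Return (ra_deg, dec_deg) propagated from ref_epoch to target_epoch."""
    from astropy.coordinates import SkyCoord
    coord = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg,
                     pm_ra_cosdec=pmra_masyr * u.mas / u.yr,
                     pm_dec=pmdec_masyr * u.mas / u.yr,
                     obstime=Time(ref_epoch, format='jyear', scale='tcb'))
    moved = coord.apply_space_motion(new_obstime=Time(target_epoch, format='jyear', scale='tcb'))
    return moved.ra.deg, moved.dec.deg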
| 48.434483
| 151
| 0.587641
|
691a56fd45b886e37365f6c39c22950ead2196ec
| 9,933
|
py
|
Python
|
Poker.py
|
guptaronav/python-projects
|
b70cf63ed5573c5328e7a9c3a1c90d7641044f43
|
[
"MIT"
] | null | null | null |
Poker.py
|
guptaronav/python-projects
|
b70cf63ed5573c5328e7a9c3a1c90d7641044f43
|
[
"MIT"
] | null | null | null |
Poker.py
|
guptaronav/python-projects
|
b70cf63ed5573c5328e7a9c3a1c90d7641044f43
|
[
"MIT"
] | null | null | null |
import random
import time
from collections import Counter
done = 'false'
#here is the animation
def animate():
Count=0
global done
print('loading… |',end="")
while done == 'false':
time.sleep(0.1)
print('/',end="")
time.sleep(0.1)
print('-',end="")
time.sleep(0.1)
print('\\',end="")
time.sleep(0.1)
Count+=1
if Count==10:
done='true'
print()
print('Done!')
animate()
done = 'false'
Card_Deck=[2,3,4,5,6,7,8,9,10,'J','Q','K','A']
Suits=['♠','♣︎','♥︎','♦']
Deck=['2 ♠','3 ♠','4 ♠','5 ♠','6 ♠','7 ♠','8 ♠','9 ♠','10 ♠','J ♠','Q ♠','K ♠','A ♠',
'2 ♣︎','3 ♣︎','4 ♣︎','5 ♣︎','6 ♣︎','7 ♣︎','8 ♣︎','9 ♣︎','10 ♣︎','J ♣︎','Q ♣︎','K ♣︎','A ♣︎',
'2 ♥︎','3 ♥︎','4 ♥︎︎','5 ♥︎','6 ♥︎','7 ♥︎︎','8 ︎♥︎','9 ♥︎︎','10 ♥︎','J ♥︎','Q ♥︎','K ♥︎','A ♥︎',
'2 ♦︎','3 ♦︎','4 ♦︎︎','5 ♦︎','6 ♦︎','7 ♦︎︎','8 ︎♦','9 ♦','10 ♦︎','J ♦︎','Q ♦','K ♦','A ♦']
Deck_Value=[1,2,3,4,5,6,7,8,9,10,11,12,13,
1,2,3,4,5,6,7,8,9,10,11,12,13,
1,2,3,4,5,6,7,8,9,10,11,12,13,
1,2,3,4,5,6,7,8,9,10,11,12,13]
Spades=[0,1,2,3,4,5,6,7,8,9,10,11,12]
Clubs=[13,14,15,16,17,18,19,20,21,22,23,24,25]
Hearts=[26,27,28,29,30,31,32,33,34,35,36,37,38]
Diamonds=[39,40,41,42,43,44,45,46,47,48,49,50,51]
Aces=[12,25,38,51]
Used_Cards=[]
Stats={}
def deal():
A=random.randint(0,51)
if A not in Used_Cards:
Used_Cards.append(A)
return A
else:
return deal()
def Draw_Five():
A=deal()
B=deal()
C=deal()
D=deal()
E=deal()
Cards_in_Hand=[A,B,C,D,E]
return Cards_in_Hand
def Compare(A,B):
if Deck_Value[A]>Deck_Value[B]:
return 1
elif Deck_Value[A]<Deck_Value[B]:
return -1
else:
return 0
def Is_Straight(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort()
if Card_Value[0]+1==Card_Value[1] and Card_Value[1]+1==Card_Value[2] and Card_Value[2]+1==Card_Value[3] and Card_Value[3]+1==Card_Value[4]:
return True
    elif Card_Value[4]==13: #highest card is an ace (rank value 13), so check for the A-2-3-4-5 wheel
if Card_Value[4]-12==Card_Value[0] and Card_Value[0]+1==Card_Value[1] and Card_Value[1]+1==Card_Value[2] and Card_Value[2]+1==Card_Value[3]:
return True
else:
return False
else:
return False
def Print_Cards(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck[i])
print(Card_Value)
def Is_Flush(Cards):
return all(item in Spades for item in Cards) or all(item in Clubs for item in Cards) or all(item in Hearts for item in Cards) or all(item in Diamonds for item in Cards)
def Is_Straight_Flush(Cards):
return Is_Straight(Cards) and Is_Flush(Cards)
def Is_Royal_Flush(Cards):
Cards.sort(reverse=1)
return Cards[0] in Aces and Is_Straight_Flush(Cards)
def OAK(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
return max(Counter(Card_Value).values())
def Get_MRC(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Values=list(Counter(Card_Value).values())
Keys=list(Counter(Card_Value).keys())
Max_Value_Index=Values.index(max(Values))
return Keys[Max_Value_Index]
#GET Top Two Repeat Cards
def Get_TTRC(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Values=list(Counter(Card_Value).values())
Keys=list(Counter(Card_Value).keys())
if 1 in Values:
Min_Value_Index=Values.index(1)
Keys.pop(Min_Value_Index)
return Keys
def Is_Four_of_a_Kind(Cards):
return OAK(Cards)==4
def Is_Three_of_a_Kind(Cards):
return OAK(Cards)==3
def Is_One_Pair(Cards):
return OAK(Cards)==2
def Is_Two_Pair(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
return not Is_Three_of_a_Kind(Cards) and len(Counter(Card_Value).keys())==3
def Is_Full_House(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
return len(Counter(Card_Value).keys())==2 and Is_Three_of_a_Kind(Cards)
def Get_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[0]
def Get_2nd_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[1]
def Get_3rd_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[2]
def Get_4th_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[3]
def Get_5th_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[4]
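# Quick illustrative checks of the mapping the helpers above rely on: positions
# in Deck map to rank values in Deck_Value, all four aces (deck indices 12, 25,
# 38 and 51) carry the highest rank value 13, and OAK counts the most repeated
# rank in a hand (here a pair of 2s: 2 of spades plus 2 of clubs).
assert Deck_Value[12]==Deck_Value[25]==Deck_Value[38]==Deck_Value[51]==13
assert OAK([0,13,2,4,6])==2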
def Play(Name):
Result=10
Cards=Draw_Five()
#Cards=[0,13,2,15,25]
print("Drawing Cards for",Name+"…")
time.sleep(2.5)
Print_Cards(Cards)
if Is_Royal_Flush(Cards):
Result=1
print("You got a Royal Flush and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Straight_Flush(Cards):
Result=2
print("You got a Straight Flush and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Four_of_a_Kind(Cards):
Result=3
print("You got a Four of a Kind of",Card_Deck[Get_MRC(Cards)-1],"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Full_House(Cards):
Result=4
RepeatCards=[]
for dv in Get_TTRC(Cards):
RepeatCards.append(Card_Deck[dv-1])
print("You got a Full House",RepeatCards,"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Flush(Cards):
Result=5
print("You got a Flush and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Straight(Cards):
Result=6
print("You got a Straight and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Three_of_a_Kind(Cards):
Result=7
print("You got a Three of a Kind of",Card_Deck[Get_MRC(Cards)-1],"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Two_Pair(Cards):
Result=8
RepeatCards=[]
for dv in Get_TTRC(Cards):
RepeatCards.append(Card_Deck[dv-1])
print("You got Two Pairs",RepeatCards,"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_One_Pair(Cards):
Result=9
print("You got a Pair of",Card_Deck[Get_MRC(Cards)-1],"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
else:
print("You got a High Card!", Card_Deck[Get_High_Card(Cards)-1])
#print("Your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
Result_Array=[Get_High_Card(Cards),Get_2nd_High_Card(Cards),Get_3rd_High_Card(Cards),Get_4th_High_Card(Cards),Get_5th_High_Card(Cards)]
return Cards,Result,Result_Array,Get_MRC(Cards)
def declare_winner(P1_Name,P1_Score,P2_Name,P2_Score):
if P1_Score>P2_Score:
Stats[P1_Name]+=1
print(P1_Name,"Wins!")
elif P1_Score<P2_Score:
Stats[P2_Name]+=1
print(P2_Name,"Wins!")
def breaktie(P1_Name,P1_Result_Array,P2_Name,P2_Result_Array,idx):
if P1_Result_Array[idx]==P2_Result_Array[idx]:
if idx==4:
            Stats[P2_Name]+=0.5
            Stats[P1_Name]+=0.5
print(P1_Name,"and",P2_Name,"have tied. It's a draw!")
else:
breaktie(P1_Name,P1_Result_Array,P2_Name,P2_Result_Array,idx+1)
else:
declare_winner(P1_Name,P1_Result_Array[idx],P2_Name,P2_Result_Array[idx])
def Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array):
if P1_Result_Array[0]==P2_Result_Array[0]:
breaktie(P1,P1_Result_Array,P2,P2_Result_Array,1)
else:
declare_winner(P1,P1_Result_Array[0],P2,P2_Result_Array[0])
def Start_Game(P1,P2,Game_Number):
print("______________________________________________")
input(P1 + ", Hit Enter when Ready ")
(P1_Cards,P1_Result,P1_Result_Array,P1_MRC)=Play(P1)
for i in range(1,3,1):
print()
input(P2 + ", Hit Enter when Ready ")
(P2_Cards,P2_Result,P2_Result_Array,P2_MRC)=Play(P2)
for i in range(1,3,1):
print()
#comparing results to find a winner
if P1_Result==P2_Result:
if P1_Result in [3,4,7,9]:
if P1_MRC>P2_MRC:
Stats[P1]+=1
print(P1,"Wins!")
elif P1_MRC<P2_MRC:
Stats[P2]+=1
print(P2,"Wins!")
else:
Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array)
elif P1_Result==8:
#both players have 2 pairs
P1_TTRC=Get_TTRC(P1_Cards)
P2_TTRC=Get_TTRC(P2_Cards)
if P1_TTRC[0]>P2_TTRC[0] and P1_TTRC[0]>P2_TTRC[1]:
Stats[P1]+=1
print(P1,"Wins!")
elif P1_TTRC[1]>P2_TTRC[0] and P1_TTRC[0]>P2_TTRC[1]:
Stats[P1]+=1
print(P1,"Wins!")
elif P2_TTRC[0]>P1_TTRC[0] and P2_TTRC[0]>P1_TTRC[1]:
Stats[P2]+=1
print(P2,"Wins!")
elif P2_TTRC[1]>P1_TTRC[0] and P2_TTRC[0]>P1_TTRC[1]:
Stats[P2]+=1
print(P2,"Wins!")
else:
Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array)
else:
Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array)
elif P1_Result>P2_Result:
Stats[P2]+=1
print(P2,"Wins!")
elif P1_Result<P2_Result:
Stats[P1]+=1
print(P1,"Wins!")
print("Current Stats:",Stats)
print("______________________________________________")
Continue=input("Would You Like to Play Again? ")
if "n" not in Continue and "N" not in Continue:
print("Ok, Starting Game",Game_Number+1)
if len(Used_Cards)>42:
print("Our Virtual Deck has ran out of cards. Shuffling…")
time.sleep(1.5)
print("Deck Incoming!")
Used_Cards.clear()
Start_Game(P1,P2,Game_Number+1)
else:
print("Thank You for Playing Poker Online: Multiplayer (Single Deck Edition)!")
print("Welcome To Poker Online: Multiplayer (Single Deck Edition)!")
print()
P1=input("Player 1, Please Enter Your Name: ")
P2=input("Player 2, Please Enter Your Name: ")
Stats[P1]=0
Stats[P2]=0
Start_Game(P1,P2,1)
| 30.191489
| 170
| 0.651868
|
c53167d083904bf31907c498b55592c8704ad153
| 766
|
py
|
Python
|
SLMtools/calculators/bramson.py
|
adlhancock/SLMtools
|
ad4ad9efbadd37a68d9e48cbd7397124bdde3d2d
|
[
"MIT"
] | 1
|
2019-03-24T10:48:55.000Z
|
2019-03-24T10:48:55.000Z
|
SLMtools/calculators/bramson.py
|
chrisheinze/SLMtools
|
ad4ad9efbadd37a68d9e48cbd7397124bdde3d2d
|
[
"MIT"
] | 1
|
2020-11-28T15:50:55.000Z
|
2020-11-29T16:55:28.000Z
|
SLMtools/calculators/bramson.py
|
chrisheinze/SLMtools
|
ad4ad9efbadd37a68d9e48cbd7397124bdde3d2d
|
[
"MIT"
] | 1
|
2019-02-18T17:27:41.000Z
|
2019-02-18T17:27:41.000Z
|
# -*- coding: utf-8 -*-
""" bramson laser coupling calculation tools.
Created on Fri Oct 28 15:34:53 2016
@author: dhancock
"""
def absorptivity(rho,wavelength=1070e-9):
"""
    calculates absorptivity based on resistivity and wavelength
"""
A = rho/wavelength
return 0.365*(A)**0.5 - 0.0667*(A)+(A**3)**0.5
def resistivity(rho,T,factor,T0=293):
"""
calculates temperature dependent resistivity
"""
return rho*(1 + (T-T0)*factor)
if __name__ == '__main__':
materials = ['W','Ta','Mo']
resistivities = [resistivity(rho,293,factor) for rho,factor in zip([5.6e-8,5.2e-8,53.4e-9],[4.5e-3,0,0])]
for material,rho in zip(materials, resistivities):
print(material,' = ',absorptivity(rho))
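    # Hedged extension of the demo above (values illustrative): the two helpers
    # can be chained to estimate absorptivity at an elevated temperature, here
    # tungsten at 1000 K using the resistivity and coefficient listed above.
    rho_w_hot = resistivity(5.6e-8, 1000, 4.5e-3)
    print('W at 1000 K = ', absorptivity(rho_w_hot))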
| 25.533333
| 109
| 0.617493
|
ff092983aef85bf478e31bbfa75a87727c45c861
| 3,923
|
py
|
Python
|
custom_components/xiaomi_cloud_map_extractor/xiaomi_vacuum_map_parser/xiaomi/image_handler.py
|
licheng5625/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
d1af6a71e8287c6f3f4002367146d42451efa1cb
|
[
"MIT"
] | null | null | null |
custom_components/xiaomi_cloud_map_extractor/xiaomi_vacuum_map_parser/xiaomi/image_handler.py
|
licheng5625/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
d1af6a71e8287c6f3f4002367146d42451efa1cb
|
[
"MIT"
] | null | null | null |
custom_components/xiaomi_cloud_map_extractor/xiaomi_vacuum_map_parser/xiaomi/image_handler.py
|
licheng5625/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
d1af6a71e8287c6f3f4002367146d42451efa1cb
|
[
"MIT"
] | null | null | null |
import logging
from typing import Tuple
from PIL import Image
from PIL.Image import Image as ImageType
from ..common.image_handler import ImageHandler
from ..const import *
_LOGGER = logging.getLogger(__name__)
class ImageHandlerXiaomi(ImageHandler):
MAP_OUTSIDE = 0x00
MAP_WALL = 0x01
MAP_INSIDE = 0xFF
MAP_SCAN = 0x07
@staticmethod
def parse(raw_data: bytes, width, height, colors, image_config) -> Tuple[ImageType, dict]:
rooms = {}
scale = image_config[CONF_SCALE]
trim_left = int(image_config[CONF_TRIM][CONF_LEFT] * width / 100)
trim_right = int(image_config[CONF_TRIM][CONF_RIGHT] * width / 100)
trim_top = int(image_config[CONF_TRIM][CONF_TOP] * height / 100)
trim_bottom = int(image_config[CONF_TRIM][CONF_BOTTOM] * height / 100)
trimmed_height = height - trim_top - trim_bottom
trimmed_width = width - trim_left - trim_right
image = Image.new('RGBA', (trimmed_width, trimmed_height))
if width == 0 or height == 0:
return ImageHandler.create_empty_map_image(colors), {}
pixels = image.load()
for img_y in range(trimmed_height):
for img_x in range(trimmed_width):
pixel_type = raw_data[img_x + trim_left + width * (img_y + trim_bottom)]
x = img_x
y = trimmed_height - img_y - 1
if pixel_type == ImageHandlerXiaomi.MAP_OUTSIDE:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_OUTSIDE, colors)
elif pixel_type == ImageHandlerXiaomi.MAP_WALL:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_WALL, colors)
elif pixel_type == ImageHandlerXiaomi.MAP_INSIDE:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_INSIDE, colors)
elif pixel_type == ImageHandlerXiaomi.MAP_SCAN:
pixels[x, y] = ImageHandler.__get_color__(COLOR_SCAN, colors)
else:
obstacle = pixel_type & 0x07
if obstacle == 0:
pixels[x, y] = ImageHandler.__get_color__(COLOR_GREY_WALL, colors)
elif obstacle == 1:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_WALL_V2, colors)
elif obstacle == 7:
room_number = (pixel_type & 0xFF) >> 3
room_x = img_x + trim_left
room_y = img_y + trim_bottom
if room_number not in rooms:
rooms[room_number] = (room_x, room_y, room_x, room_y)
else:
rooms[room_number] = (min(rooms[room_number][0], room_x),
min(rooms[room_number][1], room_y),
max(rooms[room_number][2], room_x),
max(rooms[room_number][3], room_y))
default = ImageHandler.ROOM_COLORS[room_number >> 1]
pixels[x, y] = ImageHandler.__get_color__(f"{COLOR_ROOM_PREFIX}{room_number}", colors, default)
else:
pixels[x, y] = ImageHandler.__get_color__(COLOR_UNKNOWN, colors)
if image_config["scale"] != 1 and width != 0 and height != 0:
image = image.resize((int(trimmed_width * scale), int(trimmed_height * scale)), resample=Image.NEAREST)
return image, rooms
@staticmethod
def get_room_at_pixel(raw_data: bytes, width, x, y):
room_number = None
pixel_type = raw_data[x + width * y]
if pixel_type not in [ImageHandlerXiaomi.MAP_INSIDE, ImageHandlerXiaomi.MAP_SCAN]:
if pixel_type & 0x07 == 7:
room_number = (pixel_type & 0xFF) >> 3
return room_number
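# Illustrative sketch (not part of this integration's API): how a raw pixel
# byte is decoded above, for pixels that are not one of the special values
# (outside/wall/inside/scan) handled first. The low three bits select the
# obstacle type; when they equal 7 the remaining bits carry the room number.
def _decode_pixel_sketch(pixel_type: int):
    """Return ("room", number) for room pixels, otherwise ("obstacle", low_bits)."""
    low_bits = pixel_type & 0x07
    if low_bits == 7:
        return "room", (pixel_type & 0xFF) >> 3
    return "obstacle", low_bits
# e.g. _decode_pixel_sketch(0x17) == ("room", 2)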
| 49.658228
| 119
| 0.573031
|
7dc71effbea78afd3ec1020f72f7db166782ff23
| 4,356
|
py
|
Python
|
train.py
|
spectrtrec/siim_pneumothorax
|
3b16ae482c674c5baf702e64a7c6e19ca9aa71d3
|
[
"MIT"
] | null | null | null |
train.py
|
spectrtrec/siim_pneumothorax
|
3b16ae482c674c5baf702e64a7c6e19ca9aa71d3
|
[
"MIT"
] | null | null | null |
train.py
|
spectrtrec/siim_pneumothorax
|
3b16ae482c674c5baf702e64a7c6e19ca9aa71d3
|
[
"MIT"
] | null | null | null |
import argparse
import os
import sys
import pandas as pd
from keras import backend as K
from keras_radam import RAdam
from augmentations import *
from losses import *
from model import *
from siim_data_loader import *
from utils import *
from segmentation_models import Unet
parser = argparse.ArgumentParser()
parser.add_argument("--network", default="UEfficientNetV2")
parser.add_argument("--fine_size", default=512, type=int, help="Resized image size")
parser.add_argument("--batch_size", default=5, type=int, help="Batch size for training")
parser.add_argument("--train_path", default="pneumotorax512/train/", help="train path")
parser.add_argument("--masks_path", default="pneumotorax512/masks/", help="mask path")
parser.add_argument("--test_path", default="pneumotorax512/test/", help="test path")
parser.add_argument("--pretrain_weights", help="pretrain weights")
parser.add_argument("--epoch", default=30, type=int, help="Number of training epochs")
parser.add_argument("--swa_epoch", default=15, type=int, help="Number of swa epochs")
parser.add_argument("--debug", default=False, type=bool, help="Debug")
args = parser.parse_args()
def train(
list_train,
list_valid,
train_path,
masks_path,
model,
epoch,
batch_size,
fold,
imh_size,
swa_epoch,
):
swa = SWA(f"models/keras_swa_{fold}.model", swa_epoch)
snapshot = SnapshotCallbackBuilder(swa, epoch, 1, fold)
training_generator = DataGenerator(
list_train, train_path, masks_path, AUGMENTATIONS_TRAIN, batch_size, imh_size
)
validation_generator = DataGenerator(
list_valid, train_path, masks_path, AUGMENTATIONS_TEST, batch_size, imh_size
)
history = model.fit_generator(
generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=epoch,
verbose=2,
callbacks=snapshot.get_callbacks(),
)
if __name__ == "__main__":
debug = args.debug
df = pd.read_csv("train_proc_v2_gr.csv")
test_files = [
os.path.splitext(filename)[0]
for filename in os.listdir(os.path.join(os.getcwd(), args.test_path))
]
df_test = pd.DataFrame(test_files, columns=["id"])
epoch = args.epoch
list_thresh = []
for fold in [0]:
print("-----fold-----")
df_train = df[df.fold != fold].copy().reset_index(drop=True)
df_valid = df[df.fold == fold].copy().reset_index(drop=True)
if debug:
df_train = df[df.fold != 0].copy().reset_index(drop=True)
df_valid = df[df.fold == 0].copy().reset_index(drop=True)
df_train = df_train.iloc[:60]
df_valid = df_train.iloc[:60]
df_test = df_test.iloc[:60]
epoch = 3
K.clear_session()
model = get_network(
args.network, input_shape=(args.fine_size, args.fine_size, 3), drop_out=0.5
)
model.compile(loss=bce_dice_loss, optimizer="adam", metrics=[my_iou_metric])
train(
df_train["id"].values,
df_valid["id"].values,
args.train_path,
args.masks_path,
model,
epoch,
args.batch_size,
fold,
args.fine_size,
args.swa_epoch,
)
try:
print("using swa weight model")
model.load_weights(f"models/keras_swa_{fold}.model")
except Exception as e:
print(e)
model.load_weights(f"models/keras_{fold}.model")
val_predict = predict_validation_result(
model,
args.train_path,
args.masks_path,
df_valid["id"].values,
args.batch_size,
args.fine_size,
)
best_threshhold = prderict_best_threshhold(
df_valid["id"].values, args.masks_path, val_predict, args.fine_size
)
list_thresh.append(best_threshhold)
predict = predict_result(
model,
df_test["id"].values,
args.test_path,
args.fine_size,
best_threshhold,
args.batch_size,
fold,
)
if fold == 0:
preds_test = predict
else:
preds_test += predict
submit(preds_test, df_test["id"].values, args.network, max(list_thresh))
| 33
| 88
| 0.626263
|
6727135b96adbe92393cbe8c06cd7f9d3da98b9c
| 2,454
|
py
|
Python
|
train.py
|
sukijelly/wav2vec2_finetune
|
e89dbe44f126ce161a13a5529cdd4b70bf8d3af2
|
[
"Apache-2.0"
] | 33
|
2021-12-27T05:15:37.000Z
|
2022-01-09T23:02:18.000Z
|
train.py
|
sukijelly/wav2vec2_finetune
|
e89dbe44f126ce161a13a5529cdd4b70bf8d3af2
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
sukijelly/wav2vec2_finetune
|
e89dbe44f126ce161a13a5529cdd4b70bf8d3af2
|
[
"Apache-2.0"
] | 2
|
2022-02-22T00:58:31.000Z
|
2022-03-12T13:39:19.000Z
|
import warnings
import numpy as np
import yaml
from transformers import Trainer, TrainingArguments, Wav2Vec2ForCTC
from dataset import dataset
from utils import DataCollatorCTCWithPadding, compute_metrics
def train():
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
with open("config_train.yml") as f:
args = yaml.load(f, Loader=yaml.FullLoader)
dataset_train, dataset_test, processor = dataset(args)
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
model = Wav2Vec2ForCTC.from_pretrained(
args["pretrained_checkpoint_dir"],
attention_dropout=args["attention_dropout"],
hidden_dropout=args["hidden_dropout"],
feat_proj_dropout=args["feat_proj_dropout"],
mask_time_prob=args["mask_time_prob"],
layerdrop=args["layerdrop"],
# gradient_checkpointing=args["gradient_checkpointing"],
ctc_loss_reduction=args["ctc_loss_reduction"],
pad_token_id=processor.tokenizer.pad_token_id,
vocab_size=len(processor.tokenizer),
)
model.freeze_feature_extractor()
print("-------load_pretrained_model_done----------")
training_args = TrainingArguments(
output_dir=args["checkpoint_dir"],
group_by_length=args["group_by_length"],
per_device_train_batch_size=args["batch_size"],
per_device_eval_batch_size=args["batch_size"],
gradient_accumulation_steps=args["gradient_accumulation_steps"],
evaluation_strategy=args["evaluation_strategy"],
num_train_epochs=args["num_train_epochs"],
fp16=args["fp16"],
save_steps=args["save_steps"],
eval_steps=args["eval_steps"],
logging_steps=args["logging_steps"],
weight_decay=args["weight_decay"],
learning_rate=args["learning_rate"],
warmup_steps=args["warmup_steps"],
save_total_limit=args["save_total_limit"],
dataloader_num_workers=args["dataloader_num_workers"],
)
print("-------train_ready_done---------")
trainer = Trainer(
model=model,
data_collator=data_collator,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=dataset_train,
eval_dataset=dataset_test,
tokenizer=processor.feature_extractor,
)
print("-------training_start!---------")
trainer.train()
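# Illustrative sketch only: the keys this script reads from config_train.yml,
# written out with PyYAML so the sketch stays runnable. Every value below is a
# placeholder assumption, not a recommendation.
def write_example_config(path="config_train.yml"):
    example = {
        "pretrained_checkpoint_dir": "facebook/wav2vec2-large-xlsr-53",  # placeholder model id/path
        "attention_dropout": 0.1,
        "hidden_dropout": 0.1,
        "feat_proj_dropout": 0.0,
        "mask_time_prob": 0.05,
        "layerdrop": 0.1,
        "ctc_loss_reduction": "mean",
        "checkpoint_dir": "./checkpoints",
        "group_by_length": True,
        "batch_size": 8,
        "gradient_accumulation_steps": 2,
        "evaluation_strategy": "steps",
        "num_train_epochs": 30,
        "fp16": True,
        "save_steps": 400,
        "eval_steps": 400,
        "logging_steps": 400,
        "weight_decay": 0.005,
        "learning_rate": 3e-4,
        "warmup_steps": 500,
        "save_total_limit": 2,
        "dataloader_num_workers": 4,
    }
    with open(path, "w") as f:
        yaml.dump(example, f)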
| 35.057143
| 81
| 0.700081
|
2b5a02bdc1b0c49e9fe6ddf3fdd58d65b6735225
| 3,523
|
py
|
Python
|
mmdet3d/core/post_processing/merge_augs.py
|
LJoson/mmdetection3d
|
ff159fe7ea2a45f9138487c1c4977d2f67676ebc
|
[
"Apache-2.0"
] | null | null | null |
mmdet3d/core/post_processing/merge_augs.py
|
LJoson/mmdetection3d
|
ff159fe7ea2a45f9138487c1c4977d2f67676ebc
|
[
"Apache-2.0"
] | null | null | null |
mmdet3d/core/post_processing/merge_augs.py
|
LJoson/mmdetection3d
|
ff159fe7ea2a45f9138487c1c4977d2f67676ebc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import nms_bev as nms_gpu
from mmcv.ops import nms_normal_bev as nms_normal_gpu
from ..bbox import bbox3d2result, bbox3d_mapping_back, xywhr2xyxyr
def merge_aug_bboxes_3d(aug_results, img_metas, test_cfg):
"""Merge augmented detection 3D bboxes and scores.
Args:
aug_results (list[dict]): The dict of detection results.
The dict contains the following keys
- boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
- scores_3d (torch.Tensor): Detection scores.
- labels_3d (torch.Tensor): Predicted box labels.
img_metas (list[dict]): Meta information of each sample.
test_cfg (dict): Test config.
Returns:
dict: Bounding boxes results in cpu mode, containing merged results.
- boxes_3d (:obj:`BaseInstance3DBoxes`): Merged detection bbox.
- scores_3d (torch.Tensor): Merged detection scores.
- labels_3d (torch.Tensor): Merged predicted box labels.
"""
assert len(aug_results) == len(img_metas), \
'"aug_results" should have the same length as "img_metas", got len(' \
f'aug_results)={len(aug_results)} and len(img_metas)={len(img_metas)}'
recovered_bboxes = []
recovered_scores = []
recovered_labels = []
for bboxes, img_info in zip(aug_results, img_metas):
scale_factor = img_info[0]['pcd_scale_factor']
pcd_horizontal_flip = img_info[0]['pcd_horizontal_flip']
pcd_vertical_flip = img_info[0]['pcd_vertical_flip']
recovered_scores.append(bboxes['scores_3d'])
recovered_labels.append(bboxes['labels_3d'])
bboxes = bbox3d_mapping_back(bboxes['boxes_3d'], scale_factor,
pcd_horizontal_flip, pcd_vertical_flip)
recovered_bboxes.append(bboxes)
aug_bboxes = recovered_bboxes[0].cat(recovered_bboxes)
aug_bboxes_for_nms = xywhr2xyxyr(aug_bboxes.bev)
aug_scores = torch.cat(recovered_scores, dim=0)
aug_labels = torch.cat(recovered_labels, dim=0)
    # TODO: use a more elegant way to deal with nms
if test_cfg.use_rotate_nms:
nms_func = nms_gpu
else:
nms_func = nms_normal_gpu
merged_bboxes = []
merged_scores = []
merged_labels = []
# Apply multi-class nms when merge bboxes
if len(aug_labels) == 0:
return bbox3d2result(aug_bboxes, aug_scores, aug_labels)
for class_id in range(torch.max(aug_labels).item() + 1):
class_inds = (aug_labels == class_id)
bboxes_i = aug_bboxes[class_inds]
bboxes_nms_i = aug_bboxes_for_nms[class_inds, :]
scores_i = aug_scores[class_inds]
labels_i = aug_labels[class_inds]
if len(bboxes_nms_i) == 0:
continue
selected = nms_func(bboxes_nms_i, scores_i, test_cfg.nms_thr)
merged_bboxes.append(bboxes_i[selected, :])
merged_scores.append(scores_i[selected])
merged_labels.append(labels_i[selected])
merged_bboxes = merged_bboxes[0].cat(merged_bboxes)
merged_scores = torch.cat(merged_scores, dim=0)
merged_labels = torch.cat(merged_labels, dim=0)
_, order = merged_scores.sort(0, descending=True)
num = min(test_cfg.max_num, len(aug_bboxes))
order = order[:num]
merged_bboxes = merged_bboxes[order]
merged_scores = merged_scores[order]
merged_labels = merged_labels[order]
return bbox3d2result(merged_bboxes, merged_scores, merged_labels)
| 37.478723
| 78
| 0.682657
|
e4f758fa1796a61d9e4c9f3af860bd8c2e6ff3ae
| 1,412
|
py
|
Python
|
indico/modules/attachments/views.py
|
salevajo/indico
|
6f9cbabc20d1641caea907099388ae2b04965cf8
|
[
"MIT"
] | 1
|
2021-12-27T17:51:27.000Z
|
2021-12-27T17:51:27.000Z
|
indico/modules/attachments/views.py
|
salevajo/indico
|
6f9cbabc20d1641caea907099388ae2b04965cf8
|
[
"MIT"
] | 5
|
2021-04-08T19:26:47.000Z
|
2022-01-24T16:30:18.000Z
|
indico/modules/attachments/views.py
|
salevajo/indico
|
6f9cbabc20d1641caea907099388ae2b04965cf8
|
[
"MIT"
] | 2
|
2019-02-24T17:29:10.000Z
|
2021-04-08T19:23:27.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.management.views import WPEventManagement
from indico.modules.events.views import WPConferenceDisplayBase, WPSimpleEventDisplayBase
from indico.web.views import WPJinjaMixin
class AttachmentsMixin(WPJinjaMixin):
template_prefix = 'attachments/'
base_wp = None
def _get_page_content(self, params):
return WPJinjaMixin._get_page_content(self, params)
class WPEventAttachments(AttachmentsMixin, WPEventManagement):
base_wp = WPEventManagement
sidemenu_option = 'attachments'
ALLOW_JSON = True
class WPEventFolderDisplay(WPSimpleEventDisplayBase, WPJinjaMixin):
template_prefix = 'attachments/'
def _get_body(self, params):
return WPJinjaMixin._get_page_content(self, params)
class WPPackageEventAttachmentsManagement(WPEventAttachments, WPJinjaMixin):
template_prefix = 'attachments/'
class WPPackageEventAttachmentsDisplayConference(WPConferenceDisplayBase):
template_prefix = 'attachments/'
class WPPackageEventAttachmentsDisplay(WPSimpleEventDisplayBase, WPJinjaMixin):
template_prefix = 'attachments/'
def _get_body(self, params):
return WPJinjaMixin._get_page_content(self, params)
| 30.042553
| 89
| 0.786827
|
57b902c6f3789efb75baca6ce357f2f08efbd6fc
| 8,684
|
py
|
Python
|
mindspore/explainer/explanation/_attribution/_perturbation/rise.py
|
233-puchi/mindspore
|
e9d2684cdb7668eac48169feeff778eeffbfa70e
|
[
"Apache-2.0"
] | null | null | null |
mindspore/explainer/explanation/_attribution/_perturbation/rise.py
|
233-puchi/mindspore
|
e9d2684cdb7668eac48169feeff778eeffbfa70e
|
[
"Apache-2.0"
] | null | null | null |
mindspore/explainer/explanation/_attribution/_perturbation/rise.py
|
233-puchi/mindspore
|
e9d2684cdb7668eac48169feeff778eeffbfa70e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RISE."""
import math
import numpy as np
from mindspore import Tensor
from mindspore.train._utils import check_value_type
from .perturbation import PerturbationAttribution
from .... import _operators as op
from ...._utils import resize
class RISE(PerturbationAttribution):
r"""
RISE: Randomized Input Sampling for Explanation of Black-box Model.
RISE is a perturbation-based method that generates attribution maps by sampling on multiple random binary masks.
The original image is randomly masked, and then fed into the black-box model to get predictions. The final
attribution map is the weighted sum of these random masks, with the weights being the corresponding output on the
node of interest:
.. math::
attribution = \sum_{i}f_c(I\odot M_i) M_i
For more details, please refer to the original paper via: `RISE <https://arxiv.org/abs/1806.07421>`_.
Args:
network (Cell): The black-box model to be explained.
activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. For
single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification
            tasks, `nn.Sigmoid` is usually applied. Users can also pass their own customized `activation_fn` as long
as when combining this function with network, the final output is the probability of the input.
perturbation_per_eval (int, optional): Number of perturbations for each inference during inferring the
perturbed samples. Within the memory capacity, usually the larger this number is, the faster the
explanation is obtained. Default: 32.
Inputs:
- **inputs** (Tensor) - The input data to be explained, a 4D tensor of shape :math:`(N, C, H, W)`.
- **targets** (Tensor, int) - The labels of interest to be explained. When `targets` is an integer,
          all of the inputs will generate attribution maps w.r.t. this integer. When `targets` is a tensor, it
          should be of shape :math:`(N, l)` (l being the number of labels for each sample), :math:`(N,)` or :math:`()`.
Outputs:
Tensor, a 4D tensor of shape :math:`(N, ?, H, W)`.
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore.explainer.explanation import RISE
>>>
>>> # The detail of LeNet5 is shown in model_zoo.official.cv.lenet.src.lenet.py
>>> net = LeNet5(10, num_channel=3)
>>> # initialize RISE explainer with the pretrained model and activation function
>>> activation_fn = ms.nn.Softmax() # softmax layer is applied to transform logits to probabilities
>>> rise = RISE(net, activation_fn=activation_fn)
        >>> # given an instance of RISE, a saliency map can be generated
>>> inputs = ms.Tensor(np.random.rand(2, 3, 32, 32), ms.float32)
>>> # when `targets` is an integer
>>> targets = 5
>>> saliency = rise(inputs, targets)
>>> print(saliency.shape)
(2, 1, 32, 32)
>>> # `targets` can also be a 2D tensor
>>> targets = ms.Tensor([[5], [1]], ms.int32)
>>> saliency = rise(inputs, targets)
>>> print(saliency.shape)
(2, 1, 32, 32)
"""
def __init__(self,
network,
activation_fn,
perturbation_per_eval=32):
super(RISE, self).__init__(network, activation_fn, perturbation_per_eval)
self._num_masks = 6000 # number of masks to be sampled
self._mask_probability = 0.5 # ratio of inputs to be masked
self._down_sample_size = 10 # the original size of binary masks
self._resize_mode = 'bilinear' # mode choice to resize the down-sized binary masks to size of the inputs
self._perturbation_mode = 'constant' # setting the perturbed pixels to a constant value
self._base_value = 0 # setting the perturbed pixels to this constant value
self._num_classes = None # placeholder of self._num_classes just for future assignment in other methods
def _generate_masks(self, data, batch_size):
"""Generate a batch of binary masks for data."""
height, width = data.shape[2], data.shape[3]
mask_size = (self._down_sample_size, self._down_sample_size)
up_size = (height + mask_size[0], width + mask_size[1])
mask = np.random.random((batch_size, 1) + mask_size) < self._mask_probability
upsample = resize(op.Tensor(mask, data.dtype), up_size,
self._resize_mode).asnumpy()
shift_x = np.random.randint(0, mask_size[0] + 1, size=batch_size)
shift_y = np.random.randint(0, mask_size[1] + 1, size=batch_size)
masks = [sample[:, x_i: x_i + height, y_i: y_i + width] for sample, x_i, y_i
in zip(upsample, shift_x, shift_y)]
masks = Tensor(np.array(masks), data.dtype)
return masks
def __call__(self, inputs, targets):
"""Generates attribution maps for inputs."""
self._verify_data(inputs, targets)
height, width = inputs.shape[2], inputs.shape[3]
if self._num_classes is None:
self._num_classes = self.network(inputs).shape[1]
# Due to the unsupported Op of slice assignment, we use numpy array here
targets = self._unify_targets(inputs, targets)
attr_np = np.zeros(shape=(inputs.shape[0], targets.shape[1], height, width))
cal_times = math.ceil(self._num_masks / self._perturbation_per_eval)
for idx, data in enumerate(inputs):
bg_data = data * 0 + self._base_value
data = op.reshape(data, (1, -1, height, width))
for j in range(cal_times):
bs = min(self._num_masks - j * self._perturbation_per_eval,
self._perturbation_per_eval)
masks = self._generate_masks(data, bs)
weights = masks * data + (1 - masks) * bg_data
weights = self._activation_fn(self.network(weights))
while len(weights.shape) > 2:
weights = op.mean(weights, axis=2)
weights = np.expand_dims(np.expand_dims(weights.asnumpy()[:, targets[idx]], 2), 3)
attr_np[idx] += np.sum(weights * masks.asnumpy(), axis=0)
attr_np = attr_np / self._num_masks
return op.Tensor(attr_np, dtype=inputs.dtype)
@staticmethod
def _verify_data(inputs, targets):
"""Verify the validity of the parsed inputs."""
check_value_type('inputs', inputs, Tensor)
if len(inputs.shape) != 4:
raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')
check_value_type('targets', targets, (Tensor, int, tuple, list))
if isinstance(targets, Tensor):
if len(targets.shape) > 2:
raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '
'But got {}D.'.format(len(targets.shape)))
if targets.shape and len(targets) != len(inputs):
raise ValueError(
'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. But got {}.'.format(
len(inputs), len(targets)))
@staticmethod
def _unify_targets(inputs, targets):
"""To unify targets to be 2D numpy.ndarray."""
if isinstance(targets, int):
return np.array([[targets] for _ in inputs]).astype(np.int)
if isinstance(targets, Tensor):
if not targets.shape:
return np.array([[targets.asnumpy()] for _ in inputs]).astype(np.int)
if len(targets.shape) == 1:
return np.array([[t.asnumpy()] for t in targets]).astype(np.int)
if len(targets.shape) == 2:
return np.array([t.asnumpy() for t in targets]).astype(np.int)
return targets
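# Framework-free sketch (illustrative only, not part of the MindSpore API) of
# the RISE idea implemented above: sample random binary masks, score the masked
# inputs with any callable returning class probabilities, and average the masks
# weighted by the probability of the target class. Unlike the class above, this
# sketch uses nearest-neighbour upsampling and no random shift.
def _rise_numpy_sketch(image, predict_fn, target, num_masks=1000, mask_prob=0.5, cell=10):
    """image: (H, W, C) ndarray; predict_fn: (H, W, C) -> (num_classes,) probabilities."""
    height, width = image.shape[0], image.shape[1]
    saliency = np.zeros((height, width))
    for _ in range(num_masks):
        coarse = np.random.random((cell, cell)) < mask_prob
        mask = coarse.repeat(height // cell + 1, axis=0).repeat(width // cell + 1, axis=1)
        mask = mask[:height, :width].astype(image.dtype)
        score = predict_fn(image * mask[..., None])[target]
        saliency += score * mask
    return saliency / num_masks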
| 47.195652
| 119
| 0.632427
|
97f739e5843130113d68738ab079dc90b7bb5f7c
| 999
|
py
|
Python
|
Lib/test/test_jython_initializer.py
|
jeff5/jython-whinchat
|
65d8e5268189f8197295ff2d91be3decb1ee0081
|
[
"CNRI-Jython"
] | 577
|
2020-06-04T16:34:44.000Z
|
2022-03-31T11:46:07.000Z
|
Lib/test/test_jython_initializer.py
|
jeff5/jython-whinchat
|
65d8e5268189f8197295ff2d91be3decb1ee0081
|
[
"CNRI-Jython"
] | 174
|
2015-01-08T20:37:09.000Z
|
2020-06-03T16:48:59.000Z
|
Lib/test/test_jython_initializer.py
|
jeff5/jython-whinchat
|
65d8e5268189f8197295ff2d91be3decb1ee0081
|
[
"CNRI-Jython"
] | 162
|
2015-02-07T02:14:38.000Z
|
2020-05-30T16:42:03.000Z
|
import os
import subprocess
import sys
import unittest
from test import test_support
WINDOWS = (os._name if test_support.is_jython else os.name) == 'nt'
class TestUsingInitializer(unittest.TestCase):
def test_syspath_initializer(self):
fn = test_support.findfile('check_for_initializer_in_syspath.py')
jar = test_support.findfile('syspath_initializer.jar')
env = dict(CLASSPATH=jar,
JAVA_HOME=sys.registry['java.home'],
PATH=os.environ.get('PATH', ''))
if WINDOWS:
# TMP is needed to give property java.io.tmpdir a sensible value
env['TMP'] = os.environ.get('TMP', '.')
# SystemRoot is needed to remote debug the subprocess JVM
env['SystemRoot'] = os.environ.get('SystemRoot', '')
self.assertEquals(0, subprocess.call([sys.executable, fn], env=env))
def test_main():
test_support.run_unittest(TestUsingInitializer)
if __name__ == "__main__":
test_main()
| 32.225806
| 76
| 0.660661
|
3029a3746b38534c2ebd74f22527c303a107e1a6
| 2,203
|
py
|
Python
|
ckan/views/dashboard.py
|
salsadigitalauorg/ckan
|
7586d78682c30f205027522214f33ee2bf413055
|
[
"BSD-3-Clause"
] | null | null | null |
ckan/views/dashboard.py
|
salsadigitalauorg/ckan
|
7586d78682c30f205027522214f33ee2bf413055
|
[
"BSD-3-Clause"
] | null | null | null |
ckan/views/dashboard.py
|
salsadigitalauorg/ckan
|
7586d78682c30f205027522214f33ee2bf413055
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
from __future__ import annotations
import logging
from typing import Any, cast
from flask import Blueprint
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.logic as logic
import ckan.model as model
from ckan.common import _, g
from ckan.views.user import _extra_template_variables
from ckan.types import Context
log = logging.getLogger(__name__)
dashboard = Blueprint(u'dashboard', __name__, url_prefix=u'/dashboard')
@dashboard.before_request
def before_request() -> None:
if not g.userobj:
h.flash_error(_(u'Not authorized to see this page'))
# flask types do not mention that it's possible to return a response
# from the `before_request` callback
return h.redirect_to(u'user.login') # type: ignore
try:
context = cast(Context, {
"model": model, "user": g.user, "auth_user_obj": g.userobj
})
logic.check_access(u'site_read', context)
except logic.NotAuthorized:
base.abort(403, _(u'Not authorized to see this page'))
return None
def datasets() -> str:
context: Context = {
u'for_view': True, u'user': g.user, u'auth_user_obj': g.userobj}
data_dict: dict[str, Any] = {
u'user_obj': g.userobj,
u'include_datasets': True}
extra_vars = _extra_template_variables(context, data_dict)
return base.render(u'user/dashboard_datasets.html', extra_vars)
def organizations() -> str:
context: Context = {
u'for_view': True, u'user': g.user, u'auth_user_obj': g.userobj}
data_dict = {u'user_obj': g.userobj}
extra_vars = _extra_template_variables(context, data_dict)
return base.render(u'user/dashboard_organizations.html', extra_vars)
def groups() -> str:
context: Context = {
u'for_view': True, u'user': g.user, u'auth_user_obj': g.userobj}
data_dict = {u'user_obj': g.userobj}
extra_vars = _extra_template_variables(context, data_dict)
return base.render(u'user/dashboard_groups.html', extra_vars)
dashboard.add_url_rule(u'/datasets', view_func=datasets)
dashboard.add_url_rule(u'/groups', view_func=groups)
dashboard.add_url_rule(u'/organizations', view_func=organizations)
| 31.471429
| 76
| 0.704494
|
0c0b72291b69e569685817f01fdef608359963ab
| 51,596
|
py
|
Python
|
bayesian/api_v1.py
|
arajkumar/fabric8-analytics-server
|
110767da04da00fefb7e36f9c28624f554835990
|
[
"Apache-2.0"
] | null | null | null |
bayesian/api_v1.py
|
arajkumar/fabric8-analytics-server
|
110767da04da00fefb7e36f9c28624f554835990
|
[
"Apache-2.0"
] | null | null | null |
bayesian/api_v1.py
|
arajkumar/fabric8-analytics-server
|
110767da04da00fefb7e36f9c28624f554835990
|
[
"Apache-2.0"
] | null | null | null |
"""Definition of all REST API endpoints of the server module."""
import datetime
import functools
import uuid
import re
import urllib
import tempfile
import json
import time
from collections import defaultdict
import botocore
from requests_futures.sessions import FuturesSession
from flask import Blueprint, current_app, request, url_for, Response, g
from flask.json import jsonify
from flask_restful import Api, Resource, reqparse
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.dialects.postgresql import insert
from selinon import StoragePool
from f8a_worker.models import (
Ecosystem, StackAnalysisRequest, RecommendationFeedback)
from f8a_worker.utils import (MavenCoordinates, case_sensitivity_transform)
from f8a_worker.manifests import get_manifest_descriptor_by_filename
from . import rdb, cache
from .dependency_finder import DependencyFinder
from fabric8a_auth.auth import login_required
from .auth import get_access_token
from .exceptions import HTTPError
from .utils import (get_system_version, retrieve_worker_result,
server_create_component_bookkeeping, GraphAnalyses,
server_create_analysis, get_analyses_from_graph,
search_packages_from_graph, fetch_file_from_github_release,
get_item_from_list_by_key_value, RecommendationReason,
retrieve_worker_results, get_next_component_from_graph, set_tags_to_component,
is_valid, select_latest_version, get_categories_data, get_core_dependencies,
create_directory_structure, push_repo, get_booster_core_repo,
get_recommendation_feedback_by_ecosystem, CveByDateEcosystemUtils,
server_run_flow, resolved_files_exist, fetch_sa_request, request_timed_out,
get_ecosystem_from_manifest, check_for_accepted_ecosystem)
from .license_extractor import extract_licenses
from .manifest_models import MavenPom
import os
from f8a_worker.storages import AmazonS3
from .generate_manifest import PomXMLTemplate
from .default_config import COMPONENT_ANALYSES_LIMIT
from fabric8a_auth.errors import AuthError
# TODO: improve maintainability index
# TODO: https://github.com/fabric8-analytics/fabric8-analytics-server/issues/373
errors = {
'AuthError': {
'status': 401,
'message': 'Authentication failed',
'some_description': 'Authentication failed'
}}
api_v1 = Blueprint('api_v1', __name__, url_prefix='/api/v1')
rest_api_v1 = Api(api_v1, errors=errors)
pagination_parser = reqparse.RequestParser()
pagination_parser.add_argument('page', type=int, default=0)
pagination_parser.add_argument('per_page', type=int, default=50)
ANALYSIS_ACCESS_COUNT_KEY = 'access_count'
TOTAL_COUNT_KEY = 'total_count'
ANALYTICS_API_VERSION = "v1.0"
HOSTNAME = os.environ.get('HOSTNAME', 'bayesian-api')
METRICS_SERVICE_URL = "http://{}:{}".format(
os.environ.get('METRICS_ACCUMULATOR_HOST', 'metrics-accumulator'),
os.environ.get('METRICS_ACCUMULATOR_PORT', '5200')
)
worker_count = int(os.getenv('FUTURES_SESSION_WORKER_COUNT', '100'))
_session = FuturesSession(max_workers=worker_count)
@api_v1.route('/_error')
def error():
"""Implement the endpoint used by httpd, which redirects its errors to it."""
try:
status = int(request.environ['REDIRECT_STATUS'])
except Exception:
# if there's an exception, it means that a client accessed this directly;
# in this case, we want to make it look like the endpoint is not here
return api_404_handler()
msg = 'Unknown error'
# for now, we just provide specific error for stuff that already happened;
# before adding more, I'd like to see them actually happening with reproducers
if status == 401:
msg = 'Authentication failed'
elif status == 405:
msg = 'Method not allowed for this endpoint'
raise HTTPError(status, msg)
@api_v1.route('/readiness')
def readiness():
"""Handle the /readiness REST API call."""
return jsonify({}), 200
@api_v1.route('/liveness')
def liveness():
"""Handle the /liveness REST API call."""
# Check database connection
current_app.logger.debug("Liveness probe - trying to connect to database "
"and execute a query")
rdb.session.query(Ecosystem).count()
return jsonify({}), 200
def get_item_skip(page, per_page):
"""Get the number of items to skip for the first page-1 pages."""
return per_page * page
def get_item_relative_limit(page, per_page):
"""Get the maximum possible number of items on one page."""
return per_page
def get_item_absolute_limit(page, per_page):
"""Get the total possible number of items."""
return per_page * (page + 1)
def get_items_for_page(items, page, per_page):
"""Get all items for specified page and number of items to be used per page."""
return items[get_item_skip(page, per_page):get_item_absolute_limit(page, per_page)]
# TODO: do we really need paginated output?
def paginated(func):
"""Provide paginated output for longer responses."""
@functools.wraps(func)
def inner(*args, **kwargs):
func_res = func(*args, **kwargs)
res, code, headers = func_res, 200, {}
# TODO: please explain the logic for the code below:
if isinstance(res, tuple):
if len(res) == 3:
res, code, headers = func_res
elif len(res) == 2:
res, code = func_res
else:
                raise HTTPError(500, 'Internal error')
args = pagination_parser.parse_args()
page, per_page = args['page'], args['per_page']
count = res[TOTAL_COUNT_KEY]
# first and last page handling
previous_page = None if page == 0 else page - 1
next_page = None if get_item_absolute_limit(page, per_page) >= count else page + 1
view_args = request.view_args.copy()
view_args['per_page'] = per_page
view_args['page'] = previous_page
paging = []
if previous_page is not None:
paging.append({'url': url_for(request.endpoint, **view_args), 'rel': 'prev'})
view_args['page'] = next_page
if next_page is not None:
paging.append({'url': url_for(request.endpoint, **view_args), 'rel': 'next'})
# put the info about pages into HTTP header for the response
headers['Link'] = ', '.join(['<{url}>; rel="{rel}"'.format(**d) for d in paging])
return res, code, headers
return inner
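# Worked example of the arithmetic `paginated` relies on (illustrative only):
# for page=1 and per_page=50 over 120 items the decorator serves items[50:100]
# and, because 100 < 120, advertises a rel="next" link for page 2 alongside the
# rel="prev" link for page 0 in the Link header.
def _paging_arithmetic_example():
    page, per_page, count = 1, 50, 120
    skip = get_item_skip(page, per_page)  # 50
    absolute_limit = get_item_absolute_limit(page, per_page)  # 100
    return skip, absolute_limit, absolute_limit < count  # (50, 100, True) -> a next page exists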
# flask-restful doesn't actually store a list of endpoints, so we register them as they
# pass through add_resource_no_matter_slashes
_resource_paths = []
def add_resource_no_matter_slashes(resource, route, endpoint=None, defaults=None):
"""Add a resource for both trailing slash and no trailing slash to prevent redirects."""
slashless = route.rstrip('/')
_resource_paths.append(api_v1.url_prefix + slashless)
slashful = route + '/'
endpoint = endpoint or resource.__name__.lower()
defaults = defaults or {}
# resources with and without slashes
rest_api_v1.add_resource(resource,
slashless,
endpoint=endpoint + '__slashless',
defaults=defaults)
rest_api_v1.add_resource(resource,
slashful,
endpoint=endpoint + '__slashful',
defaults=defaults)
class ApiEndpoints(Resource):
"""Implementation of / REST API call."""
def get(self):
"""Handle the GET REST API call."""
return {'paths': sorted(_resource_paths)}
class SystemVersion(Resource):
"""Implementation of /system/version REST API call."""
@staticmethod
def get():
"""Handle the GET REST API call."""
return get_system_version()
class ComponentSearch(Resource):
"""Implementation of /component-search REST API call."""
method_decorators = [login_required]
def get(self, package):
"""Handle the GET REST API call."""
if not package:
msg = "Please enter a valid search term"
raise HTTPError(406, msg)
# Tokenize the search term before calling graph search
result = search_packages_from_graph(re.split(r'\W+', package))
return result
class ComponentAnalyses(Resource):
"""Implementation of all /component-analyses REST API calls."""
method_decorators = [login_required]
@staticmethod
def get(ecosystem, package, version):
"""Handle the GET REST API call."""
security_vendor = request.headers.get('security-vendor', None)
st = time.time()
metrics_payload = {
"pid": os.getpid(),
"hostname": HOSTNAME,
"endpoint": request.endpoint,
"request_method": "GET",
"ecosystem": ecosystem,
"package": package,
"version": version
}
package = urllib.parse.unquote(package)
if not check_for_accepted_ecosystem(ecosystem):
msg = "Ecosystem {ecosystem} is not supported for this request".format(
ecosystem=ecosystem
)
raise HTTPError(400, msg)
if ecosystem == 'maven':
try:
package = MavenCoordinates.normalize_str(package)
except ValueError:
msg = "Invalid maven format - {pkg}".format(
pkg=package
)
metrics_payload.update({"status_code": 400, "value": time.time() - st})
_session.post(url=METRICS_SERVICE_URL + "/api/v1/prometheus", json=metrics_payload)
raise HTTPError(400, msg)
package = case_sensitivity_transform(ecosystem, package)
# Querying GraphDB for CVE Info.
if security_vendor:
graph_obj = GraphAnalyses(ecosystem, package, version, vendor=security_vendor)
result = graph_obj.get_analyses_for_snyk()
else:
# Old Flow
result = get_analyses_from_graph(ecosystem, package, version)
if result is not None:
# Known component for Bayesian
server_create_component_bookkeeping(ecosystem, package, version, g.decoded_token)
metrics_payload.update({"status_code": 200, "value": time.time() - st})
_session.post(url=METRICS_SERVICE_URL + "/api/v1/prometheus", json=metrics_payload)
return result
if os.environ.get("INVOKE_API_WORKERS", "") == "1":
# Enter the unknown path
server_create_analysis(ecosystem, package, version, user_profile=g.decoded_token,
api_flow=True, force=False, force_graph_sync=True)
msg = "Package {ecosystem}/{package}/{version} is unavailable. " \
"The package will be available shortly," \
" please retry after some time.".format(ecosystem=ecosystem, package=package,
version=version)
metrics_payload.update({"status_code": 202, "value": time.time() - st})
_session.post(url=METRICS_SERVICE_URL + "/api/v1/prometheus", json=metrics_payload)
return {'error': msg}, 202
else:
# no data has been found
server_create_analysis(ecosystem, package, version, user_profile=g.decoded_token,
api_flow=False, force=False, force_graph_sync=True)
msg = "No data found for {ecosystem} package " \
"{package}/{version}".format(ecosystem=ecosystem,
package=package, version=version)
metrics_payload.update({"status_code": 404, "value": time.time() - st})
_session.post(url=METRICS_SERVICE_URL + "/api/v1/prometheus", json=metrics_payload)
raise HTTPError(404, msg)
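# Illustrative request for the endpoint above (the path prefix depends on how api_v1 is
# registered, and the coordinates are a made-up example):
#   GET /component-analyses/maven/io.vertx:vertx-core/3.4.1
# Known EPVs return the graph analysis, unknown ones return 202 while ingestion is
# triggered (when INVOKE_API_WORKERS=1), and 404 otherwise.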
class ComponentAnalysesPOST(Resource):
"""Implementation of /component-analyses REST API calls."""
method_decorators = [login_required]
@staticmethod
def post():
"""Handle the POST REST API call."""
input_json = request.get_json()
if not input_json:
raise HTTPError(400, error="Expected JSON request")
if not isinstance(input_json, list):
raise HTTPError(400, error="Expected list of dependencies in JSON request")
if len(input_json) > COMPONENT_ANALYSES_LIMIT:
raise HTTPError(400, error="Could not process more than {} dependencies at once."
.format(COMPONENT_ANALYSES_LIMIT))
results = list()
for dependency in input_json:
ecosystem = dependency.get('ecosystem')
package = dependency.get('package')
version = dependency.get('version')
if not all([ecosystem, package, version]):
raise HTTPError(422, "provide the valid input.")
if not check_for_accepted_ecosystem(ecosystem):
msg = "Ecosystem {ecosystem} is not supported for this request".format(
ecosystem=ecosystem
)
raise HTTPError(400, msg)
if ecosystem == 'maven':
package = MavenCoordinates.normalize_str(package)
package = case_sensitivity_transform(ecosystem, package)
result = get_analyses_from_graph(ecosystem, package, version)
if result is not None:
# Known component for Bayesian
server_create_component_bookkeeping(ecosystem, package, version, g.decoded_token)
results.append(result)
elif os.environ.get("INVOKE_API_WORKERS", "") == "1":
# Enter the unknown path
server_create_analysis(ecosystem, package, version, user_profile=g.decoded_token,
api_flow=True, force=False, force_graph_sync=True)
msg = "Package {ecosystem}/{package}/{version} is unavailable. " \
"The package will be available shortly," \
" please retry after some time.".format(ecosystem=ecosystem, package=package,
version=version)
results.append({"package": package, "message": msg})
else:
# no data has been found
server_create_analysis(ecosystem, package, version, user_profile=g.decoded_token,
api_flow=False, force=False, force_graph_sync=True)
msg = "No data found for {ecosystem} package " \
"{package}/{version}".format(ecosystem=ecosystem,
package=package, version=version)
results.append({"package": package, "message": msg})
return results, 200
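# Illustrative request body for the batched endpoint above -- a JSON list of
# {ecosystem, package, version} objects (the coordinates below are made-up examples):
#   [{"ecosystem": "npm", "package": "lodash", "version": "4.17.4"},
#    {"ecosystem": "pypi", "package": "requests", "version": "2.20.0"}]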
class StackAnalysesGET(Resource):
"""Implementation of the /stack-analyses GET REST API call method."""
method_decorators = [login_required]
@staticmethod
def get(external_request_id):
"""Handle the GET REST API call."""
# TODO: reduce cyclomatic complexity
# TODO: refactor the business logic into its own function defined outside api_v1.py
db_result = fetch_sa_request(rdb, external_request_id)
if db_result is None:
raise HTTPError(404, "Invalid request ID '{t}'.".format(t=external_request_id))
graph_agg = retrieve_worker_result(rdb, external_request_id, "GraphAggregatorTask")
if graph_agg is not None and 'task_result' in graph_agg:
if graph_agg['task_result'] is None:
raise HTTPError(500, 'Invalid manifest file(s) received. '
'Please submit valid manifest files for stack analysis')
stack_result = retrieve_worker_result(rdb, external_request_id, "stack_aggregator_v2")
reco_result = retrieve_worker_result(rdb, external_request_id, "recommendation_v2")
if stack_result is None or reco_result is None:
# If the response is not ready and the timeout period is over, send error 408
if request_timed_out(db_result):
raise HTTPError(408, "Stack analysis request {t} has timed out. Please retry "
"with a new analysis.".format(t=external_request_id))
else:
return {'error': "Analysis for request ID '{t}' is in progress".format(
t=external_request_id)}, 202
if stack_result == -1 and reco_result == -1:
raise HTTPError(404, "Worker result for request ID '{t}' doesn't exist yet".format(
t=external_request_id))
started_at = None
finished_at = None
version = None
release = None
manifest_response = []
stacks = []
recommendations = []
if stack_result is not None and 'task_result' in stack_result:
started_at = stack_result.get("task_result", {}).get("_audit", {}).get("started_at",
started_at)
finished_at = stack_result.get("task_result", {}).get("_audit", {}).get("ended_at",
finished_at)
version = stack_result.get("task_result", {}).get("_audit", {}).get("version",
version)
release = stack_result.get("task_result", {}).get("_release", release)
stacks = stack_result.get("task_result", {}).get("stack_data", stacks)
if reco_result is not None and 'task_result' in reco_result:
recommendations = reco_result.get("task_result", {}).get("recommendations",
recommendations)
if not stacks:
return {
"version": version,
"release": release,
"started_at": started_at,
"finished_at": finished_at,
"request_id": external_request_id,
"result": manifest_response
}
for stack in stacks:
user_stack_deps = stack.get('user_stack_info', {}).get('analyzed_dependencies', [])
stack_recommendation = get_item_from_list_by_key_value(recommendations,
"manifest_file_path",
stack.get("manifest_file_path"))
for dep in user_stack_deps:
# Adding topics from the recommendations
if stack_recommendation is not None:
dep["topic_list"] = stack_recommendation.get("input_stack_topics",
{}).get(dep.get('name'), [])
else:
dep["topic_list"] = []
# There should not be any recommendations if there are no analyzed dependencies
user_stack_deps_count = stack.get('user_stack_info', {}). \
get('analyzed_dependencies_count', [])
if user_stack_deps_count == 0:
stack["recommendation"] = {
"alternate": [],
"companion": [],
"usage_outliers": [],
"input_stack_topics": {},
"manifest_file_path": stack.get("manifest_file_path", ""),
"missing_packages_pgm": []}
else:
stack["recommendation"] = get_item_from_list_by_key_value(
recommendations,
"manifest_file_path",
stack.get("manifest_file_path"))
manifest_response.append(stack)
# Populate reason for alternate and companion recommendation
manifest_response = RecommendationReason().add_reco_reason(manifest_response)
resp = {
"version": version,
"release": release,
"started_at": started_at,
"finished_at": finished_at,
"request_id": external_request_id,
"result": manifest_response
}
return resp
@api_v1.route('/stack-analyses/<external_request_id>/_debug')
@login_required
def stack_analyses_debug(external_request_id):
"""Debug endpoint exposing operational data for particular stack analysis.
This endpoint is not part of the public API.
Note the existence of the data is not guaranteed,
therefore the endpoint can return 404 even for valid request IDs.
"""
results = retrieve_worker_results(rdb, external_request_id)
if not results:
return jsonify(error='No operational data for the request ID'), 404
response = {'tasks': []}
for result in results:
op_data = result.to_dict()
audit = op_data.get('task_result', {}).get('_audit', {})
task_data = {'task_name': op_data.get('worker'),
'started_at': audit.get('started_at'),
'ended_at': audit.get('ended_at'),
'error': op_data.get('error')}
response['tasks'].append(task_data)
return jsonify(response), 200
class UserFeedback(Resource):
"""Implementation of /user-feedback POST REST API call."""
method_decorators = [login_required]
_ANALYTICS_BUCKET_NAME = "{}-{}".format(
os.environ.get('DEPLOYMENT_PREFIX', 'unknown'),
os.environ.get("AWS_ANALYTICS_BUCKET", "bayesian-user-feedback"))
@staticmethod
def post():
"""Handle the POST REST API call."""
input_json = request.get_json()
# TODO: refactor the business logic into its own function defined outside api_v1.py
# TODO: two cases should be handled here:
# 1) no JSON at all
# 2) JSON without 'request_id'
if not input_json or 'request_id' not in input_json:
raise HTTPError(400, error="Expected JSON request")
if 'feedback' not in input_json:
raise HTTPError(400, error="Expected feedback")
s3 = AmazonS3(bucket_name=UserFeedback._ANALYTICS_BUCKET_NAME)
s3.connect()
# Store data
key = "{}".format(input_json["request_id"])
s3.store_dict(input_json, key)
return {'status': 'success'}
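# Illustrative payload for UserFeedback.post above (field values are made up); only
# 'request_id' and 'feedback' are validated, the whole document is stored to S3 as-is:
#   {"request_id": "abc123", "feedback": [{"question": "recommendation quality", "is_positive": true}]}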
class UserIntent(Resource):
"""Implementation of /user-intent POST REST API call."""
method_decorators = [login_required]
@staticmethod
def post():
"""Handle the POST REST API call."""
# TODO: refactor the business logic into its own function defined outside api_v1.py
input_json = request.get_json()
if not input_json:
raise HTTPError(400, error="Expected JSON request")
if 'manual_tagging' not in input_json:
if 'ecosystem' not in input_json:
raise HTTPError(400, error="Expected ecosystem in the request")
if 'data' not in input_json:
raise HTTPError(400, error="Expected data in the request")
# TODO: please use proper class constant here, like in
# UserFeedback._ANALYTICS_BUCKET_NAME
s3 = StoragePool.get_connected_storage('S3UserIntent')
# Store data
return s3.store_master_tags(input_json)
else:
if 'user' not in input_json:
raise HTTPError(400, error="Expected user name in the request")
if 'data' not in input_json:
raise HTTPError(400, error="Expected tags in the request")
# TODO: please use proper class constant here, like in
# UserFeedback._ANALYTICS_BUCKET_NAME
s3 = StoragePool.get_connected_storage('S3ManualTagging')
# Store data
return s3.store_user_data(input_json)
class UserIntentGET(Resource):
"""Implementation of /user-intent GET REST API call."""
method_decorators = [login_required]
@staticmethod
def get(user, ecosystem):
"""Handle the GET REST API call."""
# TODO: refactor the business logic into its own function defined outside api_v1.py
if not user:
raise HTTPError(400, error="Expected user name in the request")
if not ecosystem:
raise HTTPError(400, error="Expected ecosystem in the request")
# TODO: please use proper class constant here, like in UserFeedback._ANALYTICS_BUCKET_NAME
s3 = StoragePool.get_connected_storage('S3ManualTagging')
# get user data
try:
result = s3.fetch_user_data(user, ecosystem)
except botocore.exceptions.ClientError:
err_msg = "Failed to fetch data for the user {u}, ecosystem {e}".format(u=user,
e=ecosystem)
current_app.logger.exception(err_msg)
raise HTTPError(404, error=err_msg)
return result
class MasterTagsGET(Resource):
"""Implementation of /master-tags REST API call."""
method_decorators = [login_required]
# TODO: move the timeout constant to the config file
@staticmethod
@cache.memoize(timeout=604800) # 7 days
def get(ecosystem):
"""Handle the GET REST API call."""
# TODO: refactor the business logic into its own function defined outside api_v1.py
if not ecosystem:
raise HTTPError(400, error="Expected ecosystem in the request")
# TODO: please use proper class constant here, like in UserFeedback._ANALYTICS_BUCKET_NAME
s3 = StoragePool.get_connected_storage('S3UserIntent')
# get user data
try:
result = s3.fetch_master_tags(ecosystem)
except botocore.exceptions.ClientError:
err_msg = "Failed to fetch master tags for the ecosystem {e}".format(e=ecosystem)
current_app.logger.exception(err_msg)
raise HTTPError(404, error=err_msg)
return result
def __repr__(self):
"""Return textual representatin of classname + the id."""
return "{}({})".format(self.__class__.__name__, self.id)
class GetNextComponent(Resource):
"""Implementation of all /get-next-component REST API call."""
method_decorators = [login_required]
@staticmethod
def post(ecosystem):
"""Handle the POST REST API call."""
if not ecosystem:
raise HTTPError(400, error="Expected ecosystem in the request")
# TODO: refactor the business logic into its own function defined outside api_v1.py
pkg = get_next_component_from_graph(
ecosystem,
g.decoded_token.get('email'),
g.decoded_token.get('company'),
)
# check for package data
if pkg:
return pkg[0]
else:
raise HTTPError(200, error="No package found for tagging.")
class SetTagsToComponent(Resource):
"""Implementation of all /set-tags REST API calls."""
method_decorators = [login_required]
@staticmethod
def post():
"""Handle the POST REST API call."""
input_json = request.get_json()
# sanity checks
if not input_json:
raise HTTPError(400, error="Expected JSON request")
# ecosystem name is expected in the payload
if 'ecosystem' not in input_json:
raise HTTPError(400, error="Expected ecosystem in the request")
# component name is expected in the payload
if 'component' not in input_json:
raise HTTPError(400, error="Expected component in the request")
# at least one tag is expected in the payload
if 'tags' not in input_json or not any(input_json.get('tags', [])):
raise HTTPError(400, error="Expected some tags in the request")
# start the business logic
status, _error = set_tags_to_component(input_json.get('ecosystem'),
input_json.get('component'),
input_json.get('tags'),
g.decoded_token.get('email'),
g.decoded_token.get('company'))
if status:
return {'status': 'success'}, 200
else:
raise HTTPError(400, error=_error)
class GenerateManifest(Resource):
"""Implementation of the /generate-file REST API call."""
method_decorators = [login_required]
@staticmethod
def post():
"""Handle the POST REST API call with the manifest file."""
input_json = request.get_json()
if 'ecosystem' not in input_json:
raise HTTPError(400, "Must provide an ecosystem")
if input_json.get('ecosystem') == 'maven':
return Response(
PomXMLTemplate(input_json).xml_string(),
headers={
"Content-disposition": 'attachment;filename=pom.xml',
"Content-Type": "text/xml;charset=utf-8"
}
)
else:
return Response(
    json.dumps({'result': "ecosystem '{}' is not yet supported".format(
        input_json['ecosystem'])}),
    status=400,
    mimetype='application/json'
)
class StackAnalyses(Resource):
"""Implementation of all /stack-analyses REST API calls."""
method_decorators = [login_required]
@staticmethod
def post():
"""Handle the POST REST API call."""
# TODO: reduce cyclomatic complexity
# TODO: refactor the business logic into its own function defined outside api_v1.py
github_token = get_access_token('github')
sid = request.args.get('sid')
license_files = list()
check_license = request.args.get('check_license', 'false') == 'true'
github_url = request.form.get("github_url")
ref = request.form.get('github_ref')
is_scan_enabled = request.headers.get('IsScanEnabled', "false")
ecosystem = request.headers.get('ecosystem')
origin = request.headers.get('origin')
show_transitive = request.headers.get('showTransitiveReport') \
or os.environ.get('SHOW_TRANSITIVE_REPORT', "false")
scan_repo_url = request.headers.get('ScanRepoUrl')
# Map ecosystem synonyms coming from clients onto their canonical names.
ecosystem_synonyms = {"node": "npm", "python": "pypi", "java": "maven"}
ecosystem = ecosystem_synonyms.get(ecosystem, ecosystem)
source = request.form.get('source')
if not (scan_repo_url and ecosystem):
if github_url is not None:
files = fetch_file_from_github_release(url=github_url,
filename='pom.xml',
token=github_token.get('access_token'),
ref=ref)
else:
files = request.files.getlist('manifest[]')
filepaths = request.values.getlist('filePath[]')
license_files = request.files.getlist('license[]')
current_app.logger.info('%r' % files)
current_app.logger.info('%r' % filepaths)
# At least one manifest file path should be present to analyse a stack
if not filepaths:
raise HTTPError(400, error="Error processing request. "
"Please send a valid manifest file path.")
if len(files) != len(filepaths):
raise HTTPError(400,
error="Error processing request. "
"Number of manifests and filePaths must be the same.")
# At least one manifest file should be present to analyse a stack
if not files:
raise HTTPError(400,
error="Error processing request. "
"Please upload a valid manifest files.")
else:
files = []
dt = datetime.datetime.now()
if sid:
request_id = sid
is_modified_flag = {'is_modified': True}
else:
request_id = uuid.uuid4().hex
is_modified_flag = {'is_modified': False}
manifests = []
for index, manifest_file_raw in enumerate(files):
if github_url is not None:
filename = manifest_file_raw.get('filename', None)
filepath = manifest_file_raw.get('filepath', None)
content = manifest_file_raw.get('content')
else:
filename = manifest_file_raw.filename
filepath = filepaths[index]
content = manifest_file_raw.read().decode('utf-8')
# For the flow generated from a build we do not need to go through mercator
if origin != "vscode" and not resolved_files_exist(filename):
# check if manifest files with given name are supported
manifest_descriptor = get_manifest_descriptor_by_filename(filename)
if manifest_descriptor is None:
raise HTTPError(400, error="Manifest file '{filename}' is not supported".format(
filename=filename))
# Check if the manifest is valid
if not manifest_descriptor.validate(content):
raise HTTPError(400,
error="Error processing request. Please upload a valid "
"manifest file '{filename}'".format(filename=filename))
# Record the response details for this manifest file
manifest = {'filename': filename,
'content': content,
'filepath': filepath}
try:
# Exception is raised when origin is vscode and ecosystem header is not set.
manifest['ecosystem'] = ecosystem or manifest_descriptor.ecosystem
except UnboundLocalError:
raise HTTPError(400, error="ecosystem header must be set.")
manifests.append(manifest)
if not ecosystem:
ecosystem = get_ecosystem_from_manifest(manifests)
data = {'api_name': 'stack_analyses'}
args = {'external_request_id': request_id,
'ecosystem': ecosystem, 'data': data}
try:
api_url = current_app.config['F8_API_BACKBONE_HOST']
d = DependencyFinder()
deps = {}
worker_flow_enabled = False
if resolved_files_exist(manifests):
# This condition is for the flow from vscode
deps = d.scan_and_find_dependencies(ecosystem, manifests, show_transitive)
elif scan_repo_url and ecosystem:
# This condition is for the build flow
args = {'git_url': scan_repo_url,
'ecosystem': ecosystem,
'is_scan_enabled': is_scan_enabled,
'request_id': request_id,
'is_modified_flag': is_modified_flag,
'auth_key': request.headers.get('Authorization'),
'check_license': check_license,
'gh_token': github_token
}
server_run_flow('gitOperationsFlow', args)
# Flag to prevent calling of backbone twice
worker_flow_enabled = True
else:
# The default flow via mercator
deps = d.execute(args, rdb.session, manifests, source)
deps['external_request_id'] = request_id
deps['current_stack_license'] = extract_licenses(license_files)
deps['show_transitive'] = show_transitive
deps.update(is_modified_flag)
if not worker_flow_enabled:
# No need to call backbone if its already called via worker flow
_session.post(
'{}/api/v1/stack_aggregator'.format(api_url), json=deps,
params={'check_license': str(check_license).lower()})
_session.post('{}/api/v1/recommender'.format(api_url), json=deps,
params={'check_license': str(check_license).lower()})
except (ValueError, json.JSONDecodeError) as e:
HTTPError(400, "Invalid dependencies encountered. %r" % e)
except Exception as exc:
raise HTTPError(500, ("Could not process {t}."
.format(t=request_id))) from exc
try:
insert_stmt = insert(StackAnalysisRequest).values(
id=request_id,
submitTime=str(dt),
requestJson={'manifest': manifests},
dep_snapshot=deps
)
do_update_stmt = insert_stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(dep_snapshot=deps)
)
rdb.session.execute(do_update_stmt)
rdb.session.commit()
return {"status": "success", "submitted_at": str(dt), "id": str(request_id)}
except SQLAlchemyError as e:
# TODO: please log the actual error too here
raise HTTPError(500, "Error updating log for request {t}".format(t=sid)) from e
@staticmethod
def get():
"""Handle the GET REST API call."""
raise HTTPError(404, "Unsupported API endpoint")
class SubmitFeedback(Resource):
"""Implementation of /submit-feedback POST REST API call."""
method_decorators = [login_required]
@staticmethod
def post():
"""Handle the POST REST API call."""
input_json = request.get_json()
if not input_json:
raise HTTPError(400, error="Expected JSON request")
stack_id = input_json.get('stack_id')
recommendation_type = input_json.get('recommendation_type')
package_name = input_json.get('package_name')
feedback_type = input_json.get('feedback_type')
ecosystem_name = input_json.get('ecosystem')
conditions = [is_valid(stack_id),
is_valid(recommendation_type),
is_valid(package_name),
is_valid(feedback_type),
is_valid(ecosystem_name)]
if not all(conditions):
raise HTTPError(400, error="Expected parameters missing")
# Insert in a single commit. Gains - a) performance, b) avoid insert inconsistencies
# for a single request
try:
ecosystem_obj = Ecosystem.by_name(rdb.session, name=ecosystem_name)
req = RecommendationFeedback(
stack_id=stack_id,
package_name=package_name,
recommendation_type=recommendation_type,
feedback_type=feedback_type,
ecosystem_id=ecosystem_obj.id
)
rdb.session.add(req)
rdb.session.commit()
return {'status': 'success'}
except SQLAlchemyError as e:
# TODO: please log the actual error too here
current_app.logger.exception(
'Failed to create new analysis request')
raise HTTPError(
500, "Error inserting log for request {t}".format(t=stack_id)) from e
class CategoryService(Resource):
"""Implementation of Dependency editor category service GET REST API call."""
method_decorators = [login_required]
@staticmethod
def get(runtime):
"""Handle the GET REST API call."""
# TODO: refactor
categories = defaultdict(lambda: {'pkg_count': 0, 'packages': list()})
gremlin_resp = get_categories_data(re.sub('[^A-Za-z0-9]+', '', runtime))
response = {
'categories': dict(),
'request_id': gremlin_resp.get('requestId')
}
if 'result' in gremlin_resp and 'requestId' in gremlin_resp:
data = gremlin_resp.get('result')
if 'data' in data and data['data']:
for items in data.get('data'):
category = items.get('category')
package = items.get('package')
if category and package:
pkg_count = category.get('category_deps_count', [0])[0]
_category = category.get('ctname', [''])[0]
pkg_name = package.get('name', [''])[0]
pkg_description = package.get('description', [''])[0]
libio_version = package.get('libio_latest_version', [''])[0]
version = package.get('latest_version', [''])[0]
latest_version = select_latest_version(
version, libio_version, pkg_name)
categories[_category]['pkg_count'] = pkg_count
categories[_category]['packages'].append({
'name': pkg_name,
'version': latest_version,
'description': pkg_description,
'category': _category
})
response['categories'] = categories
else:
get_categories_data.clear_cache()
return response, 200
class CoreDependencies(Resource):
"""Implementation of Blank booster /get-core-dependencies REST API call."""
method_decorators = [login_required]
@staticmethod
def get(runtime):
"""Handle the GET REST API call."""
try:
resolved = []
dependencies = get_core_dependencies(runtime)
request_id = uuid.uuid4().hex
for elem in dependencies:
packages = {}
packages['package'] = elem['groupId'] + ':' + elem['artifactId']
if elem.get('version'):
packages['version'] = elem['version']
if elem.get('scope'):
packages['scope'] = elem['scope']
resolved.append(packages)
response = {
"_resolved": resolved,
"ecosystem": "maven",
"request_id": request_id
}
return response, 200
except Exception as e:
current_app.logger.error('ERROR: {}'.format(str(e)))
class EmptyBooster(Resource):
"""Implementation of /empty-booster POST REST API call."""
method_decorators = [login_required]
@staticmethod
def post():
"""Handle the POST REST API request."""
remote_repo = request.form.get('gitRepository')
_exists = os.path.exists
if not remote_repo:
raise HTTPError(400, error="Expected gitRepository in request")
runtime = request.form.get('runtime')
if not runtime:
raise HTTPError(400, error="Expected runtime in request")
booster_core_repo = get_booster_core_repo()
if not _exists(booster_core_repo):
get_booster_core_repo.cache.clear()
booster_core_repo = get_booster_core_repo()
pom_template = os.path.join(booster_core_repo, 'pom.template.xml')
jenkinsfile = os.path.join(booster_core_repo, 'Jenkinsfile')
core_json = os.path.join(booster_core_repo, 'core.json')
if not all(map(_exists, [pom_template, jenkinsfile, core_json])):
raise HTTPError(500, "Some necessary files are missing in core dependencies Repository")
core_deps = json.load(open(core_json))
dependencies = [dict(zip(['groupId', 'artifactId', 'version'],
d.split(':'))) for d in request.form.getlist('dependency')]
dependencies += core_deps.get(runtime, [])
git_org = request.form.get('gitOrganization')
github_token = get_access_token('github').get('access_token', '')
maven_obj = MavenPom(open(pom_template).read())
maven_obj['version'] = '1.0.0-SNAPSHOT'
maven_obj['artifactId'] = re.sub('[^A-Za-z0-9-]+', '', runtime) + '-application'
maven_obj['groupId'] = 'io.openshift.booster'
maven_obj.add_dependencies(dependencies)
build_config = core_deps.get(runtime + '-' + 'build')
if build_config:
maven_obj.add_element(build_config, 'build', next_to='dependencies')
dir_struct = {
'name': 'booster',
'type': 'dir',
'contains': [{'name': 'src',
'type': 'dir',
'contains': [
{'name': 'main/java/io/openshift/booster',
'type': 'dir',
'contains': {'name': 'Booster.java',
'contains': 'package io.openshift.booster;\
\npublic class Booster {\
\n public static void main(String[] args) { }\
\n} '}
},
{'name': 'test/java/io/openshift/booster',
'type': 'dir',
'contains': {'name': 'BoosterTest.java',
'contains': 'package io.openshift.booster;\
\n\npublic class BoosterTest { } '}
}]},
{'name': 'pom.xml',
'type': 'file',
'contains': MavenPom.tostring(maven_obj)},
{'name': "Jenkinsfile",
'contains': open(jenkinsfile).read()}
]
}
# use mkdtemp so the scratch directory is not removed while it is still in use
booster_dir = tempfile.mkdtemp()
create_directory_structure(booster_dir, dir_struct)
push_repo(github_token, os.path.join(booster_dir, dir_struct.get('name')),
remote_repo, organization=git_org, auto_remove=True)
return {'status': 'ok'}, 200
class RecommendationFB(Resource):
"""Implementation of /recommendation_feedback/<ecosystem> API call."""
@staticmethod
def get(ecosystem):
"""Implement GET method."""
if not ecosystem:
raise HTTPError(400, error="Expected ecosystem in the request")
result = get_recommendation_feedback_by_ecosystem(ecosystem)
return jsonify(result)
class CveByDateEcosystem(Resource):
"""Implementation of api endpoint for CVEs bydate & further filter by ecosystem if provided."""
method_decorators = [login_required]
@staticmethod
def get(modified_date, ecosystem='all'):
"""Implement GET Method."""
if not modified_date:
raise HTTPError(400, error="Expected date in the request")
try:
datetime.datetime.strptime(modified_date, '%Y%m%d')
except ValueError:
msg = 'Invalid datetime specified. Please specify in YYYYMMDD format'
raise HTTPError(400, msg)
try:
cve_sources = request.args.get('cve_sources', 'all')
date_range = int(request.args.get('date_range', 7))
if date_range <= 0:
raise HTTPError(400, error="date_range parameter should be a positive integer")
getcve = CveByDateEcosystemUtils(None, cve_sources,
modified_date, ecosystem, date_range)
result = getcve.get_cves_by_date_ecosystem()
except Exception as e:
current_app.logger.error('ERROR: {}'.format(str(e)))
msg = "No cve data found for {ecosystem} ".format(ecosystem=ecosystem)
raise HTTPError(404, msg)
return result, 200
class EpvsByCveidService(Resource):
"""Implementation of api endpoint for EPVs bycveid."""
method_decorators = [login_required]
@staticmethod
def get(cve_id):
"""Implement GET Method."""
try:
getcve = CveByDateEcosystemUtils(cve_id)
result = getcve.get_cves_epv_by_date()
except Exception as e:
current_app.logger.error('ERROR: {}'.format(str(e)))
msg = "No epv data found for {cve_id} ".format(cve_id=cve_id)
raise HTTPError(404, msg)
return result, 200
add_resource_no_matter_slashes(ApiEndpoints, '')
add_resource_no_matter_slashes(ComponentSearch, '/component-search/<package>',
endpoint='get_components')
add_resource_no_matter_slashes(ComponentAnalyses,
'/component-analyses/<ecosystem>/<package>/<version>',
endpoint='get_component_analysis')
add_resource_no_matter_slashes(ComponentAnalysesPOST,
'/component-analyses',
endpoint='post_component_analysis')
add_resource_no_matter_slashes(SystemVersion, '/system/version')
add_resource_no_matter_slashes(StackAnalyses, '/stack-analyses')
add_resource_no_matter_slashes(StackAnalysesGET, '/stack-analyses/<external_request_id>')
add_resource_no_matter_slashes(UserFeedback, '/user-feedback')
add_resource_no_matter_slashes(UserIntent, '/user-intent')
add_resource_no_matter_slashes(UserIntentGET, '/user-intent/<user>/<ecosystem>')
add_resource_no_matter_slashes(MasterTagsGET, '/master-tags/<ecosystem>')
add_resource_no_matter_slashes(GenerateManifest, '/generate-file')
add_resource_no_matter_slashes(
GetNextComponent, '/get-next-component/<ecosystem>')
add_resource_no_matter_slashes(SetTagsToComponent, '/set-tags')
add_resource_no_matter_slashes(CategoryService, '/categories/<runtime>')
add_resource_no_matter_slashes(SubmitFeedback, '/submit-feedback')
add_resource_no_matter_slashes(CoreDependencies, '/get-core-dependencies/<runtime>')
add_resource_no_matter_slashes(EmptyBooster, '/empty-booster')
add_resource_no_matter_slashes(RecommendationFB, '/recommendation_feedback/<ecosystem>')
add_resource_no_matter_slashes(CveByDateEcosystem, '/cves/bydate/<modified_date>/<ecosystem>')
add_resource_no_matter_slashes(EpvsByCveidService, '/epvs/bycveid/<cve_id>')
@api_v1.errorhandler(HTTPError)
def handle_http_error(err):
"""Handle HTTPError exceptions."""
return jsonify({'error': err.error}), err.status_code
@api_v1.errorhandler(AuthError)
def api_401_handler(err):
"""Handle AuthError exceptions."""
return jsonify(error=err.error), err.status_code
# workaround https://github.com/mitsuhiko/flask/issues/1498
# NOTE: this *must* come at the end, otherwise it'll overwrite rules defined
# after it
@api_v1.route('/<path:invalid_path>')
def api_404_handler(*args, **kwargs):
"""Handle all other routes not defined above."""
return jsonify(error='Cannot match given query to any API v1 endpoint'), 404
| 41.509252 | 100 | 0.592507 |
ef0e819626fd442b923dfb353403ebd9f939fb4d | 1,019 | py | Python | phone_sensor/test_phone_sensor.py | CallumJHays/mvt-phonesensor-app | f1e9665a22ea85535b9704c3f07f46b486edc81b | ["MIT"] | 1 | 2021-09-08T09:30:21.000Z | 2021-09-08T09:30:21.000Z | phone_sensor/test_phone_sensor.py | CallumJHays/machinevision-toolbox-python.phone-sensor | f1e9665a22ea85535b9704c3f07f46b486edc81b | ["MIT"] | 2 | 2021-03-02T04:48:08.000Z | 2021-03-15T00:58:07.000Z | phone_sensor/test_phone_sensor.py | CallumJHays/mvt-phonesensor-app | f1e9665a22ea85535b9704c3f07f46b486edc81b | ["MIT"] | null | null | null |
from http import HTTPStatus
from phone_sensor import PhoneSensor
import unittest
from urllib.request import urlopen
import ssl
class TestPhoneSensor(unittest.TestCase):
def test_constructor(self):
PhoneSensor().close()
def test_server_shutsdown(self):
with PhoneSensor():
pass
# this would throw an error if the server wasn't shut down
phone = PhoneSensor()
phone.close()
def test_hosts_client(self):
host, port = 'localhost', 8000
# need to tell urlopen to trust the ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with PhoneSensor(host=host, port=port):
with urlopen(f'https://{host}:{port}', context=ctx) \
as client_html:
assert client_html.status == HTTPStatus.OK
# testing client-functionality will require https://github.com/pyppeteer/pyppeteer
if __name__ == '__main__':
unittest.main()
| 26.128205 | 82 | 0.654563 |
83a6b37fe814ffe685a6a31082e7f7f9df05ca46 | 38,302 | py | Python | testdata/bin/generate-schema-statements.py | radford-nguyen/impala | eb95c912cb7e720caf66388942b4e94d95e95658 | ["Apache-2.0"] | 1 | 2019-04-22T10:32:39.000Z | 2019-04-22T10:32:39.000Z | testdata/bin/generate-schema-statements.py | grcpeng/impala | 8412c772cb551c59fdb6e9dd59b49043f4aa9ef6 | ["Apache-2.0"] | 1 | 2019-05-06T07:23:34.000Z | 2019-05-06T07:23:34.000Z | testdata/bin/generate-schema-statements.py | grcpeng/impala | 8412c772cb551c59fdb6e9dd59b49043f4aa9ef6 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script generates statements to create and populate
# tables in a variety of formats. The tables and formats are
# defined through a combination of files:
# 1. Workload format specifics specify for each workload
# which formats are part of core, exhaustive, etc.
# This operates via the normal test dimensions.
# (see tests/common/test_dimension.py and
# testdata/workloads/*/*.csv)
# 2. Workload table availability constraints specify which
# tables exist for which formats.
# (see testdata/datasets/*/schema_constraints.csv)
# The arguments to this script specify the workload and
# exploration strategy and can optionally restrict it
# further to individual tables.
#
# This script is generating several SQL scripts to be
# executed by bin/load-data.py. The two scripts are tightly
# coupled and any change in files generated must be
# reflected in bin/load-data.py. Currently, this script
# generates three things:
# 1. It creates the directory (destroying the existing
# directory if necessary)
# ${IMPALA_DATA_LOADING_SQL_DIR}/${workload}
# 2. It creates and populates a subdirectory
# avro_schemas/${workload} with JSON files specifying
# the Avro schema for each table.
# 3. It generates SQL files with the following naming schema:
#
# Using the following variables:
# workload_exploration = ${workload}-${exploration_strategy} and
# file_format_suffix = ${file_format}-${codec}-${compression_type}
#
# A. Impala table creation scripts run in Impala to create tables, partitions,
# and views. There is one for each file format. They take the form:
# create-${workload_exploration}-impala-generated-${file_format_suffix}.sql
#
# B. Hive creation/load scripts run in Hive to load data into tables and create
# tables or views that Impala does not support. There is one for each
# file format. They take the form:
# load-${workload_exploration}-hive-generated-${file_format_suffix}.sql
#
# C. HBase creation script runs through the hbase commandline to create
# HBase tables. (Only generated if loading HBase table.) It takes the form:
# load-${workload_exploration}-hbase-generated.create
#
# D. HBase postload script runs through the hbase commandline to flush
# HBase tables. (Only generated if loading HBase table.) It takes the form:
# post-load-${workload_exploration}-hbase-generated.sql
#
# E. Impala load scripts run in Impala to load data. Only Parquet and Kudu
# are loaded through Impala. There is one for each of those formats loaded.
# They take the form:
# load-${workload_exploration}-impala-generated-${file_format_suffix}.sql
#
# F. Invalidation script runs through Impala to invalidate/refresh metadata
# for tables. It takes the form:
# invalidate-${workload_exploration}-impala-generated.sql
#
# In summary, table "CREATE" statements are mostly done by Impala. Any "CREATE"
# statements that Impala does not support are done through Hive. Loading data
# into tables mostly runs in Hive except for Parquet and Kudu tables.
# Loading proceeds in two parts: First, data is loaded into text tables.
# Second, almost all other formats are populated by inserts from the text
# table. Since data loaded in Hive may not be visible in Impala, all tables
# need to have metadata refreshed or invalidated before access in Impala.
# This means that loading Parquet or Kudu requires invalidating source
# tables. It also means that invalidate needs to happen at the end of dataload.
#
# For tables requiring customized actions to create schemas or place data,
# this script allows the table specification to include commands that
# this will execute as part of generating the SQL for table. If the command
# generates output, that output is used for that section. This is useful
# for custom tables that rely on loading specific files into HDFS or
# for tables where specifying the schema is tedious (e.g. wide tables).
# This should be used sparingly, because these commands are executed
# serially.
#
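# For example (illustrative; assuming workload "functional-query", exploration strategy
# "core" and the text/none table format) the generated scripts are named along the
# lines of:
#   create-functional-query-core-impala-generated-text-none-none.sql
#   load-functional-query-core-hive-generated-text-none-none.sql
#   invalidate-functional-query-core-impala-generated.sql
#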
import collections
import csv
import glob
import json
import math
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
from itertools import product
from optparse import OptionParser
from tests.util.test_file_parser import *
from tests.common.test_dimensions import *
parser = OptionParser()
parser.add_option("-e", "--exploration_strategy", dest="exploration_strategy",
default="core", help="The exploration strategy for schema gen: 'core',"\
" 'pairwise', or 'exhaustive'")
parser.add_option("--hive_warehouse_dir", dest="hive_warehouse_dir",
default="/test-warehouse",
help="The HDFS path to the base Hive test warehouse directory")
parser.add_option("-w", "--workload", dest="workload",
help="The workload to generate schema for: tpch, hive-benchmark, ...")
parser.add_option("-s", "--scale_factor", dest="scale_factor", default="",
help="An optional scale factor to generate the schema for")
parser.add_option("-f", "--force_reload", dest="force_reload", action="store_true",
default= False, help='Skips HDFS exists check and reloads all tables')
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default = False, help="If set, outputs additional logging.")
parser.add_option("-b", "--backend", dest="backend", default="localhost:21000",
help="Backend connection to use, default: localhost:21000")
parser.add_option("--table_names", dest="table_names", default=None,
help="Only load the specified tables - specified as a comma-seperated "\
"list of base table names")
parser.add_option("--table_formats", dest="table_formats", default=None,
help="Override the test vectors and load using the specified table "\
"formats. Ex. --table_formats=seq/snap/block,text/none")
parser.add_option("--hdfs_namenode", dest="hdfs_namenode", default="localhost:20500",
help="HDFS name node for Avro schema URLs, default localhost:20500")
(options, args) = parser.parse_args()
if options.workload is None:
print "A workload name must be specified."
parser.print_help()
sys.exit(1)
WORKLOAD_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata', 'workloads')
DATASET_DIR = os.path.join(os.environ['IMPALA_HOME'], 'testdata', 'datasets')
SQL_OUTPUT_DIR = os.environ['IMPALA_DATA_LOADING_SQL_DIR']
AVRO_SCHEMA_DIR = "avro_schemas"
DEFAULT_FS=os.environ['DEFAULT_FS']
IMPALA_SUPPORTED_INSERT_FORMATS = ['parquet', 'hbase', 'text', 'kudu']
COMPRESSION_TYPE = "SET mapred.output.compression.type=%s;"
COMPRESSION_ENABLED = "SET hive.exec.compress.output=%s;"
COMPRESSION_CODEC = "SET mapred.output.compression.codec=%s;"
AVRO_COMPRESSION_CODEC = "SET avro.output.codec=%s;"
SET_DYNAMIC_PARTITION_STATEMENT = "SET hive.exec.dynamic.partition=true;"
SET_PARTITION_MODE_NONSTRICT_STATEMENT = "SET hive.exec.dynamic.partition.mode=nonstrict;"
SET_HIVE_INPUT_FORMAT = "SET mapred.max.split.size=256000000;\n"\
"SET hive.input.format=org.apache.hadoop.hive.ql.io.%s;\n"
SET_HIVE_HBASE_BULK_LOAD = "SET hive.hbase.bulk = true"
FILE_FORMAT_IDX = 0
DATASET_IDX = 1
CODEC_IDX = 2
COMPRESSION_TYPE_IDX = 3
COMPRESSION_MAP = {'def': 'org.apache.hadoop.io.compress.DefaultCodec',
'gzip': 'org.apache.hadoop.io.compress.GzipCodec',
'bzip': 'org.apache.hadoop.io.compress.BZip2Codec',
'snap': 'org.apache.hadoop.io.compress.SnappyCodec',
'lzo': 'com.hadoop.compression.lzo.LzopCodec',
'none': ''
}
AVRO_COMPRESSION_MAP = {
'def': 'deflate',
'snap': 'snappy',
'none': '',
}
FILE_FORMAT_MAP = {
'text': 'TEXTFILE',
'seq': 'SEQUENCEFILE',
'rc': 'RCFILE',
'orc': 'ORC',
'parquet': 'PARQUET',
'text_lzo':
"\nINPUTFORMAT 'com.hadoop.mapred.DeprecatedLzoTextInputFormat'" +
"\nOUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'",
'avro': 'AVRO',
'hbase': "'org.apache.hadoop.hive.hbase.HBaseStorageHandler'",
'kudu': "KUDU",
}
HIVE_TO_AVRO_TYPE_MAP = {
'STRING': 'string',
'INT': 'int',
'TINYINT': 'int',
'SMALLINT': 'int',
'BIGINT': 'long',
'BOOLEAN': 'boolean',
'FLOAT': 'float',
'DOUBLE': 'double',
# Avro has no timestamp type, so convert to string
# TODO: this allows us to create our Avro test tables, but any tests that use
# a timestamp column will fail. We probably want to convert back to timestamps
# in our tests.
'TIMESTAMP': 'string',
}
PARQUET_ALTER_STATEMENT = "ALTER TABLE %(table_name)s SET\n\
SERDEPROPERTIES ('blocksize' = '1073741824', 'compression' = '%(compression)s');"
HBASE_CREATE_STATEMENT = """
CREATE EXTERNAL TABLE IF NOT EXISTS {{db_name}}{{db_suffix}}.{{table_name}} (
{columns})
STORED BY {{file_format}}
WITH SERDEPROPERTIES (
"hbase.columns.mapping" =
"{hbase_column_mapping}")
{tbl_properties}{{hdfs_location}}"""
KNOWN_EXPLORATION_STRATEGIES = ['core', 'pairwise', 'exhaustive', 'lzo']
def build_create_statement(table_template, table_name, db_name, db_suffix,
file_format, compression, hdfs_location,
force_reload):
create_stmt = ''
if (force_reload):
create_stmt += 'DROP TABLE IF EXISTS %s%s.%s;\n' % (db_name, db_suffix, table_name)
if compression == 'lzo':
file_format = '%s_%s' % (file_format, compression)
# hbase / kudu tables are external, and not read from hdfs. We don't need an
# hdfs_location.
if file_format in ['hbase', 'kudu']:
hdfs_location = str()
# Remove location part from the format string
table_template = table_template.replace("LOCATION '{hdfs_location}'", "")
create_stmt += table_template.format(db_name=db_name,
db_suffix=db_suffix,
table_name=table_name,
file_format=FILE_FORMAT_MAP[file_format],
hdfs_location=hdfs_location)
return create_stmt
def build_table_template(file_format, columns, partition_columns, row_format,
avro_schema_dir, table_name, table_properties):
if file_format == 'hbase':
return build_hbase_create_stmt_in_hive(columns, partition_columns, table_name)
primary_keys_clause = ""
partitioned_by = str()
if partition_columns:
partitioned_by = 'PARTITIONED BY (%s)' % ', '.join(partition_columns.split('\n'))
row_format_stmt = str()
if row_format and file_format != 'kudu':
row_format_stmt = 'ROW FORMAT ' + row_format
file_format_string = "STORED AS {file_format}"
tblproperties_clause = "TBLPROPERTIES (\n{0}\n)"
tblproperties = {}
external = "EXTERNAL"
if file_format == 'avro':
# TODO Is this flag ever used?
if options.hdfs_namenode is None:
tblproperties["avro.schema.url"] = "%s/%s/%s/{table_name}.json" \
% (DEFAULT_FS, options.hive_warehouse_dir, avro_schema_dir)
else:
tblproperties["avro.schema.url"] = "hdfs://%s/%s/%s/{table_name}.json" \
% (options.hdfs_namenode, options.hive_warehouse_dir, avro_schema_dir)
elif file_format in ['parquet', 'orc']: # columnar formats don't need row format
row_format_stmt = str()
elif file_format == 'kudu':
# Use partitioned_by to set a trivial hash distribution
assert not partitioned_by, "Kudu table shouldn't have partition cols defined"
partitioned_by = "partition by hash partitions 3"
row_format_stmt = str()
primary_keys_clause = ", PRIMARY KEY (%s)" % columns.split("\n")[0].split(" ")[0]
# Kudu's test tables are managed.
external = ""
# Read the properties specified in the TABLE_PROPERTIES section. When the specified
# properties have the same key as a default property, the value for the specified
# property is used.
if table_properties:
for table_property in table_properties.split("\n"):
format_prop = table_property.split(":")
if format_prop[0] == file_format:
key_val = format_prop[1].split("=")
tblproperties[key_val[0]] = key_val[1]
all_tblproperties = []
for key, value in tblproperties.iteritems():
all_tblproperties.append("'{0}' = '{1}'".format(key, value))
# If there are no properties to set avoid the TBLPROPERTIES clause altogether.
if not all_tblproperties:
tblproperties_clause = ""
else:
tblproperties_clause = tblproperties_clause.format(",\n".join(all_tblproperties))
# Note: columns are ignored but allowed if a custom serde is specified
# (e.g. Avro)
stmt = """
CREATE {external} TABLE IF NOT EXISTS {{db_name}}{{db_suffix}}.{{table_name}} (
{columns}
{primary_keys})
{partitioned_by}
{row_format}
{file_format_string}
LOCATION '{{hdfs_location}}'
{tblproperties}
""".format(
external=external,
row_format=row_format_stmt,
columns=',\n'.join(columns.split('\n')),
primary_keys=primary_keys_clause,
partitioned_by=partitioned_by,
tblproperties=tblproperties_clause,
file_format_string=file_format_string
).strip()
# Remove empty lines from the stmt string. There is an empty line for
# each of the sections that didn't have anything (e.g. partitioned_by)
stmt = os.linesep.join([s for s in stmt.splitlines() if s])
stmt += ';'
return stmt
def build_hbase_create_stmt_in_hive(columns, partition_columns, table_name):
# The hbase create statement differs sufficiently from the generic create to justify a
# separate method. Specifically, STORED AS becomes STORED BY, there is a section called
# SERDEPROPERTIES, and the partition columns have to be appended to the columns in the schema.
columns = columns.split('\n')
# partition columns have to be appended to the columns in the schema.
# PARTITIONED BY is not supported and does not make sense for HBase.
if partition_columns:
columns.extend(partition_columns.split('\n'))
# stringids is a special case. It still points to functional_hbase.alltypesagg
if 'stringids' not in table_name:
tbl_properties = ('TBLPROPERTIES("hbase.table.name" = '
'"{db_name}{db_suffix}.{table_name}")')
else:
tbl_properties = ('TBLPROPERTIES("hbase.table.name" = '
'"{db_name}{db_suffix}.alltypesagg")')
# build hbase column mapping, the first column is implicitly the primary key
# which has a different representation [:key]
hbase_column_mapping = ["d:%s" % c.split(' ')[0] for c in columns[1:]]
hbase_column_mapping = ":key," + ','.join(hbase_column_mapping)
stmt = HBASE_CREATE_STATEMENT.format(
columns=',\n'.join(columns),
hbase_column_mapping=hbase_column_mapping,
tbl_properties=tbl_properties,
).strip()
return stmt + ';'
def avro_schema(columns):
record = {
"name": "a", # doesn't matter
"type": "record",
"fields": list()
}
for column_spec in columns.strip().split('\n'):
# column_spec looks something like "col_name col_type COMMENT comment"
# (comment may be omitted, we don't use it)
name = column_spec.split()[0]
if "DECIMAL" in column_spec.upper():
if column_spec.split()[1].upper() == "DECIMAL":
# No scale and precision specified, use defaults
scale = 0
precision = 9
else:
# Parse out scale and precision from decimal type
m = re.search("DECIMAL\((?P<precision>.*),(?P<scale>.*)\)", column_spec.upper())
assert m, "Could not parse decimal column spec: " + column_spec
scale = int(m.group('scale'))
precision = int(m.group('precision'))
type = {"type": "bytes", "logicalType": "decimal", "precision": precision,
"scale": scale}
else:
hive_type = column_spec.split()[1].upper()
if hive_type.startswith('CHAR(') or hive_type.startswith('VARCHAR('):
type = 'string'
else:
type = HIVE_TO_AVRO_TYPE_MAP[hive_type]
record["fields"].append(
{'name': name,
'type': [type, "null"]}) # all columns nullable
return json.dumps(record)
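# Worked example (illustrative, not executed): avro_schema("id INT\nname STRING")
# produces a record schema whose fields are
# [{"name": "id", "type": ["int", "null"]}, {"name": "name", "type": ["string", "null"]}]
# -- every column is emitted as nullable.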
def build_compression_codec_statement(codec, compression_type, file_format):
codec = AVRO_COMPRESSION_MAP[codec] if file_format == 'avro' else COMPRESSION_MAP[codec]
if not codec:
return str()
return (AVRO_COMPRESSION_CODEC % codec) if file_format == 'avro' else (
COMPRESSION_TYPE % compression_type.upper() + '\n' + COMPRESSION_CODEC % codec)
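# Worked example (illustrative, not executed):
# build_compression_codec_statement('snap', 'block', 'seq') returns the two Hive settings
#   SET mapred.output.compression.type=BLOCK;
#   SET mapred.output.compression.codec=org.apache.hadoop.io.compress.SnappyCodec;
# while file_format 'avro' with the same codec returns only SET avro.output.codec=snappy;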
def build_codec_enabled_statement(codec):
compression_enabled = 'false' if codec == 'none' else 'true'
return COMPRESSION_ENABLED % compression_enabled
def build_insert_into_statement(insert, db_name, db_suffix, table_name, file_format,
hdfs_path, for_impala=False):
insert_hint = "/* +shuffle, clustered */" \
if for_impala and file_format == 'parquet' else ""
insert_statement = insert.format(db_name=db_name,
db_suffix=db_suffix,
table_name=table_name,
hdfs_location=hdfs_path,
impala_home=os.getenv("IMPALA_HOME"),
hint=insert_hint)
# Kudu tables are managed and don't support OVERWRITE, so we replace OVERWRITE
# with INTO to make this a regular INSERT.
if file_format == 'kudu':
insert_statement = insert_statement.replace("OVERWRITE", "INTO")
if for_impala:
return insert_statement
statement = SET_PARTITION_MODE_NONSTRICT_STATEMENT + "\n"
statement += SET_DYNAMIC_PARTITION_STATEMENT + "\n"
statement += "set hive.auto.convert.join=true;\n"
# For some reason (hive bug?) we need to have the CombineHiveInputFormat set
# for cases where we are compressing in bzip or lzo on certain tables that
# have multiple files.
if 'multi' in table_name and ('bzip' in db_suffix or 'lzo' in db_suffix):
statement += SET_HIVE_INPUT_FORMAT % "CombineHiveInputFormat"
else:
statement += SET_HIVE_INPUT_FORMAT % "HiveInputFormat"
return statement + insert_statement
def build_hbase_insert(db_name, db_suffix, table_name):
hbase_insert = SET_HIVE_HBASE_BULK_LOAD + ';\n'
hbase_insert += ("INSERT OVERWRITE TABLE {db_name}{db_suffix}.{table_name}"
" SELECT * FROM {db_name}.{table_name};\n").\
format(db_name=db_name, db_suffix=db_suffix,table_name=table_name)
return hbase_insert
def build_insert(insert, db_name, db_suffix, file_format,
codec, compression_type, table_name, hdfs_path, create_hive=False):
# HBASE inserts don't need the hive options to be set, and don't require an HDFS
# file location, so they're handled separately.
if file_format == 'hbase' and not create_hive:
return build_hbase_insert(db_name, db_suffix, table_name)
output = build_codec_enabled_statement(codec) + "\n"
output += build_compression_codec_statement(codec, compression_type, file_format) + "\n"
output += build_insert_into_statement(insert, db_name, db_suffix,
table_name, file_format, hdfs_path) + "\n"
return output
def build_load_statement(load_template, db_name, db_suffix, table_name):
# hbase does not need the hdfs path.
if table_name.startswith('hbase'):
load_template = load_template.format(table_name=table_name,
db_name=db_name,
db_suffix=db_suffix)
else:
base_load_dir = os.getenv("REMOTE_LOAD", os.getenv("IMPALA_HOME"))
load_template = load_template.format(table_name=table_name,
db_name=db_name,
db_suffix=db_suffix,
impala_home = base_load_dir)
return load_template
def build_hbase_create_stmt(db_name, table_name, column_families, region_splits):
hbase_table_name = "{db_name}_hbase.{table_name}".format(db_name=db_name,
table_name=table_name)
create_stmts = list()
create_stmts.append("disable '%s'" % hbase_table_name)
create_stmts.append("drop '%s'" % hbase_table_name)
column_families = ','.join(["'{0}'".format(cf) for cf in column_families.splitlines()])
create_statement = "create '%s', %s" % (hbase_table_name, column_families)
if (region_splits):
create_statement += ", {SPLITS => [" + region_splits.strip() + "]}"
create_stmts.append(create_statement)
return create_stmts
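# Worked example (illustrative, not executed):
# build_hbase_create_stmt('functional', 'alltypes', 'd', None) returns the hbase shell
# commands ["disable 'functional_hbase.alltypes'", "drop 'functional_hbase.alltypes'",
# "create 'functional_hbase.alltypes', 'd'"]; a non-empty region_splits string appends
# a ", {SPLITS => [...]}" clause to the create command.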
# Does an hdfs directory listing and returns an array with all the subdir names.
def get_hdfs_subdirs_with_data(path):
tmp_file = tempfile.TemporaryFile("w+")
cmd = "hadoop fs -du %s | grep -v '^0' | awk '{print $3}'" % path
subprocess.call([cmd], shell = True, stderr = open('/dev/null'), stdout = tmp_file)
tmp_file.seek(0)
# Results look like:
# <acls> - <user> <group> <date> /directory/subdirectory
# So to get subdirectory names just return everything after the last '/'
return [line[line.rfind('/') + 1:].strip() for line in tmp_file.readlines()]
class Statements(object):
"""Simple container object for storing SQL statements to be output to a
file. Useful for ordering the statements correctly."""
def __init__(self):
self.create = list()
self.load = list()
self.load_base = list()
def write_to_file(self, filename):
# If there is no content to write, skip
if not self: return
output = self.create + self.load_base + self.load
with open(filename, 'w') as f:
f.write('\n\n'.join(output))
def __nonzero__(self):
return bool(self.create or self.load or self.load_base)
def eval_section(section_str):
"""section_str should be the contents of a section (i.e. a string). If section_str
starts with `, evaluates section_str as a shell command and returns the
output. Otherwise returns section_str."""
if not section_str.startswith('`'): return section_str
cmd = section_str[1:]
# Use bash explicitly instead of setting shell=True so we get more advanced shell
# features (e.g. "for i in {1..n}")
p = subprocess.Popen(['/bin/bash', '-c', cmd], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr: print stderr
assert p.returncode == 0
return stdout.strip()
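# Worked example (illustrative, not executed): a section body of
#   `for i in {1..3}; do echo "col_$i INT"; done
# starts with a backtick, so it is run through bash and replaced by its stdout
# ("col_1 INT\ncol_2 INT\ncol_3 INT"); a body without a leading backtick is returned
# unchanged.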
def generate_statements(output_name, test_vectors, sections,
schema_include_constraints, schema_exclude_constraints,
schema_only_constraints):
# TODO: This method has become very unwieldy. It has to be re-factored sooner than
# later.
# Parquet statements to be executed separately by Impala
hbase_output = Statements()
hbase_post_load = Statements()
impala_invalidate = Statements()
table_names = None
if options.table_names:
table_names = [name.lower() for name in options.table_names.split(',')]
existing_tables = get_hdfs_subdirs_with_data(options.hive_warehouse_dir)
for row in test_vectors:
impala_create = Statements()
hive_output = Statements()
impala_load = Statements()
file_format, data_set, codec, compression_type =\
[row.file_format, row.dataset, row.compression_codec, row.compression_type]
table_format = '%s/%s/%s' % (file_format, codec, compression_type)
db_suffix = row.db_suffix()
db_name = '{0}{1}'.format(data_set, options.scale_factor)
db = '{0}{1}'.format(db_name, db_suffix)
create_db_stmt = 'CREATE DATABASE IF NOT EXISTS {0};\n'.format(db)
impala_create.create.append(create_db_stmt)
for section in sections:
table_name = section['BASE_TABLE_NAME'].strip()
if table_names and (table_name.lower() not in table_names):
print 'Skipping table: %s.%s, table is not in specified table list' % (db, table_name)
continue
if table_format in schema_only_constraints and \
table_name.lower() not in schema_only_constraints[table_format]:
print ('Skipping table: %s.%s, \'only\' constraint for format did not '
'include this table.') % (db, table_name)
continue
if schema_include_constraints[table_name.lower()] and \
table_format not in schema_include_constraints[table_name.lower()]:
print 'Skipping \'%s.%s\' due to include constraint match.' % (db, table_name)
continue
if schema_exclude_constraints[table_name.lower()] and\
table_format in schema_exclude_constraints[table_name.lower()]:
print 'Skipping \'%s.%s\' due to exclude constraint match.' % (db, table_name)
continue
alter = section.get('ALTER')
create = section['CREATE']
create_hive = section['CREATE_HIVE']
assert not (create and create_hive), "Can't set both CREATE and CREATE_HIVE"
table_properties = section['TABLE_PROPERTIES']
insert = eval_section(section['DEPENDENT_LOAD'])
insert_hive = eval_section(section['DEPENDENT_LOAD_HIVE'])
assert not (insert and insert_hive),\
"Can't set both DEPENDENT_LOAD and DEPENDENT_LOAD_HIVE"
load = eval_section(section['LOAD'])
if file_format == 'kudu':
create_kudu = section["CREATE_KUDU"]
if section['DEPENDENT_LOAD_KUDU']:
insert = eval_section(section['DEPENDENT_LOAD_KUDU'])
else:
create_kudu = None
# For some datasets we may want to use a different load strategy when running local
# tests versus tests against large scale factors. The most common reason is to
      # reduce the number of partitions for the local test environment
if not options.scale_factor and section['LOAD_LOCAL']:
load = section['LOAD_LOCAL']
columns = eval_section(section['COLUMNS']).strip()
partition_columns = section['PARTITION_COLUMNS'].strip()
row_format = section['ROW_FORMAT'].strip()
# Force reloading of the table if the user specified the --force option or
# if the table is partitioned and there was no ALTER section specified. This is to
# ensure the partition metadata is always properly created. The ALTER section is
# used to create partitions, so if that section exists there is no need to force
# reload.
# IMPALA-6579: Also force reload all Kudu tables. The Kudu entity referenced
# by the table may or may not exist, so requiring a force reload guarantees
# that the Kudu entity is always created correctly.
# TODO: Rename the ALTER section to ALTER_TABLE_ADD_PARTITION
force_reload = options.force_reload or (partition_columns and not alter) or \
file_format == 'kudu'
hdfs_location = '{0}.{1}{2}'.format(db_name, table_name, db_suffix)
# hdfs file names for hive-benchmark and functional datasets are stored
# directly under /test-warehouse
# TODO: We should not need to specify the hdfs file path in the schema file.
# This needs to be done programmatically.
if data_set in ['hive-benchmark', 'functional']:
hdfs_location = hdfs_location.split('.')[-1]
# hive does not allow hyphenated table names.
if data_set == 'hive-benchmark':
db_name = '{0}{1}'.format('hivebenchmark', options.scale_factor)
data_path = os.path.join(options.hive_warehouse_dir, hdfs_location)
# Empty tables (tables with no "LOAD" sections) are assumed to be used for insert
# testing. Since Impala currently only supports inserting into TEXT, PARQUET and
# HBASE we need to create these tables with a supported insert format.
create_file_format = file_format
create_codec = codec
if not (section['LOAD'] or section['LOAD_LOCAL'] or section['DEPENDENT_LOAD'] \
or section['DEPENDENT_LOAD_HIVE']):
create_codec = 'none'
create_file_format = file_format
if file_format not in IMPALA_SUPPORTED_INSERT_FORMATS:
create_file_format = 'text'
output = impala_create
if create_hive or file_format == 'hbase':
output = hive_output
elif codec == 'lzo':
# Impala CREATE TABLE doesn't allow INPUTFORMAT.
output = hive_output
# TODO: Currently, Kudu does not support partitioned tables via Impala.
# If a CREATE_KUDU section was provided, assume it handles the partition columns
if file_format == 'kudu' and partition_columns != '' and not create_kudu:
print "Ignore partitions on Kudu table: %s.%s" % (db_name, table_name)
continue
# If a CREATE section is provided, use that. Otherwise a COLUMNS section
# must be provided (and optionally PARTITION_COLUMNS and ROW_FORMAT
# sections), which is used to generate the create table statement.
if create_hive:
table_template = create_hive
elif create_kudu:
table_template = create_kudu
elif create:
table_template = create
if file_format in ['avro', 'hbase', 'kudu']:
# We don't know how to generalize CREATE sections to Avro and hbase.
print ("CREATE section not supported with %s, "
"skipping: '%s'" % (file_format, table_name))
continue
elif columns:
avro_schema_dir = "%s/%s" % (AVRO_SCHEMA_DIR, data_set)
table_template = build_table_template(
create_file_format, columns, partition_columns,
row_format, avro_schema_dir, table_name, table_properties)
# Write Avro schema to local file
if file_format == 'avro':
if not os.path.exists(avro_schema_dir):
os.makedirs(avro_schema_dir)
with open("%s/%s.json" % (avro_schema_dir, table_name),"w") as f:
f.write(avro_schema(columns))
else:
table_template = None
if table_template:
output.create.append(build_create_statement(table_template, table_name, db_name,
db_suffix, create_file_format, create_codec, data_path, force_reload))
# HBASE create table
if file_format == 'hbase':
# If the HBASE_COLUMN_FAMILIES section does not exist, default to 'd'
column_families = section.get('HBASE_COLUMN_FAMILIES', 'd')
region_splits = section.get('HBASE_REGION_SPLITS', None)
hbase_output.create.extend(build_hbase_create_stmt(db_name, table_name,
column_families, region_splits))
hbase_post_load.load.append("flush '%s_hbase.%s'\n" % (db_name, table_name))
# Need to make sure that tables created and/or data loaded in Hive is seen
# in Impala. We only need to do a full invalidate if the table was created in Hive
# and Impala doesn't know about it. Otherwise, do a refresh.
if output == hive_output:
invalidate_table_stmt = "INVALIDATE METADATA {0}.{1};\n".format(db, table_name)
else:
invalidate_table_stmt = "REFRESH {0}.{1};\n".format(db, table_name)
impala_invalidate.create.append(invalidate_table_stmt)
# The ALTER statement in hive does not accept fully qualified table names so
# insert a use statement. The ALTER statement is skipped for HBASE as it's
# used for adding partitions.
# TODO: Consider splitting the ALTER subsection into specific components. At the
# moment, it assumes we're only using ALTER for partitioning the table.
if alter and file_format not in ("hbase", "kudu"):
use_db = 'USE {db_name};\n'.format(db_name=db)
if output == hive_output and codec == 'lzo':
# Hive ALTER TABLE ADD PARTITION doesn't handle null partitions, so
# we can't run the ALTER section in this case.
if options.force_reload:
# IMPALA-2278: Hive INSERT OVERWRITE won't clear out partition directories
# that weren't already added to the table. So, for force reload, manually
# delete the partition directories.
output.create.append(("DFS -rm -R {data_path};").format(
data_path=data_path))
else:
# If this is not a force reload use msck repair to add the partitions
# into the table.
output.create.append(use_db + 'msck repair table %s;' % (table_name))
else:
output.create.append(use_db + alter.format(table_name=table_name))
# If the directory already exists in HDFS, assume that data files already exist
# and skip loading the data. Otherwise, the data is generated using either an
# INSERT INTO statement or a LOAD statement.
if not force_reload and hdfs_location in existing_tables:
print 'HDFS path:', data_path, 'contains data. Data loading can be skipped.'
else:
        print 'HDFS path:', data_path, 'does not exist or is empty. Data will be loaded.'
if not db_suffix:
if load:
hive_output.load_base.append(build_load_statement(load, db_name,
db_suffix, table_name))
else:
print 'Empty base table load for %s. Skipping load generation' % table_name
elif file_format in ['kudu', 'parquet']:
if insert_hive:
hive_output.load.append(build_insert(insert_hive, db_name, db_suffix,
file_format, codec, compression_type, table_name, data_path))
elif insert:
impala_load.load.append(build_insert_into_statement(insert, db_name,
db_suffix, table_name, file_format, data_path, for_impala=True))
else:
print 'Empty parquet/kudu load for table %s. Skipping insert generation' \
% table_name
else:
if insert_hive:
insert = insert_hive
if insert:
hive_output.load.append(build_insert(insert, db_name, db_suffix, file_format,
codec, compression_type, table_name, data_path, create_hive=create_hive))
else:
print 'Empty insert for table %s. Skipping insert generation' % table_name
impala_create.write_to_file("create-%s-impala-generated-%s-%s-%s.sql" %
(output_name, file_format, codec, compression_type))
hive_output.write_to_file("load-%s-hive-generated-%s-%s-%s.sql" %
(output_name, file_format, codec, compression_type))
impala_load.write_to_file("load-%s-impala-generated-%s-%s-%s.sql" %
(output_name, file_format, codec, compression_type))
if hbase_output:
hbase_output.create.append("exit")
hbase_output.write_to_file('load-' + output_name + '-hbase-generated.create')
if hbase_post_load:
hbase_post_load.load.append("exit")
hbase_post_load.write_to_file('post-load-' + output_name + '-hbase-generated.sql')
impala_invalidate.write_to_file("invalidate-" + output_name + "-impala-generated.sql")
def parse_schema_template_file(file_name):
VALID_SECTION_NAMES = ['DATASET', 'BASE_TABLE_NAME', 'COLUMNS', 'PARTITION_COLUMNS',
'ROW_FORMAT', 'CREATE', 'CREATE_HIVE', 'CREATE_KUDU',
'DEPENDENT_LOAD', 'DEPENDENT_LOAD_KUDU', 'DEPENDENT_LOAD_HIVE',
'LOAD', 'LOAD_LOCAL', 'ALTER', 'HBASE_COLUMN_FAMILIES',
'TABLE_PROPERTIES', 'HBASE_REGION_SPLITS']
return parse_test_file(file_name, VALID_SECTION_NAMES, skip_unknown_sections=False)
if __name__ == "__main__":
if options.table_formats is None:
if options.exploration_strategy not in KNOWN_EXPLORATION_STRATEGIES:
print 'Invalid exploration strategy:', options.exploration_strategy
print 'Valid values:', ', '.join(KNOWN_EXPLORATION_STRATEGIES)
sys.exit(1)
test_vectors = [vector.value for vector in\
load_table_info_dimension(options.workload, options.exploration_strategy)]
else:
table_formats = options.table_formats.split(',')
dataset = get_dataset_from_workload(options.workload)
test_vectors =\
[TableFormatInfo.create_from_string(dataset, tf) for tf in table_formats]
target_dataset = test_vectors[0].dataset
print 'Target Dataset: ' + target_dataset
dataset_load_dir = os.path.join(SQL_OUTPUT_DIR, target_dataset)
# If the directory containing the sql files does not exist, create it. Else nuke all the
# files corresponding to the current workload.
try:
os.makedirs(dataset_load_dir)
except OSError:
# Directory already exists, remove it.
shutil.rmtree(dataset_load_dir)
# Recreate the workload dir
os.makedirs(dataset_load_dir)
finally:
# Make sure that the directory was created and is empty.
assert os.path.isdir(dataset_load_dir)
assert len(os.listdir(dataset_load_dir)) == 0
# Make the dataset dir the current working directory
os.chdir(dataset_load_dir)
schema_template_file = os.path.join(DATASET_DIR, target_dataset,
'%s_schema_template.sql' % target_dataset)
if not os.path.isfile(schema_template_file):
print 'Schema file not found: ' + schema_template_file
sys.exit(1)
constraints_file = os.path.join(DATASET_DIR, target_dataset, 'schema_constraints.csv')
include_constraints, exclude_constraints, only_constraints = \
parse_table_constraints(constraints_file)
sections = parse_schema_template_file(schema_template_file)
generate_statements('%s-%s' % (options.workload, options.exploration_strategy),
test_vectors, sections, include_constraints, exclude_constraints, only_constraints)
| 45.870659
| 94
| 0.686622
|
defb137473d817976ff3c17042ed6651f7a9f1f1
| 908
|
py
|
Python
|
img-shapes-texts.py
|
martingaido/opencv-python
|
22951f77244638795d45b89871ec8911b0cf60d1
|
[
"MIT"
] | null | null | null |
img-shapes-texts.py
|
martingaido/opencv-python
|
22951f77244638795d45b89871ec8911b0cf60d1
|
[
"MIT"
] | null | null | null |
img-shapes-texts.py
|
martingaido/opencv-python
|
22951f77244638795d45b89871ec8911b0cf60d1
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
print("OpenCV Version:", cv2.__version__)
# Black Image
img = np.zeros((512, 512, 3), np.uint8)
cv2.imshow("Black Image", img)
# Color Image
img[:] = 255, 0, 0
cv2.imshow("Blue Image", img)
# Create Lines (start, end, color, thickness)
cv2.line(img, (0,0), (300,300), (0,255,0), 3)
cv2.imshow("Draw Line", img)
# Create a Rectangle (start, end, color, thickness)
cv2.rectangle(img, (0,0), (250,350), (0,0,255), cv2.FILLED)
cv2.imshow("Draw Rectangle", img)
# Create a Circle (center, radius, color, thickness)
cv2.circle(img, (400,50), 30, (255,255,0), 5)
cv2.imshow("Draw a Circle", img)
# Put Text in the Image (image, text, coords, font, scale, color, thickness)
cv2.putText(img, "This is the Text", (300,100), cv2.FONT_HERSHEY_COMPLEX, 2, (0,0,0), 1)
cv2.imshow("Put Text", img)
# Wait 5 secs. then close window
# cv2.waitKey(5000)
# Close window when key is pressed
cv2.waitKey(0)
| 26.705882
| 88
| 0.676211
|
20256cb417774ba174cdc661ae45cabf32348e1f
| 1,625
|
py
|
Python
|
aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/StartApplicationRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/StartApplicationRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/StartApplicationRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class StartApplicationRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'StartApplication','Edas')
self.set_uri_pattern('/pop/v5/changeorder/co_start')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AppId(self):
return self.get_query_params().get('AppId')
def set_AppId(self,AppId):
self.add_query_param('AppId',AppId)
def get_EccInfo(self):
return self.get_query_params().get('EccInfo')
def set_EccInfo(self,EccInfo):
self.add_query_param('EccInfo',EccInfo)
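# Illustrative sketch (not part of the SDK): sending this request through an
# AcsClient. The credentials, region and application id below are placeholders.
def _example_start_application():
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = StartApplicationRequest()
    request.set_AppId('<edas-app-id>')
    # POSTs to /pop/v5/changeorder/co_start and returns the raw response body
    return client.do_action_with_exception(request)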
| 36.111111
| 77
| 0.760615
|
5ebc58d9237b659885eb82ae238d4ab5e953af22
| 1,391
|
py
|
Python
|
lib/streamlit/error_util.py
|
pohlt/streamlit
|
852764f4f7d2bc06ddf932632df06c9104bf0a35
|
[
"Apache-2.0"
] | 5
|
2020-07-06T21:29:56.000Z
|
2022-03-12T20:04:27.000Z
|
lib/streamlit/error_util.py
|
pohlt/streamlit
|
852764f4f7d2bc06ddf932632df06c9104bf0a35
|
[
"Apache-2.0"
] | 5
|
2021-04-30T21:25:13.000Z
|
2022-03-12T00:43:14.000Z
|
lib/streamlit/error_util.py
|
pohlt/streamlit
|
852764f4f7d2bc06ddf932632df06c9104bf0a35
|
[
"Apache-2.0"
] | 3
|
2020-07-14T23:32:51.000Z
|
2021-12-04T16:49:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
import streamlit as st
# Extract the streamlit package path
_streamlit_dir = os.path.dirname(st.__file__)
# Make it absolute, resolve aliases, and ensure there's a trailing path
# separator
_streamlit_dir = os.path.join(os.path.realpath(_streamlit_dir), "")
def _is_in_streamlit_package(file):
"""True if the given file is part of the streamlit package."""
try:
common_prefix = os.path.commonprefix([os.path.realpath(file), _streamlit_dir])
except ValueError:
# Raised if paths are on different drives.
return False
return common_prefix == _streamlit_dir
def get_nonstreamlit_traceback(extracted_tb):
return [
entry for entry in extracted_tb if not _is_in_streamlit_package(entry.filename)
]
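# Illustrative sketch (not part of streamlit): filter the current call stack
# down to the frames that come from user code rather than from the streamlit
# package itself.
def _example_user_frames():
    extracted = traceback.extract_stack()
    # every remaining FrameSummary has a filename outside the streamlit package
    return get_nonstreamlit_traceback(extracted)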
| 30.911111
| 87
| 0.741912
|
047032425f65959580e5842444545fde515e867c
| 9,157
|
py
|
Python
|
official/resnet/ctl/ctl_imagenet_benchmark.py
|
laggingreflex/models
|
7212436440eaa11293ca84befcc5d8327109ea76
|
[
"Apache-2.0"
] | 2
|
2018-05-08T08:57:52.000Z
|
2021-11-17T10:24:18.000Z
|
official/resnet/ctl/ctl_imagenet_benchmark.py
|
laggingreflex/models
|
7212436440eaa11293ca84befcc5d8327109ea76
|
[
"Apache-2.0"
] | 4
|
2018-09-29T14:56:06.000Z
|
2018-11-05T05:58:25.000Z
|
official/resnet/ctl/ctl_imagenet_benchmark.py
|
laggingreflex/models
|
7212436440eaa11293ca84befcc5d8327109ea76
|
[
"Apache-2.0"
] | 4
|
2018-10-03T16:10:24.000Z
|
2018-11-09T22:48:33.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes CTL benchmarks and accuracy tests."""
from __future__ import print_function
import os
import time
# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf
from official.resnet.keras import keras_common
from official.resnet.ctl import ctl_imagenet_main
from official.resnet.ctl import ctl_common
from official.utils.testing.perfzero_benchmark import PerfZeroBenchmark
from official.utils.flags import core as flags_core
MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77
FLAGS = flags.FLAGS
class CtlBenchmark(PerfZeroBenchmark):
"""Base benchmark class with methods to simplify testing."""
def __init__(self, output_dir=None, default_flags=None, flag_methods=None):
self.output_dir = output_dir
self.default_flags = default_flags or {}
self.flag_methods = flag_methods or {}
super(CtlBenchmark, self).__init__(
output_dir=self.output_dir,
default_flags=self.default_flags,
flag_methods=self.flag_methods)
def _report_benchmark(self,
stats,
wall_time_sec,
top_1_max=None,
top_1_min=None,
total_batch_size=None,
log_steps=None,
warmup=1):
"""Report benchmark results by writing to local protobuf file.
Args:
stats: dict returned from keras models with known entries.
      wall_time_sec: the duration of the benchmark execution in seconds
top_1_max: highest passing level for top_1 accuracy.
top_1_min: lowest passing level for top_1 accuracy.
total_batch_size: Global batch-size.
log_steps: How often the log was created for stats['step_timestamp_log'].
warmup: number of entries in stats['step_timestamp_log'] to ignore.
"""
metrics = []
if 'eval_acc' in stats:
metrics.append({'name': 'accuracy_top_1',
'value': stats['eval_acc'],
'min_value': top_1_min,
'max_value': top_1_max})
metrics.append({'name': 'eval_loss',
'value': stats['eval_loss']})
metrics.append({'name': 'top_1_train_accuracy',
'value': stats['train_acc']})
metrics.append({'name': 'train_loss',
'value': stats['train_loss']})
if (warmup and 'step_timestamp_log' in stats and
len(stats['step_timestamp_log']) > warmup):
# first entry in the time_log is start of step 1. The rest of the
# entries are the end of each step recorded
time_log = stats['step_timestamp_log']
elapsed = time_log[-1].timestamp - time_log[warmup].timestamp
num_examples = (
total_batch_size * log_steps * (len(time_log) - warmup - 1))
examples_per_sec = num_examples / elapsed
metrics.append({'name': 'exp_per_second',
'value': examples_per_sec})
if 'avg_exp_per_second' in stats:
metrics.append({'name': 'avg_exp_per_second',
'value': stats['avg_exp_per_second']})
flags_str = flags_core.get_nondefault_flags_as_str()
self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics,
extras={'flags': flags_str})
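# Illustrative sketch (not part of the benchmark): the examples-per-second
# computation in _report_benchmark above, worked through with assumed numbers
# (they are not benchmark defaults).
def _example_examples_per_sec():
  total_batch_size, log_steps, warmup = 1024, 100, 1
  num_timestamps = 11  # assumed length of stats['step_timestamp_log']
  elapsed_sec = 300.0  # assumed wall-clock time covered by the kept entries
  # drop the warmup entry plus the interval leading into the first kept entry
  num_examples = total_batch_size * log_steps * (num_timestamps - warmup - 1)
  return num_examples / elapsed_sec  # 1024 * 100 * 9 / 300 = 3072.0 examples/sec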
class Resnet50CtlAccuracy(CtlBenchmark):
"""Benchmark accuracy tests for ResNet50 in CTL."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
"""A benchmark class.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more
named arguments before updating the constructor.
"""
flag_methods = [
ctl_common.define_ctl_flags,
keras_common.define_keras_flags
]
self.data_dir = os.path.join(root_data_dir, 'imagenet')
super(Resnet50CtlAccuracy, self).__init__(
output_dir=output_dir, flag_methods=flag_methods)
def benchmark_8_gpu(self):
"""Test Keras model with eager, dist_strat and 8 GPUs."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128 * 8
FLAGS.train_epochs = 90
FLAGS.epochs_between_evals = 10
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
FLAGS.dtype = 'fp32'
# Add some thread tunings to improve performance.
FLAGS.datasets_num_private_threads = 14
self._run_and_report_benchmark()
def _run_and_report_benchmark(self):
start_time_sec = time.time()
stats = ctl_imagenet_main.run(flags.FLAGS)
wall_time_sec = time.time() - start_time_sec
super(Resnet50CtlAccuracy, self)._report_benchmark(
stats,
wall_time_sec,
top_1_min=MIN_TOP_1_ACCURACY,
top_1_max=MAX_TOP_1_ACCURACY,
total_batch_size=FLAGS.batch_size,
log_steps=100)
def _get_model_dir(self, folder_name):
return os.path.join(self.output_dir, folder_name)
class Resnet50CtlBenchmarkBase(CtlBenchmark):
"""Resnet50 benchmarks."""
def __init__(self, output_dir=None, default_flags=None):
flag_methods = [
ctl_common.define_ctl_flags,
keras_common.define_keras_flags
]
super(Resnet50CtlBenchmarkBase, self).__init__(
output_dir=output_dir,
flag_methods=flag_methods,
default_flags=default_flags)
def _run_and_report_benchmark(self):
start_time_sec = time.time()
stats = ctl_imagenet_main.run(FLAGS)
wall_time_sec = time.time() - start_time_sec
# Number of logged step time entries that are excluded in performance
# report. We keep results from last 100 batches in this case.
warmup = (FLAGS.train_steps - 100) // FLAGS.log_steps
super(Resnet50CtlBenchmarkBase, self)._report_benchmark(
stats,
wall_time_sec,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
warmup=warmup)
def benchmark_1_gpu_no_dist_strat(self):
"""Test Keras model with 1 GPU, no distribution strategy."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu(self):
"""Test Keras model with 1 GPU."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_eager(self):
"""Test Keras model with 1 GPU in pure eager mode."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_eager')
FLAGS.batch_size = 64
FLAGS.use_tf_function = False
self._run_and_report_benchmark()
def benchmark_8_gpu(self):
"""Test Keras model with 8 GPUs."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
FLAGS.batch_size = 128 * 8 # 8 GPUs
self._run_and_report_benchmark()
def fill_report_object(self, stats):
super(Resnet50CtlBenchmarkBase, self).fill_report_object(
stats,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
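# Illustrative sketch (not part of the benchmark): how the warmup value used in
# _run_and_report_benchmark above follows from the defaults defined below
# (train_steps=110, log_steps=10), leaving exactly the last 100 batches in the
# performance report.
def _example_warmup_entries():
  train_steps, log_steps = 110, 10
  return (train_steps - 100) // log_steps  # 1 logged entry is discarded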
class Resnet50CtlBenchmarkSynth(Resnet50CtlBenchmarkBase):
"""Resnet50 synthetic benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
def_flags = {}
def_flags['skip_eval'] = True
def_flags['use_synthetic_data'] = True
def_flags['train_steps'] = 110
def_flags['log_steps'] = 10
super(Resnet50CtlBenchmarkSynth, self).__init__(
output_dir=output_dir, default_flags=def_flags)
class Resnet50CtlBenchmarkReal(Resnet50CtlBenchmarkBase):
"""Resnet50 real data benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
def_flags = {}
def_flags['skip_eval'] = True
def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
def_flags['train_steps'] = 110
def_flags['log_steps'] = 10
super(Resnet50CtlBenchmarkReal, self).__init__(
output_dir=output_dir, default_flags=def_flags)
if __name__ == '__main__':
tf.test.main()
| 34.424812
| 80
| 0.683958
|
9740027201dc06af0ca5557da6eb8e9bc3a51a96
| 2,645
|
py
|
Python
|
iAnnotateSV/AnnotateForDGv.py
|
rhshah/iAnnotateSV
|
a2f86543925169219c91fe4e3de5412a69f735a4
|
[
"Apache-2.0"
] | 16
|
2015-03-28T19:06:11.000Z
|
2021-05-01T17:20:05.000Z
|
iAnnotateSV/AnnotateForDGv.py
|
mskcc/iAnnotateSV
|
eb06fe93c20783a1fead0ca1e02fba2ded97d622
|
[
"Apache-2.0"
] | 14
|
2016-10-09T00:50:42.000Z
|
2022-01-13T00:42:58.000Z
|
iAnnotateSV/AnnotateForDGv.py
|
rhshah/iAnnotateSV
|
a2f86543925169219c91fe4e3de5412a69f735a4
|
[
"Apache-2.0"
] | 10
|
2015-03-09T14:07:41.000Z
|
2020-04-13T09:41:02.000Z
|
'''
Created on 12/23/2015
@Ronak Shah
'''
from collections import defaultdict
import pandas as pd
import logging
import coloredlogs
# Returns the elements of a list at the given indexes
getVar = lambda searchList, ind: [searchList[i] for i in ind]
coloredlogs.install(level='DEBUG')
def ReadDGvFile(filename, verbose):
if(verbose):
logging.info("iAnnotateSV::AnnotateForDGv: Reading & Storing DGV TSV file as dictionary")
# Initialize dictionary of lists
dataDict = defaultdict(list)
with open(filename, 'r') as filecontent:
header = filecontent.readline()
for line in filecontent:
data = line.rstrip('\n').split('\t')
processedData = (data[0].replace('chr', ''))
slicedData = data[1:]
joinedData = '\t'.join(slicedData)
dataDict[processedData].append(joinedData)
return dataDict
def AnnotateDGv (verbose, count, sv, dgvDict):
if(verbose):
logging.info("iAnnotateSV::AnnotateForDGv: Checking Entry %d in DGv data", count)
    # Initialize lists to store DGv annotations
list_svloc1 = []
list_svloc2 = []
# Read SV Data
sv_chr1 = str(sv.loc['chr1'])
sv_pos1 = int(sv.loc['pos1'])
sv_chr2 = str(sv.loc['chr2'])
sv_pos2 = int(sv.loc['pos2'])
# Traverse through DGv Data Dict
list_loc1 = dgvDict.get(sv_chr1, "None") # Get the values for the chromosome
if(list_loc1 != "None"): # Check if there are no keys with a particular chromosome
for loc in list_loc1: # For each location in all values check the overlap
data = loc.split('\t')
dgv_pos1 = int(data[0])
dgv_pos2 = int(data[1])
if (dgv_pos1 <= sv_pos1 <= dgv_pos2):
slicedData = getVar(data, [2, 8])
joinedData = '-'.join(slicedData)
list_svloc1.append(joinedData)
else:
if(verbose):
logging.warn("iAnnotateSV::AnnotateForDGv: Chromosome %s is not there in the DGv dictionary", sv_chr1)
list_loc2 = dgvDict.get(sv_chr2, "None")
if(list_loc2 != "None"):
for loc in list_loc2:
data = loc.split('\t')
dgv_pos1 = int(data[0])
dgv_pos2 = int(data[1])
if (dgv_pos1 <= sv_pos2 <= dgv_pos2):
slicedData = getVar(data, [2, 8])
joinedData = '-'.join(slicedData)
list_svloc2.append(joinedData)
else:
if(verbose):
logging.warn("iAnnotateSV::AnnotateForDGv: Chromosome %s is not there in the DGv dictionary", sv_chr2)
return (list_svloc1, list_svloc2)
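# Illustrative sketch (not part of iAnnotateSV): a minimal call to AnnotateDGv()
# with a hand-built SV row and a stand-in for the ReadDGvFile() dictionary.
def _example_annotate_dgv():
    # one structural-variant row, as iAnnotateSV would pass it in from a DataFrame
    sv = pd.Series({'chr1': '1', 'pos1': 15000, 'chr2': '2', 'pos2': 300})
    # chromosome -> tab-joined records; field 0 is the start, field 1 the end,
    # and fields 2 and 8 are joined into the reported annotation
    dgvDict = {'1': ['10000\t20000\tnsv_example\t.\t.\t.\t.\t.\tLoss']}
    # breakpoint 1 overlaps the record, breakpoint 2 has no entry for its chromosome
    return AnnotateDGv(False, 0, sv, dgvDict)  # (['nsv_example-Loss'], [])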
| 39.477612
| 122
| 0.608696
|
9fa3183606fec259ad5b1b2788c11f9e7959410b
| 17,270
|
py
|
Python
|
mriqc/bin/mriqc_run.py
|
Jordan-Theriault/mriqc
|
7a84b28e17c9f137bde75aa264b6f0e7e5804eed
|
[
"BSD-3-Clause"
] | null | null | null |
mriqc/bin/mriqc_run.py
|
Jordan-Theriault/mriqc
|
7a84b28e17c9f137bde75aa264b6f0e7e5804eed
|
[
"BSD-3-Clause"
] | null | null | null |
mriqc/bin/mriqc_run.py
|
Jordan-Theriault/mriqc
|
7a84b28e17c9f137bde75aa264b6f0e7e5804eed
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2015-11-19 16:44:27
"""
=====
MRIQC
=====
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import os.path as op
from multiprocessing import cpu_count
from .. import __version__
DEFAULT_MEM_GB = 8
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from .. import DEFAULTS
parser = ArgumentParser(description='MRIQC: MRI Quality Control',
formatter_class=RawTextHelpFormatter)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store',
help='The directory with the input dataset '
'formatted according to the BIDS standard.')
parser.add_argument('output_dir', action='store',
help='The directory where the output files '
'should be stored. If you are running group level analysis '
                        'this folder should be prepopulated with the results of the '
'participant level analysis.')
parser.add_argument('analysis_level', action='store', nargs='+',
help='Level of the analysis that will be performed. '
'Multiple participant level analyses can be run independently '
'(in parallel) using the same output_dir.',
choices=['participant', 'group'])
# optional arguments
parser.add_argument('--version', action='version',
version='mriqc v{}'.format(__version__))
# BIDS selectors
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--participant_label', '--participant-label', action='store', nargs='+',
help='one or more participant identifiers (the sub- prefix can be '
'removed)')
g_bids.add_argument('--session-id', action='store', nargs='+', type=str,
help='select a specific session to be processed')
g_bids.add_argument('--run-id', action='store', type=str, nargs='+',
help='select a specific run to be processed')
g_bids.add_argument('--task-id', action='store', nargs='+', type=str,
help='select a specific task to be processed')
g_bids.add_argument('-m', '--modalities', action='store', nargs='*',
choices=['T1w', 'bold', 'T2w'], default=['T1w', 'bold', 'T2w'],
help='select one of the supported MRI types')
# Control instruments
g_outputs = parser.add_argument_group('Instrumental options')
g_outputs.add_argument('-w', '--work-dir', action='store',
default=op.join(os.getcwd(), 'work'))
g_outputs.add_argument('--report-dir', action='store')
g_outputs.add_argument('--verbose-reports', default=False, action='store_true')
g_outputs.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_outputs.add_argument('--dry-run', action='store_true', default=False,
help='Do not run the workflow.')
g_outputs.add_argument('--profile', action='store_true', default=False,
help='hook up the resource profiler callback to nipype')
g_outputs.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_outputs.add_argument('--no-sub', default=False, action='store_true',
help='Turn off submission of anonymized quality metrics '
'to MRIQC\'s metrics repository.')
g_outputs.add_argument('--email', action='store', default='', type=str,
help='Email address to include with quality metric submission.')
g_outputs.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity for each occurence, debug level is -vvv")
g_outputs.add_argument(
'--webapi-url', action='store', default='https://mriqc.nimh.nih.gov/api/v1', type=str,
help='IP address where the MRIQC WebAPI is listening')
g_outputs.add_argument(
'--webapi-port', action='store', type=int,
help='port where the MRIQC WebAPI is listening')
g_outputs.add_argument('--upload-strict', action='store_true', default=False,
                           help='upload will fail if upload is strict')
# General performance
g_perfm = parser.add_argument_group('Options to handle performance')
    g_perfm.add_argument('--n_procs', '--nprocs', '--n_cpus',
action='store', default=0, type=int, help='number of threads')
g_perfm.add_argument('--mem_gb', action='store', default=0, type=int,
help='available total memory')
g_perfm.add_argument('--testing', action='store_true', default=False,
help='use testing settings for a minimal footprint')
g_perfm.add_argument(
'-f', '--float32', action='store_true', default=DEFAULTS['float32'],
help="Cast the input data to float32 if it's represented in higher precision "
"(saves space and improves perfomance)")
# Workflow settings
g_conf = parser.add_argument_group('Workflow configuration')
g_conf.add_argument('--ica', action='store_true', default=False,
                        help='Run ICA on the raw data and include the components '
'in the individual reports (slow but potentially very insightful)')
g_conf.add_argument('--hmc-afni', action='store_true', default=True,
                        help='Use AFNI 3dvolreg for head motion correction (HMC) - default')
g_conf.add_argument('--hmc-fsl', action='store_true', default=False,
help='Use FSL MCFLIRT instead of AFNI for head motion correction (HMC)')
g_conf.add_argument('--fft-spikes-detector', action='store_true', default=False,
help='Turn on FFT based spike detector (slow).')
g_conf.add_argument('--fd_thres', action='store', default=0.2,
type=float, help='motion threshold for FD computation')
# ANTs options
g_ants = parser.add_argument_group('Specific settings for ANTs')
g_ants.add_argument(
'--ants-nthreads', action='store', type=int, default=1,
help='number of threads that will be set in ANTs processes')
g_ants.add_argument(
'--ants-float', action='store_true', default=False,
help='use float number precision on ANTs computations')
g_ants.add_argument('--ants-settings', action='store',
help='path to JSON file with settings for ANTS')
# AFNI head motion correction settings
g_afni = parser.add_argument_group('Specific settings for AFNI')
g_afni.add_argument('--deoblique', action='store_true', default=False,
help='Deoblique the functional scans during head motion '
'correction preprocessing')
g_afni.add_argument('--despike', action='store_true', default=False,
help='Despike the functional scans during head motion correction '
'preprocessing')
g_afni.add_argument('--start-idx', action='store', type=int,
help='Initial volume in functional timeseries that should be '
'considered for preprocessing')
g_afni.add_argument('--stop-idx', action='store', type=int,
help='Final volume in functional timeseries that should be '
'considered for preprocessing')
g_afni.add_argument('--correct-slice-timing', action='store_true', default=False,
help='Perform slice timing correction')
return parser
def main():
"""Entry point"""
from nipype import config as ncfg, logging as nlog
from nipype.pipeline.engine import Workflow
from .. import logging
from ..utils.bids import collect_bids_data
from ..workflows.core import build_workflow
from ..utils.misc import check_folder
# Run parser
opts = get_parser().parse_args()
# Retrieve logging level
log_level = int(max(3 - opts.verbose_count, 0) * 10)
if opts.verbose_count > 1:
log_level = int(max(25 - 5 * opts.verbose_count, 1))
logging.getLogger().setLevel(log_level)
log = logging.getLogger('mriqc.cli')
# Build settings dict
bids_dir = op.abspath(opts.bids_dir)
# Number of processes
n_procs = opts.n_procs
settings = {
'bids_dir': bids_dir,
'write_graph': opts.write_graph,
'testing': opts.testing,
'hmc_afni': opts.hmc_afni,
'hmc_fsl': opts.hmc_fsl,
'fft_spikes_detector': opts.fft_spikes_detector,
'n_procs': n_procs,
'ants_nthreads': opts.ants_nthreads,
'ants_float': opts.ants_float,
'output_dir': op.abspath(opts.output_dir),
'work_dir': op.abspath(opts.work_dir),
'verbose_reports': opts.verbose_reports or opts.testing,
'float32': opts.float32,
'ica': opts.ica,
'no_sub': opts.no_sub,
'email': opts.email,
'fd_thres': opts.fd_thres,
'webapi_url': opts.webapi_url,
'webapi_port': opts.webapi_port,
'upload_strict': opts.upload_strict,
}
if opts.hmc_afni:
settings['deoblique'] = opts.deoblique
settings['despike'] = opts.despike
settings['correct_slice_timing'] = opts.correct_slice_timing
if opts.start_idx:
settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
settings['stop_idx'] = opts.stop_idx
if opts.ants_settings:
settings['ants_settings'] = opts.ants_settings
log_dir = op.join(settings['output_dir'], 'logs')
analysis_levels = opts.analysis_level
if opts.participant_label is None:
analysis_levels.append('group')
analysis_levels = list(set(analysis_levels))
if len(analysis_levels) > 2:
raise RuntimeError('Error parsing analysis levels, got "%s"' % ', '.join(analysis_levels))
settings['report_dir'] = opts.report_dir
if not settings['report_dir']:
settings['report_dir'] = op.join(settings['output_dir'], 'reports')
check_folder(settings['output_dir'])
if 'participant' in analysis_levels:
check_folder(settings['work_dir'])
check_folder(log_dir)
check_folder(settings['report_dir'])
# Set nipype config
ncfg.update_config({
'logging': {'log_directory': log_dir, 'log_to_file': True},
'execution': {'crashdump_dir': log_dir, 'crashfile_format': 'txt',
'resource_monitor': opts.profile},
})
# Set nipype logging level
nlog.getLogger('workflow').setLevel(log_level)
nlog.getLogger('interface').setLevel(log_level)
nlog.getLogger('utils').setLevel(log_level)
plugin_settings = {'plugin': 'Linear'}
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as pfile:
plugin_settings = loadyml(pfile)
else:
# Setup multiprocessing
if settings['n_procs'] == 0:
settings['n_procs'] = cpu_count()
if settings['ants_nthreads'] == 0:
if settings['n_procs'] > 1:
# always leave one extra thread for non ANTs work,
# don't use more than 8 threads - the speed up is minimal
settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)
else:
settings['ants_nthreads'] = 1
if settings['n_procs'] > 1:
plugin_settings['plugin'] = 'MultiProc'
plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
if opts.mem_gb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb
# Process data types
modalities = opts.modalities
# Set up participant level
if 'participant' in analysis_levels:
log.info('Participant level started. Checking BIDS dataset...')
dataset = collect_bids_data(
settings['bids_dir'],
modalities=modalities,
participant_label=opts.participant_label,
session=opts.session_id,
run=opts.run_id,
task=opts.task_id,
)
log.info(
'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
__version__, ', '.join(analysis_levels), opts.participant_label, settings)
workflow = Workflow(name='workflow_enumerator')
workflow.base_dir = settings['work_dir']
wf_list = []
for mod in modalities:
if not dataset[mod]:
log.warning('No %s scans were found in %s', mod, settings['bids_dir'])
continue
wf_list.append(build_workflow(dataset[mod], mod, settings=settings))
if wf_list:
workflow.add_nodes(wf_list)
if not opts.dry_run:
# Warn about submitting measures BEFORE
if not settings['no_sub']:
log.warning(
'Anonymized quality metrics will be submitted'
' to MRIQC\'s metrics repository.'
' Use --no-sub to disable submission.')
# run MRIQC
workflow.run(**plugin_settings)
# Warn about submitting measures AFTER
if not settings['no_sub']:
log.warning(
                        'Anonymized quality metrics have been submitted'
' to MRIQC\'s metrics repository.'
' Use --no-sub to disable submission.')
else:
msg = 'Error reading BIDS directory ({}), or the dataset is not ' \
'BIDS-compliant.'
if opts.participant_label or opts.session_id or opts.run_id or opts.task_id:
msg = 'The combination of supplied labels'
if opts.participant_label is not None:
msg += ' (--participant_label {})'.format(" ".join(opts.participant_label))
if opts.session_id is not None:
msg += ' (--session-id {})'.format(" ".join(opts.session_id))
if opts.run_id is not None:
msg += ' (--run-id {})'.format(" ".join(opts.run_id))
if opts.task_id is not None:
msg += ' (--task-id {})'.format(" ".join(opts.task_id))
msg += ' did not result in matches within the BIDS directory ({}).'
raise RuntimeError(msg.format(settings['bids_dir']))
log.info('Participant level finished successfully.')
# Set up group level
if 'group' in analysis_levels:
from ..reports import group_html
from ..utils.misc import generate_csv # , generate_pred
log.info('Group level started...')
log.info(
'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
__version__, ', '.join(analysis_levels), opts.participant_label, settings)
reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
derivatives_dir = op.join(settings['output_dir'], 'derivatives')
n_group_reports = 0
for mod in modalities:
dataframe, out_csv = generate_csv(derivatives_dir,
settings['output_dir'], mod)
# If there are no iqm.json files, nothing to do.
if dataframe is None:
log.warning(
'No IQM-JSON files were found for the %s data type in %s. The group-level '
'report was not generated.', mod, derivatives_dir)
continue
log.info('Summary CSV table for the %s data generated (%s)', mod, out_csv)
# out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
# if out_pred is not None:
# log.info('Predicted QA CSV table for the %s data generated (%s)',
# mod, out_pred)
out_html = op.join(reports_dir, mod + '_group.html')
group_html(out_csv, mod,
csv_failed=op.join(settings['output_dir'], 'failed_' + mod + '.csv'),
out_file=out_html)
log.info('Group-%s report generated (%s)', mod, out_html)
n_group_reports += 1
if n_group_reports == 0:
raise Exception("No data found. No group level reports were generated.")
log.info('Group level finished successfully.')
if __name__ == '__main__':
main()
| 43.832487
| 98
| 0.596815
|
16c2cf5f55958399fa02f2c6ca5fe0b60cf9f372
| 731
|
py
|
Python
|
setup.py
|
saikiran030996/C-Users-KirAI-oops-Project-DVC-NLP-usecase
|
f18f85bbb4b75cf073529714fc704dedff9de760
|
[
"MIT"
] | null | null | null |
setup.py
|
saikiran030996/C-Users-KirAI-oops-Project-DVC-NLP-usecase
|
f18f85bbb4b75cf073529714fc704dedff9de760
|
[
"MIT"
] | null | null | null |
setup.py
|
saikiran030996/C-Users-KirAI-oops-Project-DVC-NLP-usecase
|
f18f85bbb4b75cf073529714fc704dedff9de760
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
## Edit the variables below as per your requirements -
REPO_NAME = "C-Users-KirAI-oops-Project-DVC-NLP-usecase"
AUTHOR_USER_NAME = "saikiran"
SRC_REPO = "src"
LIST_OF_REQUIREMENTS = []
setup(
name=SRC_REPO,
version="0.0.1",
author=AUTHOR_USER_NAME,
description="A small package for DVC",
long_description=long_description,
long_description_content_type="text/markdown",
url=f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}",
author_email="sunny.c17hawke@gmail.com",
packages=[SRC_REPO],
license="MIT",
python_requires=">=3.6",
install_requires=LIST_OF_REQUIREMENTS
)
| 27.074074
| 61
| 0.715458
|
2ae488bda12794c6a030c60722d894fa522b4666
| 535
|
py
|
Python
|
packages/pyre/filesystem/BlockDevice.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/filesystem/BlockDevice.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/filesystem/BlockDevice.py
|
PyreFramework/pyre
|
345c7449a3416eea1c1affa74fb32faff30a6aaa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
# superclass
from .File import File
# class declaration
class BlockDevice(File):
"""
Representation of block devices, a type of unix device driver
"""
# constant
marker = 'b'
# interface
def identify(self, explorer, **kwds):
"""
Tell {explorer} that it is visiting a block device
"""
# dispatch
return explorer.onBlockDevice(info=self, **kwds)
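# Illustrative sketch (not part of pyre): a minimal explorer that the double
# dispatch above calls back into. The node itself is taken as an argument here,
# since constructing filesystem nodes goes through pyre's own factories.
class _ExampleExplorer:
    def onBlockDevice(self, info, **kwds):
        # {info} is the BlockDevice node that invoked identify()
        return "block device (marker %r)" % info.marker
def _example_identify(node):
    return node.identify(explorer=_ExampleExplorer())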
# end of file
| 16.212121
| 65
| 0.605607
|
0d47804022bd1e7e7c40d6e4957d7300593b8507
| 992
|
py
|
Python
|
InfluxDBWriter/InfluxdbWriter.py
|
ansjin/multi-serverless-deployment
|
d4bb6f65db01c6b6fd2085de17f8cfad2cba9bb8
|
[
"MIT"
] | 1
|
2021-03-08T20:17:46.000Z
|
2021-03-08T20:17:46.000Z
|
InfluxDBWriter/InfluxdbWriter.py
|
ansjin/multi-serverless-deployment
|
d4bb6f65db01c6b6fd2085de17f8cfad2cba9bb8
|
[
"MIT"
] | null | null | null |
InfluxDBWriter/InfluxdbWriter.py
|
ansjin/multi-serverless-deployment
|
d4bb6f65db01c6b6fd2085de17f8cfad2cba9bb8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import yaml
logs_file = "Logs/log.log"
from influxdb import DataFrameClient
class InfluxDBWriter:
def __init__(self, configfile: str):
with open(configfile, 'r') as stream:
try:
data = yaml.safe_load(stream)
data = data['influxdb']
self.client = DataFrameClient(data['hostinfo']['host'], data['hostinfo']['port'],
data['auth']['username'], data['auth']['password'],
data['database']['dbname'])
self.client.create_database(data['database']['dbname'])
self.database = data['database']['dbname']
self.protocol = data['database']['protocol']
except yaml.YAMLError as exc:
print(exc)
def write_dataframe_influxdb(self, df):
self.client.write_points(df, self.database, protocol=self.protocol)
print("Writen DataFrame")
| 34.206897
| 97
| 0.550403
|
7de41f65e064485f54da28a17cdbcc09b4192a2b
| 2,671
|
py
|
Python
|
tests/env/packages/env_test_seaborn.py
|
patkan/foxbms-2
|
329216a5b0739362512b4c744975a136f674f60c
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 47
|
2021-04-01T21:14:55.000Z
|
2022-03-30T12:19:18.000Z
|
tests/env/packages/env_test_seaborn.py
|
patkan/foxbms-2
|
329216a5b0739362512b4c744975a136f674f60c
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 6
|
2021-06-01T08:25:54.000Z
|
2021-11-17T07:55:05.000Z
|
tests/env/packages/env_test_seaborn.py
|
patkan/foxbms-2
|
329216a5b0739362512b4c744975a136f674f60c
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 24
|
2021-04-01T21:15:04.000Z
|
2022-03-29T06:18:10.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
"""Testing 'seaborn' package"""
import logging
import argparse
# package to test
import seaborn # pylint: disable=unused-import
def main():
"""Testing 'seaborn' package"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
action="count",
default=0,
help="set verbosity level",
)
args = parser.parse_args()
if args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbosity > 1:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
if __name__ == "__main__":
main()
| 36.589041
| 98
| 0.732684
|
6c3bbb9313bb02215c611545e1f69edd138a32f7
| 184
|
py
|
Python
|
dog-breed-system/dog-breed.py
|
campos537/dog-breed-recognition
|
30be95bd53074efd8138c228efe99d3978705d30
|
[
"Unlicense"
] | null | null | null |
dog-breed-system/dog-breed.py
|
campos537/dog-breed-recognition
|
30be95bd53074efd8138c228efe99d3978705d30
|
[
"Unlicense"
] | null | null | null |
dog-breed-system/dog-breed.py
|
campos537/dog-breed-recognition
|
30be95bd53074efd8138c228efe99d3978705d30
|
[
"Unlicense"
] | null | null | null |
from tkinter import *
from windows.main_window import MainWindow
window = Tk()
mywin = MainWindow(window)
window.title('Dog Breed')
window.geometry("960x960+10+10")
window.mainloop()
| 20.444444
| 42
| 0.771739
|
9591fb111fadae4fb1a8227283e9eb45b05373b0
| 556
|
py
|
Python
|
document_model.py
|
iliaschalkidis/lmtc-emnlp2020
|
55634fda5f118a0476572e674e90b14d3b740501
|
[
"Apache-2.0"
] | 11
|
2020-09-16T15:12:42.000Z
|
2022-02-09T06:16:31.000Z
|
document_model.py
|
iliaschalkidis/lmtc-emnlp2020
|
55634fda5f118a0476572e674e90b14d3b740501
|
[
"Apache-2.0"
] | 2
|
2021-04-14T16:59:10.000Z
|
2022-01-28T08:11:17.000Z
|
document_model.py
|
iliaschalkidis/lmtc-emnlp2020
|
55634fda5f118a0476572e674e90b14d3b740501
|
[
"Apache-2.0"
] | 1
|
2021-07-15T18:36:54.000Z
|
2021-07-15T18:36:54.000Z
|
import logging
from spacy_tagger import Tagger
LOGGER = logging.getLogger(__name__)
class Document:
"""
A document is a combination of text and the positions of the tags in that text.
"""
tagger = Tagger()
def __init__(self, text, tags, filename=None):
"""
:param text: document text as a string
:param tags: list of Tag objects
"""
self.tokens = [token.text for token in Document.tagger.tokenize_text(text)]
self.tags = tags
self.text = text
self.filename = filename
| 25.272727
| 83
| 0.634892
|
1b74d1cc1d16d916e23cf1c338b6046e9667f079
| 566
|
py
|
Python
|
beatmap_collections/migrations/0014_alter_profile_user.py
|
HelloYeew/beattosetto
|
f8847fda908b0211c1221ba64854df42274b9db1
|
[
"MIT"
] | 5
|
2021-11-02T15:09:24.000Z
|
2022-03-24T03:03:57.000Z
|
beatmap_collections/migrations/0014_alter_profile_user.py
|
beattosetto/beattosetto
|
a4bd93fc1ca7530b73f03194847ab8b0dca86806
|
[
"MIT"
] | 205
|
2021-10-12T09:13:59.000Z
|
2022-03-10T10:09:07.000Z
|
beatmap_collections/migrations/0014_alter_profile_user.py
|
pontakornth/beattosetto
|
b3201922a61a9164659efc26f11dc818aa5705f2
|
[
"MIT"
] | 2
|
2021-10-12T13:39:01.000Z
|
2021-10-13T08:47:59.000Z
|
# Generated by Django 3.2.8 on 2021-10-25 02:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('beatmap_collections', '0013_profile'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
),
]
| 25.727273
| 97
| 0.666078
|
6adc2f558dcae2ae470dc5da212f82b3877487e1
| 9,284
|
py
|
Python
|
missions/examples/movie-review/example/dataset.py
|
muik/ai-hackathon-2018
|
53fb888bf777b89c1d073acd00a49a3865ca9d1e
|
[
"MIT"
] | 2
|
2018-05-04T07:28:09.000Z
|
2018-06-04T01:15:07.000Z
|
missions/examples/movie-review/example/dataset.py
|
muik/ai-hackathon-2018
|
53fb888bf777b89c1d073acd00a49a3865ca9d1e
|
[
"MIT"
] | null | null | null |
missions/examples/movie-review/example/dataset.py
|
muik/ai-hackathon-2018
|
53fb888bf777b89c1d073acd00a49a3865ca9d1e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright 2018 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import time
from multiprocessing import Pool
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.model_selection import StratifiedShuffleSplit
from kor_char_parser import decompose_str_as_one_hot
from char import line_to_char_ids
from word import line_to_word_ids
def group_count(name, items):
df = pd.DataFrame(data={name: items})
df = df.groupby([name]).size().reset_index(name='counts')
total = len(items)
df['percent'] = df['counts'].apply(lambda x: round(x * 100 / total, 1))
return df
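# Illustrative sketch (not part of the original): what group_count() produces
# for a tiny label list -- one row per distinct value with counts and percent.
def _example_group_count():
    # returns a DataFrame equivalent to:
    #    label  counts  percent
    #        1       1     25.0
    #       10       3     75.0
    return group_count('label', [10, 10, 1, 10])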
class MovieReviewDataset(Dataset):
"""
    A Python object that reads the movie-review data and returns it as (data, label) tuples.
"""
def __init__(self, dataset_path: str, max_length: int, max_size=-1):
"""
initializer
        :param dataset_path: root path of the dataset
        :param max_length: maximum length of a string
"""
print('pandas version:', pd.__version__)
if max_size > -1:
print('max dataset size:', max_size)
        # Paths to the data and the labels
data_review = os.path.join(dataset_path, 'train', 'train_data')
data_label = os.path.join(dataset_path, 'train', 'train_label')
        # Read the movie-review data and run preprocessing
with open(data_review, 'rt', encoding='utf-8') as f:
lines = f.readlines()[:max_size]
#self._save_chars(lines)
#self._save_words(lines)
#self._save_train_words(lines)
self.reviews, self.lengths, self.review_char_ids, self.review_char_lengths, self.word_ids, self.word_lengths = preprocess(lines, max_length)
        # Read the movie-review labels and run preprocessing.
with open(data_label) as f:
self.labels = [np.float32(x.rstrip()) for x in f.readlines()[:max_size]]
        # Distribution of examples per label
#print(group_count('label', self.labels))
# label counts percent
# 1 148402 10.3
# 2 20539 1.4
# 3 21940 1.5
# 4 24137 1.7
# 5 40210 2.8
# 6 53681 3.7
# 7 80048 5.5
# 8 120005 8.3
# 9 129131 8.9
# 10 807600 55.9
def get_sampler(self):
test_size = 0.2
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size)
X = np.arange(len(self))
try:
train_index, eval_index = next(sss.split(X, self.labels))
except ValueError as e:
if not 'The least populated class in y has only ' in str(e):
raise e
print('Use just ShuffleSplit')
from sklearn.model_selection import ShuffleSplit
sss = ShuffleSplit(n_splits=1, test_size=test_size)
train_index, eval_index = next(sss.split(X, self.labels))
train_sampler = SubsetRandomSampler(train_index)
eval_sampler = SubsetRandomSampler(eval_index)
return train_sampler, eval_sampler
def __len__(self):
"""
        :return: the total number of examples
"""
return len(self.reviews)
def __getitem__(self, idx):
"""
        :param idx: index of the requested example
        :return: the (data, label) pair at that index
"""
return self.reviews[idx], self.lengths[idx], self.review_char_ids[idx], \
self.review_char_lengths[idx], self.word_ids[idx], self.word_lengths[idx], \
self.labels[idx]
def _save_chars(self, lines):
chars = [char for line in lines for char in list(line.strip().replace(' ', ''))]
from collections import Counter
min_count = 5
items = Counter(chars).most_common()
split_idx = 0
for i, (char, count) in enumerate(reversed(items)):
if count >= min_count:
split_idx = i
break
items = items[:-split_idx]
with open('chars.txt', 'wb') as f:
for char, _ in items:
f.write(char.encode('utf-8'))
def _save_words(self, lines):
from word import tokenize
from collections import Counter
tokens = [token for line in lines for token in tokenize(line).split()]
min_count = 5
items = Counter(tokens).most_common()
split_idx = 0
for i, (_, count) in enumerate(reversed(items)):
if count >= min_count:
split_idx = i
break
        if split_idx:  # guard: items[:-0] would drop every entry
            items = items[:-split_idx]
with open('words.txt', 'wb') as f:
for token, _ in items:
f.write(token.encode('utf-8'))
f.write("\n".encode("utf-8"))
def _save_train_words(self, lines):
from word import tokenize
with open('train_words.txt', 'wb') as f:
for line in lines:
line = tokenize(line)
f.write(line.encode('utf-8'))
f.write("\n".encode("utf-8"))
def preprocess(data: list, max_length: int):
"""
    Converts the raw input into a format the deep-learning model can train on.
    The default algorithm is char2vec and the default model is an MLP, so every
    input is returned as a fixed-length vector: strings longer than the fixed
    length are truncated, shorter ones are zero-padded.
    :param data: list of strings ([string1, string2, ...])
    :param max_length: maximum string length
    :return: list of vectors ([[0, 1, 5, 6], [5, 4, 10, 200], ...]) when max_length is 4
"""
t0 = time.time()
with Pool(12) as p:
vectorized_data = p.map(decompose_str_as_one_hot, [datum.strip() for datum in data])
print("vectorized_data loaded %.2f s" % (time.time() - t0))
# one hot length
#df = pd.DataFrame(data={'vectorized_data_length': vec_data_lengths})
#print(df.describe(percentiles=[0.95, 0.997]))
t0 = time.time()
total_count = len(data)
zero_padding = np.zeros((total_count, max_length), dtype=np.int32)
vec_data_lengths = np.zeros((total_count), dtype=np.int32)
for idx, seq in enumerate(vectorized_data):
length = len(seq)
seq = [x+1 for x in seq]
if length >= max_length:
length = max_length
zero_padding[idx, :length] = np.array(seq)[:length]
else:
zero_padding[idx, :length] = np.array(seq)
vec_data_lengths[idx] = length
print("zero_padding loaded %.2f s" % (time.time() - t0))
char_zero_padding, char_lengths = preprocess_char(data, 50) # 99.9% max 47, 100% max 99
word_zero_padding, word_lengths = preprocess_word(data, 50) # 99.9% max 51, 100% max 116
return zero_padding, vec_data_lengths, char_zero_padding, char_lengths, word_zero_padding, word_lengths
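# A self-contained sketch of the pad/truncate step described in the docstring
# above, on a toy batch and without the multiprocessing or kor_char_parser
# dependencies (the real code also shifts indices by +1 for the vocabulary).
def _demo_zero_padding(max_length=4):
    """Pad/truncate [[0, 1, 5, 6, 7], [5, 4]] into fixed-length rows."""
    toy = [[0, 1, 5, 6, 7], [5, 4]]
    padded = np.zeros((len(toy), max_length), dtype=np.int32)
    lengths = np.zeros(len(toy), dtype=np.int32)
    for idx, seq in enumerate(toy):
        length = min(len(seq), max_length)
        padded[idx, :length] = np.array(seq)[:length]
        lengths[idx] = length
    return padded, lengths  # rows [0 1 5 6] and [5 4 0 0], lengths [4, 2]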
def preprocess_char(data: list, max_length: int):
t0 = time.time()
with Pool(12) as p:
char_data = p.map(line_to_char_ids, data)
total_count = len(data)
zero_padding = np.zeros((total_count, max_length), dtype=np.int32)
data_lengths = np.zeros((total_count), dtype=np.int32)
#df = pd.DataFrame(data={'char_length': char_lengths})
#print(df.describe(percentiles=[0.95, 0.999]))
for idx, seq in enumerate(char_data):
length = len(seq)
if length >= max_length:
length = max_length
zero_padding[idx, :length] = np.array(seq)[:length]
else:
zero_padding[idx, :length] = np.array(seq)
data_lengths[idx] = length
print("char zero_padding loaded %.2f s" % (time.time() - t0))
return zero_padding, data_lengths
def preprocess_word(data: list, max_length: int):
t0 = time.time()
with Pool(12) as p:
word_data = p.map(line_to_word_ids, data)
total_count = len(data)
zero_padding = np.zeros((total_count, max_length), dtype=np.int32)
data_lengths = np.zeros((total_count), dtype=np.int32)
for idx, seq in enumerate(word_data):
length = len(seq)
if length >= max_length:
length = max_length
zero_padding[idx, :length] = np.array(seq)[:length]
else:
zero_padding[idx, :length] = np.array(seq)
data_lengths[idx] = length
#df = pd.DataFrame(data={'length': data_lengths})
#print(df.describe(percentiles=[0.95, 0.999]))
print("word zero_padding loaded %.2f s" % (time.time() - t0))
return zero_padding, data_lengths
| 36.407843
| 152
| 0.62193
|
67a78049781af2ad582bc7d5e0d5e4ad95d91845
| 4,634
|
py
|
Python
|
tests/unit/partner/model_tests.py
|
Idematica/django-oscar
|
242a0654210d63ba75f798788916c8b2f7abb7fb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/partner/model_tests.py
|
Idematica/django-oscar
|
242a0654210d63ba75f798788916c8b2f7abb7fb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/partner/model_tests.py
|
Idematica/django-oscar
|
242a0654210d63ba75f798788916c8b2f7abb7fb
|
[
"BSD-3-Clause"
] | null | null | null |
from decimal import Decimal as D
from django.db.models import get_model
from django.test import TestCase
from oscar.test import factories, decorators
from oscar.apps.partner import abstract_models
Partner = get_model('partner', 'Partner')
PartnerAddress = get_model('partner', 'PartnerAddress')
Country = get_model('address', 'Country')
class DummyWrapper(object):
def availability(self, stockrecord):
return 'Dummy response'
def dispatch_date(self, stockrecord):
return "Another dummy response"
class TestStockRecord(TestCase):
def setUp(self):
self.product = factories.create_product()
self.stockrecord = factories.create_stockrecord(
self.product, price_excl_tax=D('10.00'), num_in_stock=10)
@decorators.ignore_deprecation_warnings
def test_get_price_incl_tax_defaults_to_no_tax(self):
self.assertEquals(D('10.00'), self.stockrecord.price_incl_tax)
def test_get_price_excl_tax_returns_correct_value(self):
self.assertEquals(D('10.00'), self.stockrecord.price_excl_tax)
def test_net_stock_level_with_no_allocation(self):
self.assertEquals(10, self.stockrecord.net_stock_level)
def test_net_stock_level_with_allocation(self):
self.stockrecord.allocate(5)
self.assertEquals(10-5, self.stockrecord.net_stock_level)
def test_allocated_does_not_alter_num_in_stock(self):
self.stockrecord.allocate(5)
self.assertEqual(10, self.stockrecord.num_in_stock)
self.assertEqual(5, self.stockrecord.num_allocated)
def test_allocation_handles_null_value(self):
self.stockrecord.num_allocated = None
self.stockrecord.allocate(5)
def test_consuming_allocation(self):
self.stockrecord.allocate(5)
self.stockrecord.consume_allocation(3)
self.assertEqual(2, self.stockrecord.num_allocated)
self.assertEqual(7, self.stockrecord.num_in_stock)
def test_cancelling_allocation(self):
self.stockrecord.allocate(5)
self.stockrecord.cancel_allocation(4)
self.assertEqual(1, self.stockrecord.num_allocated)
self.assertEqual(10, self.stockrecord.num_in_stock)
def test_cancelling_allocation_ignores_too_big_allocations(self):
self.stockrecord.allocate(5)
self.stockrecord.cancel_allocation(6)
self.assertEqual(0, self.stockrecord.num_allocated)
self.assertEqual(10, self.stockrecord.num_in_stock)
@decorators.ignore_deprecation_warnings
def test_max_purchase_quantity(self):
self.assertEqual(10, self.stockrecord.max_purchase_quantity())
@decorators.ignore_deprecation_warnings
class CustomWrapperTests(TestCase):
"""
Partner wrappers are deprecated. This testcase will be removed/rewritten
in Oscar 0.7.
"""
def setUp(self):
abstract_models.partner_wrappers = {1: DummyWrapper()}
def tearDown(self):
abstract_models.partner_wrappers = None
def test_wrapper_availability_gets_called(self):
product = factories.create_product(
price=D('10.00'), partner="Acme", num_in_stock=10)
stockrecord = product.stockrecords.all()[0]
self.assertEquals(u"Dummy response",
unicode(stockrecord.availability))
def test_wrapper_dispatch_date_gets_called(self):
product = factories.create_product(
price=D('10.00'), partner="Acme", num_in_stock=10)
stockrecord = product.stockrecords.all()[0]
self.assertEquals("Another dummy response",
stockrecord.dispatch_date)
class TestPartnerAddress(TestCase):
def setUp(self):
self.partner = Partner._default_manager.create(
name="Dummy partner")
self.country = Country._default_manager.create(
iso_3166_1_a2='GB', name="UNITED KINGDOM")
self.address = PartnerAddress._default_manager.create(
title="Dr",
first_name="Barry",
last_name="Barrington",
country=self.country,
postcode="LS1 2HA",
partner=self.partner)
def test_can_get_primary_address(self):
self.assertEqual(self.partner.primary_address, self.address)
def test_fails_on_two_addresses(self):
self.address = PartnerAddress._default_manager.create(
title="Mrs",
first_name="Jane",
last_name="Barrington",
postcode="LS1 2HA",
country=self.country,
partner=self.partner)
self.assertRaises(
NotImplementedError, getattr, self.partner, 'primary_address')
| 35.106061
| 77
| 0.698748
|
5bc1c1a48ad4d6ac5b07962ac68c8abe52c581ca
| 4,075
|
py
|
Python
|
jobsub/Colorer.py
|
simonspa/corryvreckan
|
ffa5d1f7a47341ab6eb1b6208c9e0c472a58f436
|
[
"MIT"
] | null | null | null |
jobsub/Colorer.py
|
simonspa/corryvreckan
|
ffa5d1f7a47341ab6eb1b6208c9e0c472a58f436
|
[
"MIT"
] | null | null | null |
jobsub/Colorer.py
|
simonspa/corryvreckan
|
ffa5d1f7a47341ab6eb1b6208c9e0c472a58f436
|
[
"MIT"
] | null | null | null |
# This code is taken unchanged from http://stackoverflow.com/a/1336640 and is therefore kept as-is,
# under CC BY-SA 3.0
#!/usr/bin/env python
# encoding: utf-8
import logging
# now we patch Python code to add color support to logging.StreamHandler
def add_coloring_to_emit_windows(fn):
# add methods we need to the class
def _out_handle(self):
import ctypes
return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
out_handle = property(_out_handle)
def _set_color(self, code):
import ctypes
# Constants from the Windows API
self.STD_OUTPUT_HANDLE = -11
hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
setattr(logging.StreamHandler, '_set_color', _set_color)
def new(*args):
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
FOREGROUND_WHITE = FOREGROUND_BLUE|FOREGROUND_GREEN |FOREGROUND_RED
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
levelno = args[1].levelno
if(levelno>=50):
color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
elif(levelno>=40):
color = FOREGROUND_RED | FOREGROUND_INTENSITY
elif(levelno>=30):
color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
elif(levelno>=20):
color = FOREGROUND_GREEN
elif(levelno>=10):
color = FOREGROUND_MAGENTA
else:
color = FOREGROUND_WHITE
args[0]._set_color(color)
ret = fn(*args)
args[0]._set_color( FOREGROUND_WHITE )
#print "after"
return ret
return new
def add_coloring_to_emit_ansi(fn):
# add methods we need to the class
def new(*args):
levelno = args[1].levelno
if(levelno>=50):
color = '\x1b[31m' # red
elif(levelno>=40):
color = '\x1b[31m' # red
elif(levelno>=30):
color = '\x1b[33m' # yellow
elif(levelno>=20):
color = '\x1b[32m' # green
elif(levelno>=10):
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
if type(args[1].msg) == str:
args[1].msg = color + args[1].msg + '\x1b[0m' # normal
else:
args[1].msg = color + (args[1].msg).decode('utf-8') + '\x1b[0m' # normal
#print "after"
return fn(*args)
return new
import platform
if platform.system()=='Windows':
# Windows does not support ANSI escapes and we are using API calls to set the console color
logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
else:
# all non-Windows platforms are supporting ANSI escapes so we use them
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
#log = logging.getLogger()
#log.addFilter(log_filter())
#//hdlr = logging.StreamHandler()
#//hdlr.setFormatter(formatter())
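# Minimal usage sketch: once this module is imported, ordinary logging calls on
# a StreamHandler come out colour-coded by level (ANSI escapes on Unix-like
# systems, console attributes on Windows). The logger name is arbitrary.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    demo_log = logging.getLogger('colorer-demo')
    demo_log.debug('debug is pink')
    demo_log.info('info is green')
    demo_log.warning('warning is yellow')
    demo_log.error('error is red')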
| 36.061947
| 100
| 0.62454
|
56b8be0a37d3eae1e52c9e15b8c07529f8ef31a1
| 4,275
|
py
|
Python
|
python_modules/socketpool/util.py
|
interactiveinstitute/watthappened
|
0c7ab7a5ae7f7a0f567c32a524b3c27294d1233f
|
[
"MIT"
] | null | null | null |
python_modules/socketpool/util.py
|
interactiveinstitute/watthappened
|
0c7ab7a5ae7f7a0f567c32a524b3c27294d1233f
|
[
"MIT"
] | null | null | null |
python_modules/socketpool/util.py
|
interactiveinstitute/watthappened
|
0c7ab7a5ae7f7a0f567c32a524b3c27294d1233f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -
#
# This file is part of socketpool.
# See the NOTICE for more information.
import errno
import os
import select
import socket
import sys
try:
from importlib import import_module
except ImportError:
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def load_backend(backend_name):
""" load pool backend. If this is an external module it should be
passed as "somelib.backend_mod", for socketpool backend you can just
pass the name.
Supported backend are :
- thread: connection are maintained in a threadsafe queue.
- gevent: support gevent
- eventlet: support eventlet
"""
try:
if len(backend_name.split(".")) > 1:
mod = import_module(backend_name)
else:
mod = import_module("socketpool.backend_%s" % backend_name)
return mod
except ImportError:
error_msg = "%s isn't a socketpool backend" % backend_name
raise ImportError(error_msg)
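# Usage sketch for load_backend; "thread" is one of the built-in backend names
# listed in the docstring above, so this resolves to socketpool.backend_thread.
def _demo_load_backend():
    """Load the threaded backend module (unknown names raise ImportError)."""
    return load_backend("thread")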
def is_connected(skt):
try:
fno = skt.fileno()
except socket.error as e:
        if e.errno == errno.EBADF:  # attribute access works on Python 2 and 3
return False
raise
try:
if hasattr(select, "epoll"):
ep = select.epoll()
ep.register(fno, select.EPOLLOUT | select.EPOLLIN)
events = ep.poll(0)
for fd, ev in events:
if fno == fd and \
(ev & select.EPOLLOUT or ev & select.EPOLLIN):
ep.unregister(fno)
return True
ep.unregister(fno)
elif hasattr(select, "poll"):
p = select.poll()
p.register(fno, select.POLLOUT | select.POLLIN)
events = p.poll(0)
for fd, ev in events:
if fno == fd and \
(ev & select.POLLOUT or ev & select.POLLIN):
p.unregister(fno)
return True
p.unregister(fno)
elif hasattr(select, "kqueue"):
kq = select.kqueue()
events = [
select.kevent(fno, select.KQ_FILTER_READ, select.KQ_EV_ADD),
select.kevent(fno, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
]
kq.control(events, 0)
kevents = kq.control(None, 4, 0)
for ev in kevents:
if ev.ident == fno:
if ev.flags & select.KQ_EV_ERROR:
return False
else:
return True
# delete
events = [
select.kevent(fno, select.KQ_FILTER_READ, select.KQ_EV_DELETE),
select.kevent(fno, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
]
kq.control(events, 0)
kq.close()
return True
else:
r, _, _ = select.select([fno], [], [], 0)
if not r:
return True
except IOError:
pass
except (ValueError, select.error,) as e:
pass
return False
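# Usage sketch for is_connected on a throwaway local TCP connection, using only
# the standard library; the addresses and sockets below are demo-only.
if __name__ == "__main__":
    server = socket.socket()
    server.bind(("127.0.0.1", 0))
    server.listen(1)
    client = socket.socket()
    client.connect(server.getsockname())
    print("connected:", is_connected(client))  # expected: True
    client.close()
    server.close()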
| 31.433824
| 82
| 0.531462
|
0bfa4aa11ecdbc6e5a02bf2c0f581af4a6240b8a
| 959
|
py
|
Python
|
evaluation/process_bcb_clones.py
|
cragkhit/elasticsearch
|
05567b30c5bde08badcac1bf421454e5d995eb91
|
[
"Apache-2.0"
] | 23
|
2018-10-03T15:02:53.000Z
|
2021-09-16T11:07:36.000Z
|
evaluation/process_bcb_clones.py
|
LandAndLand/Siamese
|
07fb10bec4614f55bcc39e571d1185fc9ce86242
|
[
"Apache-2.0"
] | 18
|
2019-02-10T04:52:54.000Z
|
2022-01-25T02:14:40.000Z
|
evaluation/process_bcb_clones.py
|
LandAndLand/Siamese
|
07fb10bec4614f55bcc39e571d1185fc9ce86242
|
[
"Apache-2.0"
] | 19
|
2018-11-16T13:39:05.000Z
|
2021-09-05T23:59:30.000Z
|
from __future__ import print_function
import sys
def gen_pairs(line):
parts = line.split(',')
return parts
def format_query(query, prefix):
parts = query.split('#')
return parts[0].replace(prefix, '').strip() + ',' \
+ parts[1].strip() + '.java,' + parts[2].strip() + ',' \
+ parts[3].replace('.java_method', '').strip()
def format_clone(clone, prefix):
parts = clone.split('#')
filepart = parts[0].split('.java_')
filename = filepart[0].replace(prefix, '').replace('/', ',') + '.java'
return filename.strip() + ',' + parts[1].strip() + ',' + parts[2].strip()
file = open(sys.argv[1], 'r')
for line in file:
parts = gen_pairs(line)
iterparts = iter(parts)
query = next(iterparts)
for part in iterparts:
print(format_query(query, '/scratch0/NOT_BACKED_UP/crest/cragkhit/siamese/') + ',' +
format_clone(part, '/scratch0/NOT_BACKED_UP/crest/cragkhit/dataset/'))
| 29.96875
| 92
| 0.603754
|
77202187dece0c78166d4562b13e69078c3d7ecd
| 580
|
py
|
Python
|
tests/models.py
|
zgoda/pony-factoryboy
|
3ca614fd9ae8ff781578dee01851390e20acf752
|
[
"Apache-2.0"
] | 1
|
2020-08-30T17:10:07.000Z
|
2020-08-30T17:10:07.000Z
|
tests/models.py
|
zgoda/pony-factoryboy
|
3ca614fd9ae8ff781578dee01851390e20acf752
|
[
"Apache-2.0"
] | null | null | null |
tests/models.py
|
zgoda/pony-factoryboy
|
3ca614fd9ae8ff781578dee01851390e20acf752
|
[
"Apache-2.0"
] | null | null | null |
from pony.orm import Database, Optional, Required, PrimaryKey
db = Database()
class StandardModel(db.Entity):
foo = Required(str, 20)
class NonIntegerPK(db.Entity):
foo = PrimaryKey(str, 20)
bar = Optional(str, 20)
class MultifieldModel(db.Entity):
slug = Required(str, 20, unique=True)
text = Required(str, 20)
class MultifieldUniqueModel(db.Entity):
slug = Required(str, 20, unique=True)
text = Required(str, 20, unique=True)
title = Required(str, 20, unique=True)
db.bind('sqlite', ':memory:')
db.generate_mapping(create_tables=True)
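# Usage sketch (not part of the test fixtures): with the in-memory binding
# above, entities can be created and queried inside a pony db_session.
if __name__ == '__main__':
    from pony.orm import db_session, select
    with db_session:
        StandardModel(foo='hello')
        print(select(m.foo for m in StandardModel)[:])  # ['hello']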
| 20.714286
| 61
| 0.694828
|
5e1c88d194b2275be73306aa0384c972ab170518
| 275
|
py
|
Python
|
revli/lib/plugins/otffragmentorder.py
|
Akida31/revli
|
22ac7da01114916ba1e2eb2f683cd9c346d5d2b1
|
[
"MIT"
] | null | null | null |
revli/lib/plugins/otffragmentorder.py
|
Akida31/revli
|
22ac7da01114916ba1e2eb2f683cd9c346d5d2b1
|
[
"MIT"
] | null | null | null |
revli/lib/plugins/otffragmentorder.py
|
Akida31/revli
|
22ac7da01114916ba1e2eb2f683cd9c346d5d2b1
|
[
"MIT"
] | null | null | null |
from os.path import join
from revli.lib.plugins.plugin import Plugin
class OTFFragmentOrder(Plugin):
def add(self):
self.add_attr_to_initialize("otfFragmentOrder", True)
self.add_dependency(join("otf-fragment-order", "otf-fragment-order.js"))
| 27.5
| 81
| 0.712727
|
467cb20af5be0b546fe311b7c65d93191288a1de
| 293
|
py
|
Python
|
Codes/19 - Packages.py
|
Muntaha-Islam0019/Hello-World
|
a650bd32e039076ea10caea850453fdaa4744975
|
[
"MIT"
] | 1
|
2019-12-18T09:59:44.000Z
|
2019-12-18T09:59:44.000Z
|
Codes/19 - Packages.py
|
Muntaha-Islam0019/Hello-World
|
a650bd32e039076ea10caea850453fdaa4744975
|
[
"MIT"
] | null | null | null |
Codes/19 - Packages.py
|
Muntaha-Islam0019/Hello-World
|
a650bd32e039076ea10caea850453fdaa4744975
|
[
"MIT"
] | null | null | null |
# This code is related to the 'Shop' package.
# Yes, one needs to mention the package name before
# importing a module from a package.
from Codes.Shop.Prices import price
cart = ["shirt", "pant", "shoe"]
total_price = 0
for product in cart:
total_price += price(product)
print(total_price)
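# For illustration only: a possible Codes/Shop/Prices.py that the import above
# refers to. The product names and prices are invented; the real module may
# differ, but price(product) is expected to return a number.
#
#     # Codes/Shop/Prices.py
#     _PRICES = {"shirt": 20, "pant": 30, "shoe": 50}
#
#     def price(product):
#         """Return the price of a product by name."""
#         return _PRICES[product]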
| 20.928571
| 50
| 0.720137
|
3edaa866587a364ec952d91f4ef0f5ecb6b16cc1
| 3,917
|
py
|
Python
|
tempest/api/object_storage/test_object_temp_url_negative.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/object_storage/test_object_temp_url_negative.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/object_storage/test_object_temp_url_negative.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import hmac
import time
from six.moves.urllib import parse as urlparse
from tempest.api.object_storage import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
class ObjectTempUrlNegativeTest(base.BaseObjectTest):
metadata = {}
containers = []
@classmethod
def resource_setup(cls):
super(ObjectTempUrlNegativeTest, cls).resource_setup()
cls.container_name = cls.create_container()
# update account metadata
cls.key = 'Meta'
cls.metadata = {'Temp-URL-Key': cls.key}
cls.account_client.create_update_or_delete_account_metadata(
create_update_metadata=cls.metadata)
cls.account_client_metadata, _ = \
cls.account_client.list_account_metadata()
@classmethod
def resource_cleanup(cls):
cls.account_client.create_update_or_delete_account_metadata(
delete_metadata=cls.metadata)
cls.delete_containers()
super(ObjectTempUrlNegativeTest, cls).resource_cleanup()
def setUp(self):
super(ObjectTempUrlNegativeTest, self).setUp()
# make sure the metadata has been set
self.assertIn('x-account-meta-temp-url-key',
self.account_client_metadata)
self.assertEqual(
self.account_client_metadata['x-account-meta-temp-url-key'],
self.key)
# create object
self.object_name = data_utils.rand_name(name='ObjectTemp')
self.content = data_utils.arbitrary_string(size=len(self.object_name),
base_text=self.object_name)
self.object_client.create_object(self.container_name,
self.object_name, self.content)
def _get_expiry_date(self, expiration_time=1000):
return int(time.time() + expiration_time)
def _get_temp_url(self, container, object_name, method, expires,
key):
"""Create the temporary URL."""
path = "%s/%s/%s" % (
urlparse.urlparse(self.object_client.base_url).path,
container, object_name)
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(
key.encode(), hmac_body.encode(), hashlib.sha1
).hexdigest()
url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
object_name,
sig, expires)
return url
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5a583aca-c804-41ba-9d9a-e7be132bdf0b')
@test.requires_ext(extension='tempurl', service='object')
def test_get_object_after_expiration_time(self):
expires = self._get_expiry_date(1)
# get a temp URL for the created object
url = self._get_temp_url(self.container_name,
self.object_name, "GET",
expires, self.key)
        # temp URL is valid for 1 second, let's wait 2
time.sleep(2)
self.assertRaises(lib_exc.Unauthorized,
self.object_client.get, url)
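# A standalone sketch of the Swift temp URL signing scheme the test above
# exercises: HMAC-SHA1 over "METHOD\nexpires\npath". The key, path and expiry
# are made-up sample values, not real credentials.
if __name__ == '__main__':
    sample_key = 'Meta'
    sample_expires = 1700000000
    sample_path = '/v1/AUTH_demo/container/object'
    sample_body = 'GET\n%s\n%s' % (sample_expires, sample_path)
    sample_sig = hmac.new(
        sample_key.encode(), sample_body.encode(), hashlib.sha1).hexdigest()
    print('temp_url_sig=%s&temp_url_expires=%s' % (sample_sig, sample_expires))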
| 35.288288
| 78
| 0.635946
|
dd39978d66fc6495a211d64480341e0d79ca288e
| 24,930
|
py
|
Python
|
nautobot_golden_config/models.py
|
progala/nautobot-plugin-golden-config
|
63728d56285d8ac2fea3b8f993a481798eef5d12
|
[
"Apache-2.0"
] | null | null | null |
nautobot_golden_config/models.py
|
progala/nautobot-plugin-golden-config
|
63728d56285d8ac2fea3b8f993a481798eef5d12
|
[
"Apache-2.0"
] | null | null | null |
nautobot_golden_config/models.py
|
progala/nautobot-plugin-golden-config
|
63728d56285d8ac2fea3b8f993a481798eef5d12
|
[
"Apache-2.0"
] | null | null | null |
"""Django Models for tracking the configuration compliance per feature and device."""
import logging
import json
from deepdiff import DeepDiff
from django.db import models
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.shortcuts import reverse
from django.utils.module_loading import import_string
from graphene_django.settings import graphene_settings
from graphql import get_default_backend
from graphql.error import GraphQLSyntaxError
from nautobot.dcim.models import Device
from nautobot.extras.models import ObjectChange
from nautobot.extras.utils import extras_features
from nautobot.utilities.utils import get_filterset_for_model, serialize_object
from nautobot.core.models.generics import PrimaryModel
from netutils.config.compliance import feature_compliance
from nautobot_golden_config.choices import ComplianceRuleTypeChoice
from nautobot_golden_config.utilities.utils import get_platform
from nautobot_golden_config.utilities.constant import PLUGIN_CFG
LOGGER = logging.getLogger(__name__)
GRAPHQL_STR_START = "query ($device_id: ID!)"
ERROR_MSG = (
"There was an issue with the data that was returned by your get_custom_compliance function. "
"This is a local issue that requires the attention of your systems administrator and not something "
"that can be fixed within the Golden Config plugin. "
)
MISSING_MSG = (
ERROR_MSG + "Specifically the `{}` key was not found in value the get_custom_compliance function provided."
)
VALIDATION_MSG = (
ERROR_MSG + "Specifically the key {} was expected to be of type(s) {} and the value of {} was not that type(s)."
)
def _is_jsonable(val):
"""Check is value can be converted to json."""
try:
json.dumps(val)
return True
except (TypeError, OverflowError):
return False
def _null_to_empty(val):
"""Convert to empty string if the value is currently null."""
if not val:
return ""
return val
def _get_cli_compliance(obj):
"""This function performs the actual compliance for cli configuration."""
feature = {
"ordered": obj.rule.config_ordered,
"name": obj.rule,
}
feature.update({"section": obj.rule.match_config.splitlines()})
value = feature_compliance(feature, obj.actual, obj.intended, get_platform(obj.device.platform.slug))
compliance = value["compliant"]
if compliance:
compliance_int = 1
ordered = value["ordered_compliant"]
else:
compliance_int = 0
ordered = value["ordered_compliant"]
missing = _null_to_empty(value["missing"])
extra = _null_to_empty(value["extra"])
return {
"compliance": compliance,
"compliance_int": compliance_int,
"ordered": ordered,
"missing": missing,
"extra": extra,
}
def _get_json_compliance(obj):
"""This function performs the actual compliance for json serializable data."""
def _normalize_diff(diff, path_to_diff):
"""Normalizes the diff to a list of keys and list indexes that have changed."""
dictionary_items = list(diff.get(f"dictionary_item_{path_to_diff}", []))
list_items = list(diff.get(f"iterable_item_{path_to_diff}", {}).keys())
values_changed = list(diff.get("values_changed", {}).keys())
type_changes = list(diff.get("type_changes", {}).keys())
return dictionary_items + list_items + values_changed + type_changes
diff = DeepDiff(obj.actual, obj.intended, ignore_order=obj.ordered, report_repetition=True)
if not diff:
compliance_int = 1
compliance = True
ordered = True
missing = ""
extra = ""
else:
compliance_int = 0
compliance = False
ordered = False
missing = _null_to_empty(_normalize_diff(diff, "added"))
extra = _null_to_empty(_normalize_diff(diff, "removed"))
return {
"compliance": compliance,
"compliance_int": compliance_int,
"ordered": ordered,
"missing": missing,
"extra": extra,
}
def _verify_get_custom_compliance_data(compliance_details):
"""This function verifies the data is as expected when a custom function is used."""
for val in ["compliance", "compliance_int", "ordered", "missing", "extra"]:
try:
compliance_details[val]
except KeyError:
raise ValidationError(MISSING_MSG.format(val)) from KeyError
for val in ["compliance", "ordered"]:
if compliance_details[val] not in [True, False]:
raise ValidationError(VALIDATION_MSG.format(val, "Boolean", compliance_details[val]))
if compliance_details["compliance_int"] not in [0, 1]:
raise ValidationError(VALIDATION_MSG.format("compliance_int", "0 or 1", compliance_details["compliance_int"]))
for val in ["missing", "extra"]:
if not isinstance(compliance_details[val], str) and not _is_jsonable(compliance_details[val]):
raise ValidationError(VALIDATION_MSG.format(val, "String or Json", compliance_details[val]))
# The below maps the provided compliance types
FUNC_MAPPER = {
ComplianceRuleTypeChoice.TYPE_CLI: _get_cli_compliance,
ComplianceRuleTypeChoice.TYPE_JSON: _get_json_compliance,
}
# The below conditionally adds the custom-provided compliance type
if PLUGIN_CFG.get("get_custom_compliance"):
try:
FUNC_MAPPER[ComplianceRuleTypeChoice.TYPE_CUSTOM] = import_string(PLUGIN_CFG["get_custom_compliance"])
except Exception as error: # pylint: disable=broad-except
msg = (
"There was an issue attempting to import the get_custom_compliance function of"
f"{PLUGIN_CFG['get_custom_compliance']}, this is expected with a local configuration issue "
"and not related to the Golden Configuration Plugin, please contact your system admin for further details"
)
raise Exception(msg).with_traceback(error.__traceback__)
@extras_features(
"custom_fields",
"custom_validators",
"export_templates",
"graphql",
"relationships",
"webhooks",
)
class ComplianceFeature(PrimaryModel):
"""ComplianceFeature details."""
name = models.CharField(max_length=100, unique=True)
slug = models.SlugField(max_length=100, unique=True)
description = models.CharField(max_length=200, blank=True)
csv_headers = ["name", "slug", "description"]
def to_csv(self):
"""Indicates model fields to return as csv."""
return (self.name, self.slug, self.description)
class Meta:
"""Meta information for ComplianceFeature model."""
ordering = ("slug",)
def __str__(self):
"""Return a sane string representation of the instance."""
return self.slug
def get_absolute_url(self):
"""Absolute url for the ComplianceFeature instance."""
return reverse("plugins:nautobot_golden_config:compliancefeature", args=[self.pk])
@extras_features(
"custom_fields",
"custom_validators",
"export_templates",
"graphql",
"relationships",
"webhooks",
)
class ComplianceRule(PrimaryModel):
"""ComplianceRule details."""
feature = models.ForeignKey(to="ComplianceFeature", on_delete=models.CASCADE, blank=False, related_name="feature")
platform = models.ForeignKey(
to="dcim.Platform",
on_delete=models.CASCADE,
related_name="compliance_rules",
null=False,
blank=False,
)
description = models.CharField(
max_length=200,
blank=True,
)
config_ordered = models.BooleanField(
null=False,
blank=False,
verbose_name="Configured Ordered",
help_text="Whether or not the configuration order matters, such as in ACLs.",
)
match_config = models.TextField(
null=True,
blank=True,
verbose_name="Config to Match",
help_text="The config to match that is matched based on the parent most configuration. e.g. `router bgp` or `ntp`.",
)
config_type = models.CharField(
max_length=20,
default=ComplianceRuleTypeChoice.TYPE_CLI,
choices=ComplianceRuleTypeChoice,
help_text="Whether the config is in cli or json/structured format.",
)
csv_headers = ["platform", "feature", "description", "config_ordered", "match_config", "config_type"]
def to_csv(self):
"""Indicates model fields to return as csv."""
return (
self.platform.slug,
self.feature.name,
self.description,
self.config_ordered,
self.match_config,
self.config_type,
)
class Meta:
"""Meta information for ComplianceRule model."""
ordering = ("platform", "feature__name")
unique_together = (
"feature",
"platform",
)
def __str__(self):
"""Return a sane string representation of the instance."""
return f"{self.platform} - {self.feature.name}"
def get_absolute_url(self):
"""Absolute url for the ComplianceRule instance."""
return reverse("plugins:nautobot_golden_config:compliancerule", args=[self.pk])
def clean(self):
"""Verify that if cli, then match_config is set."""
if self.config_type == ComplianceRuleTypeChoice.TYPE_CLI and not self.match_config:
raise ValidationError("CLI configuration set, but no configuration set to match.")
@extras_features(
"custom_fields",
"custom_links",
"custom_validators",
"export_templates",
"graphql",
"relationships",
"webhooks",
)
class ConfigCompliance(PrimaryModel):
"""Configuration compliance details."""
device = models.ForeignKey(to="dcim.Device", on_delete=models.CASCADE, help_text="The device", blank=False)
rule = models.ForeignKey(to="ComplianceRule", on_delete=models.CASCADE, blank=False, related_name="rule")
compliance = models.BooleanField(null=True, blank=True)
actual = models.JSONField(blank=True, help_text="Actual Configuration for feature")
intended = models.JSONField(blank=True, help_text="Intended Configuration for feature")
missing = models.JSONField(blank=True, help_text="Configuration that should be on the device.")
extra = models.JSONField(blank=True, help_text="Configuration that should not be on the device.")
ordered = models.BooleanField(default=True)
# Used for django-pivot, both compliance and compliance_int should be set.
compliance_int = models.IntegerField(null=True, blank=True)
csv_headers = ["Device Name", "Feature", "Compliance"]
def get_absolute_url(self):
"""Return absolute URL for instance."""
return reverse("plugins:nautobot_golden_config:configcompliance", args=[self.pk])
def to_csv(self):
"""Indicates model fields to return as csv."""
return (self.device.name, self.rule.feature.name, self.compliance)
def to_objectchange(self, action):
"""Remove actual and intended configuration from changelog."""
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
object_data=serialize_object(self, exclude=["actual", "intended"]),
)
class Meta:
"""Set unique together fields for model."""
ordering = ["device"]
unique_together = ("device", "rule")
def __str__(self):
"""String representation of a the compliance."""
return f"{self.device} -> {self.rule} -> {self.compliance}"
def save(self, *args, **kwargs):
"""The actual configuration compliance happens here, but the details for actual compliance job would be found in FUNC_MAPPER."""
if self.rule.config_type == ComplianceRuleTypeChoice.TYPE_CUSTOM and not FUNC_MAPPER.get(
ComplianceRuleTypeChoice.TYPE_CUSTOM
):
raise ValidationError(
"Custom type provided, but no `get_custom_compliance` config set, please contact system admin."
)
compliance_details = FUNC_MAPPER[self.rule.config_type](obj=self)
if self.rule.config_type == ComplianceRuleTypeChoice.TYPE_CUSTOM:
_verify_get_custom_compliance_data(compliance_details)
self.compliance = compliance_details["compliance"]
self.compliance_int = compliance_details["compliance_int"]
self.ordered = compliance_details["ordered"]
self.missing = compliance_details["missing"]
self.extra = compliance_details["extra"]
super().save(*args, **kwargs)
@extras_features(
"custom_fields",
"custom_links",
"custom_validators",
"export_templates",
"graphql",
"relationships",
"webhooks",
)
class GoldenConfig(PrimaryModel):
"""Configuration Management Model."""
device = models.ForeignKey(
to="dcim.Device",
on_delete=models.CASCADE,
help_text="device",
blank=False,
)
backup_config = models.TextField(blank=True, help_text="Full backup config for device.")
backup_last_attempt_date = models.DateTimeField(null=True)
backup_last_success_date = models.DateTimeField(null=True)
intended_config = models.TextField(blank=True, help_text="Intended config for the device.")
intended_last_attempt_date = models.DateTimeField(null=True)
intended_last_success_date = models.DateTimeField(null=True)
compliance_config = models.TextField(blank=True, help_text="Full config diff for device.")
compliance_last_attempt_date = models.DateTimeField(null=True)
compliance_last_success_date = models.DateTimeField(null=True)
csv_headers = [
"Device Name",
"backup attempt",
"backup successful",
"intended attempt",
"intended successful",
"compliance attempt",
"compliance successful",
]
def to_csv(self):
"""Indicates model fields to return as csv."""
return (
self.device,
self.backup_last_attempt_date,
self.backup_last_success_date,
self.intended_last_attempt_date,
self.intended_last_success_date,
self.compliance_last_attempt_date,
self.compliance_last_success_date,
)
def to_objectchange(self, action):
"""Remove actual and intended configuration from changelog."""
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
object_data=serialize_object(self, exclude=["backup_config", "intended_config", "compliance_config"]),
)
class Meta:
"""Set unique together fields for model."""
ordering = ["device"]
def __str__(self):
"""String representation of a the compliance."""
return f"{self.device}"
@extras_features(
"graphql",
)
class GoldenConfigSetting(PrimaryModel):
"""GoldenConfigSetting Model defintion. This provides global configs instead of via configs.py."""
name = models.CharField(max_length=100, unique=True, blank=False)
slug = models.SlugField(max_length=100, unique=True, blank=False)
weight = models.PositiveSmallIntegerField(default=1000, blank=False)
description = models.CharField(
max_length=200,
blank=True,
)
backup_repository = models.ForeignKey(
to="extras.GitRepository",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="backup_repository",
limit_choices_to={"provided_contents__contains": "nautobot_golden_config.backupconfigs"},
)
backup_path_template = models.CharField(
max_length=255,
null=False,
blank=True,
verbose_name="Backup Path in Jinja Template Form",
help_text="The Jinja path representation of where the backup file will be found. The variable `obj` is available as the device instance object of a given device, as is the case for all Jinja templates. e.g. `{{obj.site.slug}}/{{obj.name}}.cfg`",
)
intended_repository = models.ForeignKey(
to="extras.GitRepository",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="intended_repository",
limit_choices_to={"provided_contents__contains": "nautobot_golden_config.intendedconfigs"},
)
intended_path_template = models.CharField(
max_length=255,
null=False,
blank=True,
verbose_name="Intended Path in Jinja Template Form",
help_text="The Jinja path representation of where the generated file will be places. e.g. `{{obj.site.slug}}/{{obj.name}}.cfg`",
)
jinja_repository = models.ForeignKey(
to="extras.GitRepository",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="jinja_template",
limit_choices_to={"provided_contents__contains": "nautobot_golden_config.jinjatemplate"},
)
jinja_path_template = models.CharField(
max_length=255,
null=False,
blank=True,
verbose_name="Template Path in Jinja Template Form",
help_text="The Jinja path representation of where the Jinja template can be found. e.g. `{{obj.platform.slug}}.j2`",
)
backup_test_connectivity = models.BooleanField(
null=False,
default=True,
verbose_name="Backup Test",
help_text="Whether or not to pretest the connectivity of the device by verifying there is a resolvable IP that can connect to port 22.",
)
scope = models.JSONField(
encoder=DjangoJSONEncoder,
blank=True,
null=True,
help_text="API filter in JSON format matching the list of devices for the scope of devices to be considered.",
)
sot_agg_query = models.TextField(
null=False,
blank=True,
verbose_name="GraphQL Query",
help_text=f"A query starting with `{GRAPHQL_STR_START}` that is used to render the config. Please make sure to alias name, see FAQ for more details.",
)
def get_absolute_url(self): # pylint: disable=no-self-use
"""Return absolute URL for instance."""
return reverse("plugins:nautobot_golden_config:goldenconfigsetting", args=[self.slug])
def __str__(self):
"""Return a simple string if model is called."""
return f"Golden Config Setting - {self.name}"
class Meta:
"""Set unique fields for model.
Provide ordering used in tables and get_device_to_settings_map.
Sorting on weight is performed from the highest weight value to the lowest weight value.
This is to ensure only one plugin settings could be applied per single device based on priority and name.
"""
verbose_name = "Golden Config Setting"
ordering = ["-weight", "name"] # Refer to weight comment in class docstring.
def clean(self):
"""Validate the scope and GraphQL query."""
super().clean()
if self.sot_agg_query:
try:
LOGGER.debug("GraphQL - test query: `%s`", str(self.sot_agg_query))
backend = get_default_backend()
schema = graphene_settings.SCHEMA
backend.document_from_string(schema, str(self.sot_agg_query))
except GraphQLSyntaxError as err:
raise ValidationError(str(err)) # pylint: disable=raise-missing-from
LOGGER.debug("GraphQL - test query start with: `%s`", GRAPHQL_STR_START)
if not str(self.sot_agg_query).startswith(GRAPHQL_STR_START):
raise ValidationError(f"The GraphQL query must start with exactly `{GRAPHQL_STR_START}`")
if self.scope:
filterset_class = get_filterset_for_model(Device)
filterset = filterset_class(self.scope, Device.objects.all())
if filterset.errors:
for key in filterset.errors:
error_message = ", ".join(filterset.errors[key])
raise ValidationError({"scope": f"{key}: {error_message}"})
filterset_params = set(filterset.get_filters().keys())
for key in self.scope.keys():
if key not in filterset_params:
raise ValidationError({"scope": f"'{key}' is not a valid filter parameter for Device object"})
def get_queryset(self):
"""Generate a Device QuerySet from the filter."""
if not self.scope:
return Device.objects.all()
filterset_class = get_filterset_for_model(Device)
filterset = filterset_class(self.scope, Device.objects.all())
return filterset.qs
def device_count(self):
"""Return the number of devices in the group."""
return self.get_queryset().count()
def get_filter_as_string(self):
"""Get filter as string."""
if not self.scope:
return None
result = ""
for key, value in self.scope.items():
if isinstance(value, list):
for item in value:
if result != "":
result += "&"
result += f"{key}={item}"
else:
result += "&"
result += f"{key}={value}"
return result
def get_url_to_filtered_device_list(self):
"""Get url to all devices that are matching the filter."""
base_url = reverse("dcim:device_list")
filter_str = self.get_filter_as_string()
if filter_str:
return f"{base_url}?{filter_str}"
return base_url
@extras_features(
"custom_fields",
"custom_links",
"custom_validators",
"export_templates",
"graphql",
"relationships",
"webhooks",
)
class ConfigRemove(PrimaryModel):
"""ConfigRemove for Regex Line Removals from Backup Configuration Model defintion."""
name = models.CharField(max_length=255, null=False, blank=False)
platform = models.ForeignKey(
to="dcim.Platform",
on_delete=models.CASCADE,
related_name="backup_line_remove",
null=False,
blank=False,
)
description = models.CharField(
max_length=200,
blank=True,
)
regex = models.CharField(
max_length=200,
verbose_name="Regex Pattern",
help_text="Regex pattern used to remove a line from the backup configuration.",
)
clone_fields = ["platform", "description", "regex"]
csv_headers = ["name", "platform", "description", "regex"]
def to_csv(self):
"""Indicates model fields to return as csv."""
return (self.name, self.platform.slug, self.regex)
class Meta:
"""Meta information for ConfigRemove model."""
ordering = ("platform", "name")
unique_together = ("name", "platform")
def __str__(self):
"""Return a simple string if model is called."""
return self.name
def get_absolute_url(self): # pylint: disable=no-self-use
"""Return absolute URL for instance."""
return reverse("plugins:nautobot_golden_config:configremove", args=[self.pk])
@extras_features(
"custom_fields",
"custom_links",
"custom_validators",
"export_templates",
"graphql",
"relationships",
"webhooks",
)
class ConfigReplace(PrimaryModel):
"""ConfigReplace for Regex Line Replacements from Backup Configuration Model defintion."""
name = models.CharField(max_length=255, null=False, blank=False)
platform = models.ForeignKey(
to="dcim.Platform",
on_delete=models.CASCADE,
related_name="backup_line_replace",
null=False,
blank=False,
)
description = models.CharField(
max_length=200,
blank=True,
)
regex = models.CharField(
max_length=200,
verbose_name="Regex Pattern to Substitute",
help_text="Regex pattern that will be found and replaced with 'replaced text'.",
)
replace = models.CharField(
max_length=200,
verbose_name="Replaced Text",
help_text="Text that will be inserted in place of Regex pattern match.",
)
clone_fields = ["platform", "description", "regex", "replace"]
csv_headers = ["name", "platform", "description", "regex", "replace"]
def to_csv(self):
"""Indicates model fields to return as csv."""
return (self.name, self.platform.slug, self.regex, self.replace)
class Meta:
"""Meta information for ConfigReplace model."""
ordering = ("platform", "name")
unique_together = ("name", "platform")
def get_absolute_url(self):
"""Return absolute URL for instance."""
return reverse("plugins:nautobot_golden_config:configreplace", args=[self.pk])
def __str__(self):
"""Return a simple string if model is called."""
return self.name
| 36.078148
| 253
| 0.662535
|
032ee13b0a6fc972580451298bc83c2137ee394b
| 333
|
py
|
Python
|
src/webapp/views.py
|
MaximeRaynal/SimpleNote
|
aee1eeba5561dbf985bd7349a0528bfed6ceaeda
|
[
"MIT"
] | null | null | null |
src/webapp/views.py
|
MaximeRaynal/SimpleNote
|
aee1eeba5561dbf985bd7349a0528bfed6ceaeda
|
[
"MIT"
] | null | null | null |
src/webapp/views.py
|
MaximeRaynal/SimpleNote
|
aee1eeba5561dbf985bd7349a0528bfed6ceaeda
|
[
"MIT"
] | null | null | null |
from django.views.decorators.http import require_http_methods
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
@require_http_methods(["GET"])
@login_required
def index(request):
""" Point d'entrée de sert l'application HTML JS CSS """
return render(request, 'webapp/index.html')
| 33.3
| 61
| 0.786787
|
ae471b99b7813c2c5e308d36ddf815fbdda3cf4a
| 693
|
py
|
Python
|
var/spack/repos/builtin/packages/py-mpmath/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-11-16T02:42:57.000Z
|
2019-06-06T19:18:50.000Z
|
var/spack/repos/builtin/packages/py-mpmath/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2021-05-12T05:53:01.000Z
|
2022-03-18T17:30:25.000Z
|
var/spack/repos/builtin/packages/py-mpmath/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-11-06T06:38:51.000Z
|
2020-10-27T07:45:01.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMpmath(PythonPackage):
"""A Python library for arbitrary-precision floating-point arithmetic."""
homepage = "https://mpmath.org"
pypi = "mpmath/mpmath-1.0.0.tar.gz"
version('1.1.0', sha256='fc17abe05fbab3382b61a123c398508183406fa132e0223874578e20946499f6')
version('1.0.0', sha256='04d14803b6875fe6d69e6dccea87d5ae5599802e4b1df7997bddd2024001050c')
version('0.19', sha256='68ddf6426dcda445323467d89892d2cffbbd1ae0b31ac1241b1b671749d63222')
| 40.764706
| 95
| 0.777778
|
8c92bf00844fbcbea3c7c4b548ebfc3d13a7caad
| 47
|
py
|
Python
|
run.py
|
voschezang/ABM
|
523fcf30000057e73ba93f5a500d8896c945a35f
|
[
"MIT"
] | null | null | null |
run.py
|
voschezang/ABM
|
523fcf30000057e73ba93f5a500d8896c945a35f
|
[
"MIT"
] | null | null | null |
run.py
|
voschezang/ABM
|
523fcf30000057e73ba93f5a500d8896c945a35f
|
[
"MIT"
] | null | null | null |
from src.server import server
server.launch()
| 11.75
| 29
| 0.787234
|
6eaec38fbc05eb1d56b81f177995f4b20dbc4888
| 899
|
py
|
Python
|
src/nodeconductor_paas_oracle/log.py
|
opennode/nodeconductor-paas-oracle
|
799c05438265da0b328bcaa425af01e9576f57fe
|
[
"MIT"
] | null | null | null |
src/nodeconductor_paas_oracle/log.py
|
opennode/nodeconductor-paas-oracle
|
799c05438265da0b328bcaa425af01e9576f57fe
|
[
"MIT"
] | null | null | null |
src/nodeconductor_paas_oracle/log.py
|
opennode/nodeconductor-paas-oracle
|
799c05438265da0b328bcaa425af01e9576f57fe
|
[
"MIT"
] | null | null | null |
from nodeconductor.logging.loggers import EventLogger, event_logger
from nodeconductor_paas_oracle.models import Deployment
class OracleDeploymentEventLogger(EventLogger):
deployment = Deployment
jira_issue_key = basestring
class Meta:
nullable_fields = ['jira_issue_key']
event_types = (
'oracle_deployment_resize_requested',
'oracle_deployment_resize_succeeded',
'oracle_deployment_start_requested',
'oracle_deployment_start_succeeded',
'oracle_deployment_restart_requested',
'oracle_deployment_restart_succeeded',
'oracle_deployment_stop_requested',
'oracle_deployment_stop_succeeded',
'oracle_deployment_support_requested',
'oracle_deployment_report_updated',
)
event_logger.register('oracle_deployment', OracleDeploymentEventLogger)
| 34.576923
| 71
| 0.718576
|
ae31d1f336665e6427fa20caa6691927beb6117d
| 7,569
|
py
|
Python
|
rssant_common/dns_service.py
|
lixiupei/rssant
|
a0602c561fbde3a564b4dadd177a96db64be8a56
|
[
"BSD-3-Clause"
] | null | null | null |
rssant_common/dns_service.py
|
lixiupei/rssant
|
a0602c561fbde3a564b4dadd177a96db64be8a56
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T11:23:24.000Z
|
2022-02-10T11:36:33.000Z
|
rssant_common/dns_service.py
|
lixiupei/rssant
|
a0602c561fbde3a564b4dadd177a96db64be8a56
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import List, Dict, Any
import logging
import random
import ipaddress
import ssl
import socket
import asyncio
from urllib.parse import urlparse
from collections import defaultdict
from urllib3.util import connection
import aiohttp
from rssant_config import CONFIG
from .rss_proxy import RSSProxyClient, ProxyStrategy
from .helper import get_or_create_event_loop
LOG = logging.getLogger(__name__)
_orig_create_connection = connection.create_connection
_cache_records_text = """
104.26.12.87 rsshub.app
104.26.13.87 rsshub.app
168.235.96.195 kindle4rss.com
168.235.96.195 feedmaker.kindle4rss.com
192.30.255.112 github.com
192.30.255.116 api.github.com
"""
def _read_records(text) -> dict:
records = defaultdict(set)
for line in text.strip().splitlines():
ip, host = line.split()
records[host].add(ip)
return records
_CACHE_RECORDS = _read_records(_cache_records_text)
def _is_public_ipv4(value):
try:
ip = ipaddress.ip_address(value)
except ipaddress.AddressValueError:
return False
return not ip.is_private
class RssantAsyncResolver(aiohttp.AsyncResolver):
def __init__(self, *args, dns_service, **kwargs):
self._dns_service = dns_service
super().__init__(*args, **kwargs)
async def resolve(
self, host: str, port: int = 0,
family: int = socket.AF_INET
) -> List[Dict[str, Any]]:
hosts = self._dns_service.resolve_aiohttp(host, port)
if hosts:
return hosts
return await super().resolve(host, port, family=family)
class DNSService:
def __init__(self, client: RSSProxyClient, records: dict = None):
self.hosts = list(records or {})
self.update(records or {})
self.client = client
def update(self, records: dict):
new_records = defaultdict(set)
for host, ip_set in records.items():
new_records[host].update(ip_set)
self.records = new_records
def is_resolved_host(self, host) -> bool:
return bool(self.records.get(host))
def is_resolved_url(self, url) -> bool:
host = urlparse(url).hostname
return self.is_resolved_host(host)
def resolve(self, host) -> list:
ip_set = self.records.get(host)
return list(ip_set) if ip_set else []
def resolve_urllib3(self, host):
ip_set = self.resolve(host)
if ip_set:
ip = random.choice(list(ip_set))
LOG.info('resolve_urllib3 %s to %s', host, ip)
return ip
return host
def aiohttp_resolver(self, **kwargs):
return RssantAsyncResolver(dns_service=self, **kwargs)
def resolve_aiohttp(self, host, port):
hosts = []
ip_set = self.resolve(host)
if not ip_set:
return hosts
LOG.info('resolve_aiohttp %s to %s', host, ip_set)
for ip in ip_set:
hosts.append({
'hostname': host,
'host': ip, 'port': port,
'family': socket.AF_INET, 'proto': 0,
'flags': socket.AI_NUMERICHOST
})
return hosts
def refresh(self):
records = defaultdict(set)
for host, ip_set in self.query_from_cloudflare().items():
records[host].update(ip_set)
LOG.info('resolved from cloudflare: %r', dict(records))
if self.client.has_rss_proxy:
for host, ip_set in self.query_from_google().items():
records[host].update(ip_set)
LOG.info('resolved from google: %r', dict(records))
records = self.validate_records(records)
LOG.info('refresh records: %r', dict(records))
self.update(records)
async def _verify_record_task(self, host, ip):
_NetworkErrors = (
socket.timeout, TimeoutError, asyncio.TimeoutError,
ssl.SSLError, ssl.CertificateError, ConnectionError,
)
try:
reader, writer = await asyncio.wait_for(asyncio.open_connection(
host=ip, port=443,
family=socket.AF_INET,
ssl=True,
server_hostname=host,
ssl_handshake_timeout=10,
), timeout=15)
except _NetworkErrors as ex:
LOG.info(f'verify_record host={host} ip={ip} {ex!r}')
return (host, ip, False)
try:
writer.close()
await writer.wait_closed()
except _NetworkErrors:
pass # ignore
return (host, ip, True)
async def _validate_records(self, records: dict):
valid_records = defaultdict(set)
tasks = []
for host, ip_set in records.items():
for ip in ip_set:
tasks.append(self._verify_record_task(host, ip))
for item in await asyncio.gather(*tasks):
host, ip, ok = item
if ok:
valid_records[host].add(ip)
return valid_records
def validate_records(self, records: dict) -> dict:
loop = get_or_create_event_loop()
valid_records = loop.run_until_complete(self._validate_records(records))
return valid_records
def query_from_dns_over_tls(self, url_template: str) -> dict:
headers = {'accept': 'application/dns-json'}
records = defaultdict(set)
for host in self.hosts:
url = url_template.format(name=host)
LOG.info(f'query {url}')
try:
response = self.client.request('GET', url, headers=headers)
response.raise_for_status()
except Exception as ex:
LOG.warning(ex, exc_info=ex)
continue
for item in response.json()['Answer']:
if item['type'] == 1: # ipv4
ip = item['data']
if ip and _is_public_ipv4(ip):
records[host].add(ip)
return records
def query_from_cloudflare(self):
url_template = 'https://cloudflare-dns.com/dns-query?name={name}&type=A'
return self.query_from_dns_over_tls(url_template)
def query_from_google(self):
url_template = 'https://dns.google.com/resolve?name={name}&type=A'
return self.query_from_dns_over_tls(url_template)
def patch_urllib3(self):
"""
https://stackoverflow.com/questions/22609385/python-requests-library-define-specific-dns
"""
connection.create_connection = self._patched_create_connection
def _patched_create_connection(self, address, *args, **kwargs):
"""Wrap urllib3's create_connection to resolve the name elsewhere"""
# resolve hostname to an ip address; use your own
# resolver here, as otherwise the system resolver will be used.
host, port = address
hostname = self.resolve_urllib3(host)
return _orig_create_connection((hostname, port), *args, **kwargs)
def _setup():
_rss_proxy_options = {}
if CONFIG.rss_proxy_enable:
_rss_proxy_options.update(
rss_proxy_url=CONFIG.rss_proxy_url,
rss_proxy_token=CONFIG.rss_proxy_token,
)
def proxy_strategy(url):
if 'google.com' in url:
return ProxyStrategy.PROXY_FIRST
else:
return ProxyStrategy.DIRECT_FIRST
_rss_proxy_client = RSSProxyClient(
**_rss_proxy_options, proxy_strategy=proxy_strategy)
service = DNSService(client=_rss_proxy_client, records=_CACHE_RECORDS)
service.patch_urllib3()
return service
DNS_SERVICE = _setup()
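# Usage sketch: DNS_SERVICE is already patched into urllib3 above, so the calls
# below only use APIs defined in this module and the bundled cache records.
if __name__ == '__main__':
    print(DNS_SERVICE.resolve('github.com'))            # e.g. ['192.30.255.112']
    print(DNS_SERVICE.is_resolved_url('https://github.com/owner/repo'))  # True
    # DNS_SERVICE.refresh()  # optional: re-query DNS-over-HTTPS (needs network)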
| 32.072034
| 96
| 0.624389
|
2d6f8831b91239fb9a38ff5b71f93aa25d557c4e
| 1,181
|
py
|
Python
|
blog/migrations/0015_blogpage_body.py
|
dixonary/uwcs-zarya
|
e982f324c0a6aca156a15c6f597012f47bc070cc
|
[
"MIT"
] | 7
|
2016-09-30T20:33:46.000Z
|
2020-03-16T15:04:20.000Z
|
blog/migrations/0015_blogpage_body.py
|
dixonary/uwcs-zarya
|
e982f324c0a6aca156a15c6f597012f47bc070cc
|
[
"MIT"
] | 46
|
2016-10-01T17:52:59.000Z
|
2022-01-13T00:44:09.000Z
|
blog/migrations/0015_blogpage_body.py
|
dixonary/uwcs-zarya
|
e982f324c0a6aca156a15c6f597012f47bc070cc
|
[
"MIT"
] | 6
|
2016-10-15T02:51:18.000Z
|
2020-10-02T12:47:54.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-14 18:56
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.documents.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('blog', '0014_auto_20160814_1854'),
]
operations = [
migrations.AddField(
model_name='blogpage',
name='body',
field=wagtail.core.fields.StreamField((('h2', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.core.blocks.CharBlock(classname='title', icon='title')), ('paragraph', wagtail.core.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.images.blocks.ImageChooserBlock()), ('pullquote', wagtail.core.blocks.StructBlock((('quote', wagtail.core.blocks.TextBlock('quote title')), ('attribution', wagtail.core.blocks.CharBlock())))), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse'))), default=''),
preserve_default=False,
),
]
| 45.423077 | 643 | 0.695174 |
c42444fcfdf4d88125e1ed0c4995b2b8fc26fc7d | 82,000 | py | Python | tests/trainer/test_trainer.py | alessiobonfiglio/pytorch-lightning | c453caf57e8ee65aaf82d4d42b26d7634dbf7046 | ["Apache-2.0"] | 1 | 2021-12-05T09:12:43.000Z | 2021-12-05T09:12:43.000Z | tests/trainer/test_trainer.py | AshleySato899/pytorch-lightning | 854bdc042d12fe4b713de881c58b025de30d0c39 | ["Apache-2.0"] | null | null | null | tests/trainer/test_trainer.py | AshleySato899/pytorch-lightning | 854bdc042d12fe4b713de881c58b025de30d0c39 | ["Apache-2.0"] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import math
import os
import pickle
import sys
from argparse import Namespace
from copy import deepcopy
from pathlib import Path
from unittest import mock
from unittest.mock import ANY, call, patch
import cloudpickle
import pytest
import torch
from omegaconf import OmegaConf
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.optim import SGD
from torch.utils.data import DataLoader, IterableDataset
import tests.helpers.utils as tutils
from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
from pytorch_lightning.callbacks import EarlyStopping, GradientAccumulationScheduler, ModelCheckpoint, Timer
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
from pytorch_lightning.plugins import DDPSpawnPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import DeviceType, DistributedType
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.exceptions import DeadlockDetectedException, MisconfigurationException
from pytorch_lightning.utilities.seed import seed_everything
from tests.base import EvalModelTemplate
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.boring_model import RandomIterableDataset, RandomIterableDatasetWithLen
from tests.helpers.runif import RunIf
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", str(tmpdir))
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, logger=logger, callbacks=[ModelCheckpoint(dirpath=tmpdir)])
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# assert ckpt has hparams
ckpt = torch.load(new_weights_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), "hyper_parameters missing from checkpoints"
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt
else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_no_val_end_module(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, logger=logger, callbacks=[ModelCheckpoint(dirpath=tmpdir)])
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt
else new_weights_path
)
model_2 = EvalModelTemplate.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path)
model_2.eval()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
model = EvalModelTemplate()
# Extra layer
model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, logger=logger, callbacks=[ModelCheckpoint(dirpath=tmpdir)])
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
# save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, "hparams.yaml")
ckpt_path = (
f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
if url_ckpt
else new_weights_path
)
try:
EvalModelTemplate.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path)
# todo: specify the possible exception
except Exception:
failed = True
else:
failed = False
assert failed, "Model should not been loaded since the extra layer added."
failed = False
try:
EvalModelTemplate.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False)
# todo: specify the possible exception
except Exception:
failed = True
assert not failed, "Model should be loaded due to strict=False."
def test_trainer_accumulate_grad_batches_incorrect_value(tmpdir):
with pytest.raises(MisconfigurationException, match=".*should be an int or a dict.*"):
Trainer(default_root_dir=tmpdir, accumulate_grad_batches=(2, 5))
def test_trainer_accumulate_grad_batches_with_grad_acc_callback(tmpdir):
with pytest.raises(
MisconfigurationException, match=".*set both `accumulate_grad_batches` and passed an instance.*"
):
Trainer(default_root_dir=tmpdir, accumulate_grad_batches=7, callbacks=[GradientAccumulationScheduler({0: 2})])
@pytest.mark.parametrize(
["accumulate_grad_batches", "limit_train_batches"],
[
({1: 2, 3: 4}, 1.0),
({1: 2, 3: 4}, 0.5), # not to be divisible by accumulate_grad_batches on purpose
(3, 1.0),
(3, 0.8), # not to be divisible by accumulate_grad_batches on purpose
(4, 1.0),
(4, 0.7), # not to be divisible by accumulate_grad_batches on purpose
],
)
def test_gradient_accumulation_scheduling_last_batch(tmpdir, accumulate_grad_batches, limit_train_batches):
"""Verify optimizer.step() applied to last batch while grad accumulation."""
class TestModel(BoringModel):
def state_dict(self, *args, **kwargs):
return deepcopy(super().state_dict(*args, **kwargs))
def check(self, d1, d2, equal=True):
keys = d1.keys() | d2.keys()
values = [torch.equal(d1[k], d2[k]) for k in keys]
return all(values) if equal else not any(values)
def backward(self, *args, **kwargs) -> None:
pre_bwd_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_bwd_state_dict)
out = super().backward(*args, **kwargs)
# state dict is equal, just the gradients changed
assert self.check(pre_bwd_state_dict, self.state_dict())
return out
def optimizer_step(self, *args, **kwargs):
pre_opt_step_state_dict = self.state_dict()
assert self.check(self.start_state_dict, pre_opt_step_state_dict)
# this calls `backward` and `on_after_backward` inside the closure
out = super().optimizer_step(*args, **kwargs)
# the state dict changed
assert self.check(pre_opt_step_state_dict, self.state_dict(), equal=False)
self.opt_step_called = True
return out
def on_train_batch_start(self, *_):
self.start_state_dict = self.state_dict()
self.opt_step_called = False
def on_train_batch_end(self, outputs, batch, batch_idx):
end_state_dict = self.state_dict()
is_last_batch = (batch_idx + 1) == self.trainer.num_training_batches
if is_last_batch or self.opt_step_called:
assert self.check(self.start_state_dict, end_state_dict, equal=False)
else:
assert self.check(self.start_state_dict, end_state_dict)
model = TestModel()
trainer = Trainer(
accumulate_grad_batches=accumulate_grad_batches,
max_epochs=2,
limit_train_batches=limit_train_batches,
limit_val_batches=0,
default_root_dir=tmpdir,
enable_progress_bar=False,
)
trainer.fit(model)
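# Back-of-envelope check of the idea tested above (pure arithmetic, hypothetical numbers):
# when the batch count is not divisible by accumulate_grad_batches, the trailing partial
# accumulation window must still be flushed with an optimizer step at the end of the epoch.
def _expected_optimizer_steps(num_batches, accumulate):
    return num_batches // accumulate + (1 if num_batches % accumulate else 0)
assert _expected_optimizer_steps(num_batches=10, accumulate=4) == 3  # 2 full windows + 1 partial window
assert _expected_optimizer_steps(num_batches=8, accumulate=4) == 2   # divides evenly, no extra step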
def test_loading_meta_tags(tmpdir):
"""test for backward compatibility to meta_tags.csv."""
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_HPARAMS_FILE)
hparams = load_hparams_from_yaml(hparams_path)
# save as legacy meta_tags.csv
tags_path = os.path.join(path_expt_dir, "meta_tags.csv")
save_hparams_to_tags_csv(tags_path, hparams)
tags = load_hparams_from_tags_csv(tags_path)
assert hparams == tags
def test_loading_yaml(tmpdir):
tutils.reset_seed()
hparams = EvalModelTemplate.get_default_hparams()
# save tags
logger = tutils.get_default_logger(tmpdir)
logger.log_hyperparams(Namespace(some_str="a_str", an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
# load hparams
path_expt_dir = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(path_expt_dir, "hparams.yaml")
tags = load_hparams_from_yaml(hparams_path)
assert tags["batch_size"] == 32 and tags["hidden_dim"] == 1000
@pytest.mark.parametrize(
"save_top_k,save_last,expected_files",
[
pytest.param(-1, False, [f"epoch={i}.ckpt" for i in range(5)], id="CASE K=-1 (all)"),
pytest.param(1, False, {"epoch=4.ckpt"}, id="CASE K=1 (2.5, epoch 4)"),
pytest.param(2, False, [f"epoch={i}.ckpt" for i in (2, 4)], id="CASE K=2 (2.5 epoch 4, 2.8 epoch 2)"),
pytest.param(4, False, [f"epoch={i}.ckpt" for i in range(1, 5)], id="CASE K=4 (save all 4 base)"),
pytest.param(3, False, [f"epoch={i}.ckpt" for i in range(2, 5)], id="CASE K=3 (save the 2nd, 3rd, 4th model)"),
pytest.param(1, True, {"epoch=4.ckpt", "last.ckpt"}, id="CASE K=1 (save the 4th model and the last model)"),
],
)
def test_model_checkpoint_options(tmpdir, save_top_k, save_last, expected_files):
"""Test ModelCheckpoint options."""
def mock_save_function(filepath, *args):
open(filepath, "a").close()
# simulated losses
losses = [10, 9, 2.8, 5, 2.5]
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir,
filename="{epoch}",
monitor="checkpoint_on",
save_top_k=save_top_k,
save_last=save_last,
verbose=True,
)
trainer = Trainer()
trainer.state.fn = TrainerFn.FITTING
trainer.save_checkpoint = mock_save_function
# emulate callback's calls during the training
for i, loss in enumerate(losses):
trainer.fit_loop.current_epoch = i
trainer.fit_loop.global_step = i
trainer.callback_metrics.update({"checkpoint_on": loss})
checkpoint_callback.on_validation_end(trainer, trainer.lightning_module)
file_lists = set(os.listdir(tmpdir))
assert len(file_lists) == len(
expected_files
), f"Should save {len(expected_files)} models when save_top_k={save_top_k} but found={file_lists}"
# verify correct naming
for fname in expected_files:
assert fname in file_lists
def test_model_checkpoint_only_weights(tmpdir):
"""Tests use case where ModelCheckpoint is configured to save only model weights, and user tries to load
checkpoint to resume training."""
model = EvalModelTemplate()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_weights_only=True)],
)
# fit model
trainer.fit(model)
# training complete
assert trainer.state.finished, f"Training failed with {trainer.state}"
checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
# assert saved checkpoint has no trainer data
checkpoint = torch.load(checkpoint_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert loading model works when checkpoint has only weights
assert EvalModelTemplate.load_from_checkpoint(checkpoint_path=checkpoint_path)
# directly save model
new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
trainer.save_checkpoint(new_weights_path, weights_only=True)
# assert saved checkpoint has no trainer data
checkpoint = torch.load(new_weights_path)
assert "optimizer_states" not in checkpoint, "checkpoint should contain only model weights"
assert "lr_schedulers" not in checkpoint, "checkpoint should contain only model weights"
# assert restoring train state fails
with pytest.raises(KeyError, match="checkpoint contains only the model"):
trainer.checkpoint_connector.restore(new_weights_path)
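# Illustration of the distinction asserted above (assumed minimal dicts, not real checkpoints):
# a full checkpoint carries trainer/optimizer state on top of the weights, while a
# weights-only checkpoint keeps just the model state_dict.
_full_ckpt = {"state_dict": {}, "optimizer_states": [], "lr_schedulers": [], "epoch": 1}
_weights_only_ckpt = {"state_dict": {}}
assert "optimizer_states" in _full_ckpt and "lr_schedulers" in _full_ckpt
assert "optimizer_states" not in _weights_only_ckpt and "lr_schedulers" not in _weights_only_ckpt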
def test_model_freeze_unfreeze():
model = EvalModelTemplate()
model.freeze()
model.unfreeze()
@pytest.mark.parametrize("url_ckpt", [True, False])
def test_resume_from_checkpoint_epoch_restored(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Verify resuming from checkpoint runs the right number of epochs."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv("TORCH_HOME", tmpdir)
class TestModel(BoringModel):
# Model that tracks epochs and batches seen
num_epochs_end_seen = 0
num_batches_seen = 0
num_on_load_checkpoint_called = 0
def on_epoch_end(self):
self.num_epochs_end_seen += 1
def on_train_batch_start(self, *_):
self.num_batches_seen += 1
def on_load_checkpoint(self, _):
self.num_on_load_checkpoint_called += 1
model = TestModel()
trainer = Trainer(
max_epochs=2,
limit_train_batches=0.65,
limit_val_batches=1,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_top_k=-1)],
default_root_dir=tmpdir,
val_check_interval=1.0,
enable_progress_bar=False,
logger=False,
enable_model_summary=False,
)
trainer.fit(model)
# `on_epoch_end` will be called once for val_sanity, twice for train, twice for val
assert model.num_epochs_end_seen == 1 + 2 + 2
assert model.num_batches_seen == trainer.num_training_batches * 2
assert model.num_on_load_checkpoint_called == 0
# Other checkpoints can be uncommented if/when resuming mid-epoch is supported
checkpoints = Path(trainer.checkpoint_callback.dirpath).glob("*.ckpt")
if url_ckpt:
# transform local paths into url checkpoints
ip, port = tmpdir_server
checkpoints = [f"http://{ip}:{port}/" + ckpt.name for ckpt in checkpoints]
for ckpt in checkpoints:
next_model = TestModel()
state = pl_load(ckpt)
# Resume training
new_trainer = Trainer(default_root_dir=tmpdir, resume_from_checkpoint=ckpt, max_epochs=2)
new_trainer.fit(next_model)
assert state["global_step"] + next_model.num_batches_seen == trainer.num_training_batches * trainer.max_epochs
assert next_model.num_on_load_checkpoint_called == 1
def test_trainer_max_steps_and_epochs(tmpdir):
"""Verify model trains according to specified max steps."""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define less train steps than epochs
trainer_kwargs = {
"limit_train_batches": 0.5,
"default_root_dir": tmpdir,
"max_epochs": 3,
"max_steps": num_train_samples + 10,
"logger": False,
"enable_model_summary": False,
"enable_progress_bar": False,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
# define less train epochs than steps
trainer_kwargs["max_epochs"] = 2
trainer_kwargs["max_steps"] = 3 * 2 * num_train_samples
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == num_train_samples * trainer.max_epochs
assert trainer.current_epoch == trainer.max_epochs - 1, "Model did not stop at max_epochs"
# if max_steps is positive and max_epochs is negative, use max_steps
trainer_kwargs["max_epochs"] = -1
trainer_kwargs["max_steps"] = 3
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == 3
@pytest.mark.parametrize(
"max_epochs,max_steps,incorrect_variable,incorrect_value",
[
(-100, None, "max_epochs", -100),
(1, -2, "max_steps", -2),
],
)
def test_trainer_max_steps_and_epochs_validation(max_epochs, max_steps, incorrect_variable, incorrect_value):
"""Don't allow max_epochs or max_steps to be less than -1 or a float."""
with pytest.raises(
MisconfigurationException,
match=f"`{incorrect_variable}` must be a positive integer or -1. You passed in {incorrect_value}",
):
Trainer(max_epochs=max_epochs, max_steps=max_steps)
@pytest.mark.parametrize(
"max_epochs,max_steps,is_done,correct_trainer_epochs",
[
(None, None, False, 1000),
(-1, None, False, -1),
(None, -1, False, None),
(5, -1, False, 5),
(-1, 10, False, -1),
(None, 0, True, None),
(0, None, True, 0),
(-1, 0, True, -1),
(0, -1, True, 0),
],
)
def test_trainer_max_steps_and_epochs_fit_loop_done(max_epochs, max_steps, is_done, correct_trainer_epochs):
trainer = Trainer(max_epochs=max_epochs, max_steps=max_steps)
assert trainer.max_epochs == correct_trainer_epochs
assert trainer.max_steps == max_steps
assert trainer.fit_loop.done is is_done
# Make sure there is no timer
timer_callbacks = [c for c in trainer.callbacks if isinstance(c, Timer)]
assert len(timer_callbacks) == 0
def test_trainer_min_steps_and_epochs(tmpdir):
"""Verify model trains according to specified min steps."""
model = EvalModelTemplate()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
trainer_kwargs = {
"limit_train_batches": 0.5,
"default_root_dir": tmpdir,
# define callback for stopping the model
"callbacks": [EarlyStopping(monitor="early_stop_on", min_delta=1.0)],
"val_check_interval": 2,
"min_epochs": 1,
"max_epochs": 7,
# define less min steps than 1 epoch
"min_steps": num_train_samples // 2,
"logger": False,
"enable_model_summary": False,
"enable_progress_bar": False,
}
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= num_train_samples, "Model did not train for at least min_epochs"
# define less epochs than min_steps
trainer_kwargs["min_steps"] = math.floor(num_train_samples * 1.5)
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch > 0
assert trainer.global_step >= math.floor(num_train_samples * 1.5), "Model did not train for at least min_steps"
def test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):
"""Test that min_epochs/min_steps in Trainer are enforced even if EarlyStopping is triggered."""
class TestModel(BoringModel):
training_step_invoked = 0
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
output["loss"] = output["loss"] * 0.0 # force minimal loss to trigger early stopping
self.log("loss", output["loss"])
self.training_step_invoked += 1
assert not self.trainer.should_stop
return output
model = TestModel()
early_stop = EarlyStopping(monitor="loss", patience=0, check_on_train_epoch_end=True)
min_epochs = 5
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
min_epochs=min_epochs,
limit_val_batches=0,
limit_train_batches=2,
callbacks=[early_stop],
)
with caplog.at_level(logging.INFO, logger="pytorch_lightning.trainer.trainer"):
trainer.fit(model)
message = f"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. Training will continue"
num_messages = sum(1 for record in caplog.records if message in record.message)
assert num_messages == min_epochs - 2
assert model.training_step_invoked == min_epochs * 2
def test_trainer_max_steps_accumulate_batches(tmpdir):
"""Verify model trains according to specified max steps with grad accumulated batches."""
model = BoringModel()
num_train_samples = math.floor(len(model.train_dataloader()) * 0.5)
# define less train steps than epochs
trainer = Trainer(
limit_train_batches=0.5,
default_root_dir=tmpdir,
max_steps=num_train_samples + 10,
accumulate_grad_batches=10,
logger=False,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == trainer.max_steps, "Model did not stop at max_steps"
def test_benchmark_option(tmpdir):
"""Verify benchmark option."""
model = EvalModelTemplate()
model.val_dataloader = model.val_dataloader__multiple
# verify torch.backends.cudnn.benchmark is not turned on
assert not torch.backends.cudnn.benchmark
# fit model
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, benchmark=True)
trainer.fit(model)
# verify training completed
assert trainer.state.finished, f"Training failed with {trainer.state}"
# verify torch.backends.cudnn.benchmark is not turned off
assert torch.backends.cudnn.benchmark
@pytest.mark.parametrize("ckpt_path", (None, "best", "specific"))
@pytest.mark.parametrize("save_top_k", (-1, 0, 1, 2))
@pytest.mark.parametrize("fn", ("validate", "test", "predict"))
def test_tested_checkpoint_path(tmpdir, ckpt_path, save_top_k, fn):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
self.log("foo", -batch_idx)
return super().validation_step(batch, batch_idx)
def test_step(self, *args):
return self.validation_step(*args)
def predict_step(self, batch, *_):
return self(batch)
model = TestModel()
model.test_epoch_end = None
trainer = Trainer(
max_epochs=2,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
enable_progress_bar=False,
default_root_dir=tmpdir,
callbacks=[ModelCheckpoint(monitor="foo", save_top_k=save_top_k)],
)
trainer.fit(model)
trainer_fn = getattr(trainer, fn)
path_attr = f"{fn}{'d' if fn == 'validate' else 'ed'}_ckpt_path"
assert getattr(trainer, path_attr) is None
if ckpt_path == "best":
# ckpt_path is 'best', meaning we load the best weights
if save_top_k == 0:
with pytest.raises(MisconfigurationException, match=".*is not configured to save the best.*"):
trainer_fn(ckpt_path=ckpt_path)
with pytest.raises(MisconfigurationException, match=".*is not configured to save the best.*"):
trainer_fn(model, ckpt_path=ckpt_path)
else:
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
trainer_fn(model, ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
elif ckpt_path is None:
# ckpt_path is None, meaning we don't load any checkpoints and use the provided model
trainer_fn(model, ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) is None
if save_top_k > 0:
# ckpt_path is None with no model provided means load the best weights
with pytest.warns(UserWarning, match="The best model of the previous `fit` call will be used"):
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
else:
# specific checkpoint, pick one from saved ones
if save_top_k == 0:
with pytest.raises(FileNotFoundError):
trainer_fn(ckpt_path="random.ckpt")
else:
ckpt_path = str(
list((Path(tmpdir) / f"lightning_logs/version_{trainer.logger.version}/checkpoints").iterdir())[
0
].absolute()
)
trainer_fn(ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == ckpt_path
trainer_fn(model, ckpt_path=ckpt_path)
assert getattr(trainer, path_attr) == ckpt_path
@pytest.mark.parametrize("enable_checkpointing", (False, True))
@pytest.mark.parametrize("fn", ("validate", "test", "predict"))
def test_tested_checkpoint_path_best(tmpdir, enable_checkpointing, fn):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
self.log("foo", -batch_idx)
return super().validation_step(batch, batch_idx)
def test_step(self, *args):
return self.validation_step(*args)
def predict_step(self, batch, *_):
return self(batch)
model = TestModel()
model.test_epoch_end = None
trainer = Trainer(
max_epochs=2,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
enable_progress_bar=False,
default_root_dir=tmpdir,
enable_checkpointing=enable_checkpointing,
)
trainer.fit(model)
trainer_fn = getattr(trainer, fn)
path_attr = f"{fn}{'d' if fn == 'validate' else 'ed'}_ckpt_path"
assert getattr(trainer, path_attr) is None
if enable_checkpointing:
trainer_fn(ckpt_path="best")
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
trainer_fn(model, ckpt_path="best")
assert getattr(trainer, path_attr) == trainer.checkpoint_callback.best_model_path
else:
with pytest.raises(MisconfigurationException, match="`ModelCheckpoint` is not configured."):
trainer_fn(ckpt_path="best")
with pytest.raises(MisconfigurationException, match="`ModelCheckpoint` is not configured."):
trainer_fn(model, ckpt_path="best")
def test_disabled_training(tmpdir):
"""Verify that `limit_train_batches=0` disables the training loop unless `fast_dev_run=True`."""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
model = CurrentModel()
trainer_options = dict(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=2,
limit_train_batches=0.0,
limit_val_batches=0.2,
fast_dev_run=False,
)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
# check that limit_train_batches=0 turns off training
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert not model.training_step_invoked, "`training_step` should not run when `limit_train_batches=0`"
assert not model.training_epoch_end_invoked, "`training_epoch_end` should not run when `limit_train_batches=0`"
# check that limit_train_batches has no influence when fast_dev_run is turned on
model = CurrentModel()
trainer_options.update(fast_dev_run=True)
before_state_dict = deepcopy(model.state_dict())
trainer = Trainer(**trainer_options)
trainer.fit(model)
after_state_dict = model.state_dict()
for key in before_state_dict.keys():
assert not torch.all(torch.eq(before_state_dict[key], after_state_dict[key]))
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.training_step_invoked, "did not run `training_step` with `fast_dev_run=True`"
assert model.training_epoch_end_invoked, "did not run `training_epoch_end` with `fast_dev_run=True`"
def test_disabled_validation(tmpdir):
"""Verify that `limit_val_batches=0` disables the validation loop unless `fast_dev_run=True`."""
class CurrentModel(EvalModelTemplate):
validation_step_invoked = False
validation_epoch_end_invoked = False
def validation_step(self, *args, **kwargs):
self.validation_step_invoked = True
return super().validation_step(*args, **kwargs)
def validation_epoch_end(self, *args, **kwargs):
self.validation_epoch_end_invoked = True
return super().validation_epoch_end(*args, **kwargs)
hparams = EvalModelTemplate.get_default_hparams()
model = CurrentModel(**hparams)
trainer_options = dict(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.0,
fast_dev_run=False,
)
trainer = Trainer(**trainer_options)
trainer.fit(model)
# check that limit_val_batches=0 turns off validation
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 1
assert not model.validation_step_invoked, "`validation_step` should not run when `limit_val_batches=0`"
assert not model.validation_epoch_end_invoked, "`validation_epoch_end` should not run when `limit_val_batches=0`"
# check that limit_val_batches has no influence when fast_dev_run is turned on
model = CurrentModel(**hparams)
trainer_options.update(fast_dev_run=True)
trainer = Trainer(**trainer_options)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch == 0
assert model.validation_step_invoked, "did not run `validation_step` with `fast_dev_run=True`"
assert model.validation_epoch_end_invoked, "did not run `validation_epoch_end` with `fast_dev_run=True`"
@mock.patch("torch.Tensor.backward")
def test_nan_loss_detection(backward_mock, tmpdir):
class CurrentModel(BoringModel):
test_batch_inf = 3
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
if batch_idx == self.test_batch_inf:
if isinstance(output, dict):
output["loss"] *= torch.tensor(math.inf) # make loss infinite
else:
output /= 0
return output
model = CurrentModel()
# fit model
trainer = Trainer(default_root_dir=tmpdir, max_steps=(model.test_batch_inf + 1), terminate_on_nan=True)
with pytest.raises(ValueError, match=r".*The loss returned in `training_step` is.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_inf
assert backward_mock.call_count == model.test_batch_inf
for param in model.parameters():
assert torch.isfinite(param).all()
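# The detection that terminate_on_nan relies on, shown in isolation (standalone sketch):
# a loss scaled by inf or divided by zero stops being finite, which torch.isfinite flags.
import math
import torch
assert torch.isfinite(torch.tensor(1.0))
assert not torch.isfinite(torch.tensor(1.0) * torch.tensor(math.inf))
assert not torch.isfinite(torch.tensor(0.0) / torch.tensor(0.0))  # nan is also non-finite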
def test_invalid_terminate_on_nan(tmpdir):
with pytest.raises(TypeError, match="`terminate_on_nan` should be a bool"):
Trainer(default_root_dir=tmpdir, terminate_on_nan="False")
@pytest.mark.parametrize("track_grad_norm", [0, torch.tensor(1), "nan"])
def test_invalid_track_grad_norm(tmpdir, track_grad_norm):
with pytest.raises(MisconfigurationException, match="`track_grad_norm` must be a positive number or 'inf'"):
Trainer(default_root_dir=tmpdir, track_grad_norm=track_grad_norm)
@mock.patch("torch.Tensor.backward")
def test_nan_params_detection(backward_mock, tmpdir):
class CurrentModel(BoringModel):
test_batch_nan = 3
def on_after_backward(self):
if self.global_step == self.test_batch_nan:
# simulate parameter that became nan
torch.nn.init.constant_(self.layer.bias, math.nan)
model = CurrentModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=(model.test_batch_nan + 1), terminate_on_nan=True)
with pytest.raises(ValueError, match=r".*Detected nan and/or inf values in `layer.bias`.*"):
trainer.fit(model)
assert trainer.global_step == model.test_batch_nan
assert backward_mock.call_count == model.test_batch_nan + 1
# after aborting the training loop, model still has nan-valued params
params = torch.cat([param.view(-1) for param in model.parameters()])
assert not torch.isfinite(params).all()
def test_on_exception_hook(tmpdir):
"""Test the on_exception callback hook and the trainer interrupted flag."""
model = BoringModel()
class InterruptCallback(Callback):
def __init__(self):
super().__init__()
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
raise KeyboardInterrupt
def on_test_start(self, trainer, pl_module):
raise MisconfigurationException
class HandleInterruptCallback(Callback):
def __init__(self):
super().__init__()
self.exception = None
self.exc_info = None
def on_exception(self, trainer, pl_module, exception):
self.exception = exception
def on_keyboard_interrupt(self, trainer, pl_module):
self.exc_info = sys.exc_info()
interrupt_callback = InterruptCallback()
handle_interrupt_callback = HandleInterruptCallback()
trainer = Trainer(
callbacks=[interrupt_callback, handle_interrupt_callback],
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
enable_progress_bar=False,
logger=False,
default_root_dir=tmpdir,
)
assert not trainer.interrupted
assert handle_interrupt_callback.exception is None
assert handle_interrupt_callback.exc_info is None
trainer.fit(model)
assert trainer.interrupted
assert isinstance(handle_interrupt_callback.exception, KeyboardInterrupt)
assert isinstance(handle_interrupt_callback.exc_info[1], KeyboardInterrupt)
with pytest.raises(MisconfigurationException):
trainer.test(model)
assert trainer.interrupted
assert isinstance(handle_interrupt_callback.exception, MisconfigurationException)
@pytest.mark.parametrize(
"precision",
[32, pytest.param(16, marks=RunIf(min_gpus=1))],
)
def test_gradient_clipping_by_norm(tmpdir, precision):
"""Test gradient clipping by norm."""
tutils.reset_seed()
model = EvalModelTemplate() # TODO: when precision=16, BoringModel produces NaN, but EvalModelTemplate not
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=1,
max_epochs=1,
gpus=int(torch.cuda.is_available()),
precision=precision,
gradient_clip_algorithm="norm",
gradient_clip_val=1.0,
)
old_backward = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward
def backward(*args, **kwargs):
# test that gradient is clipped correctly
ret_val = old_backward(*args, **kwargs)
parameters = model.parameters()
grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
assert (grad_norm - 1.0).abs() < 0.01, f"Gradient norm != 1.0: {grad_norm}"
return ret_val
trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward = backward
trainer.fit(model)
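# Standalone sketch of what "clip by norm" means (not Lightning-specific): the gradients
# are rescaled so that their global 2-norm does not exceed the configured cap.
import torch
_p = torch.nn.Parameter(torch.ones(4))
(_p * 10).sum().backward()                              # every grad entry is 10, norm = 20
torch.nn.utils.clip_grad_norm_([_p], max_norm=1.0)
assert (torch.norm(_p.grad.detach(), 2) - 1.0).abs() < 0.01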
@pytest.mark.parametrize(
"precision",
[32, pytest.param(16, marks=RunIf(min_gpus=1))],
)
def test_gradient_clipping_by_value(tmpdir, precision):
"""Test gradient clipping by value."""
tutils.reset_seed()
model = BoringModel()
grad_clip_val = 1e-10
trainer = Trainer(
max_steps=1,
max_epochs=1,
precision=precision,
gpus=int(torch.cuda.is_available()),
gradient_clip_val=grad_clip_val,
gradient_clip_algorithm="value",
default_root_dir=tmpdir,
)
old_backward = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward
def backward(*args, **kwargs):
# test that gradient is clipped correctly
ret_val = old_backward(*args, **kwargs)
parameters = model.parameters()
grad_max_list = [torch.max(p.grad.detach().abs()) for p in parameters]
grad_max = torch.max(torch.stack(grad_max_list))
assert (
abs(grad_max.item() - grad_clip_val) < 1e-11
), f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
return ret_val
trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward = backward
trainer.fit(model)
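# Companion sketch for "clip by value" (standalone): every gradient element is clamped
# into [-clip_value, clip_value], so the largest absolute entry equals the cap.
import torch
_p = torch.nn.Parameter(torch.ones(4))
(_p * 10).sum().backward()                              # every grad entry is 10
torch.nn.utils.clip_grad_value_([_p], clip_value=0.5)
assert torch.max(_p.grad.detach().abs()).item() == 0.5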
def test_invalid_gradient_clip_value(tmpdir):
with pytest.raises(TypeError, match="`gradient_clip_val` should be an int or a float"):
Trainer(default_root_dir=tmpdir, gradient_clip_val=(1, 2))
def test_invalid_gradient_clip_algo(tmpdir):
with pytest.raises(MisconfigurationException, match="`gradient_clip_algorithm` norm2 is invalid"):
Trainer(default_root_dir=tmpdir, gradient_clip_algorithm="norm2")
def test_gpu_choice(tmpdir):
trainer_options = dict(default_root_dir=tmpdir)
# Only run if CUDA is available
if not torch.cuda.is_available():
return
num_gpus = torch.cuda.device_count()
Trainer(**trainer_options, gpus=num_gpus, auto_select_gpus=True)
with pytest.raises(RuntimeError, match=r".*No GPUs available.*"):
Trainer(**trainer_options, gpus=num_gpus + 1, auto_select_gpus=True)
@pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.5, 5])
def test_num_sanity_val_steps(tmpdir, limit_val_batches):
"""Test that the number of sanity check batches is clipped to `limit_val_batches`."""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
num_sanity_val_steps = 4
trainer = Trainer(
default_root_dir=tmpdir,
num_sanity_val_steps=num_sanity_val_steps,
limit_val_batches=limit_val_batches,
max_steps=1,
)
assert trainer.num_sanity_val_steps == num_sanity_val_steps
with patch.object(
trainer.fit_loop.epoch_loop.val_loop.epoch_loop,
"_evaluation_step",
wraps=trainer.fit_loop.epoch_loop.val_loop.epoch_loop._evaluation_step,
) as mocked:
val_dataloaders = model.val_dataloader__multiple_mixed_length()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(
min(num_sanity_val_steps, num_batches) for num_batches in trainer.num_val_batches
)
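# Arithmetic behind the assertion above (hypothetical batch counts): the sanity check is
# clipped per dataloader, so with num_sanity_val_steps=4 and val dataloaders of 2 and 10
# batches it runs min(4, 2) + min(4, 10) = 6 evaluation steps in total.
assert sum(min(4, n) for n in (2, 10)) == 6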
@pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.3])
def test_num_sanity_val_steps_neg_one(tmpdir, limit_val_batches):
"""Test that `num_sanity_val_steps=-1` runs through all validation data once, and as many batches as limited by
`limit_val_batches` Trainer argument."""
model = EvalModelTemplate()
model.validation_step = model.validation_step__multiple_dataloaders
model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
trainer = Trainer(
default_root_dir=tmpdir, num_sanity_val_steps=-1, limit_val_batches=limit_val_batches, max_steps=1
)
assert trainer.num_sanity_val_steps == float("inf")
with patch.object(
trainer.fit_loop.epoch_loop.val_loop.epoch_loop,
"_evaluation_step",
wraps=trainer.fit_loop.epoch_loop.val_loop.epoch_loop._evaluation_step,
) as mocked:
val_dataloaders = model.val_dataloader__multiple()
trainer.fit(model, val_dataloaders=val_dataloaders)
assert mocked.call_count == sum(trainer.num_val_batches)
@pytest.mark.parametrize(
"trainer_kwargs,expected",
[
(
dict(accelerator=None, gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="dp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp", num_nodes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(accelerator=None, gpus=1),
dict(_distrib_type=None, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="dp", gpus=1),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp", gpus=1),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator="ddp_cpu", num_processes=2, gpus=1),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=1),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(accelerator=None, gpus=2),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="dp", gpus=2),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
(
dict(accelerator="ddp", gpus=2),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(accelerator="ddp2", gpus=2),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
(
dict(accelerator="ddp2", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(accelerator="dp", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
],
)
def test_trainer_config(trainer_kwargs, expected, monkeypatch):
if trainer_kwargs["gpus"] is not None:
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
monkeypatch.setattr(torch.cuda, "device_count", lambda: trainer_kwargs["gpus"])
trainer = Trainer(**trainer_kwargs)
assert len(expected) == 4
for k, v in expected.items():
assert getattr(trainer, k) == v, f"Failed {k}: {v}"
def test_trainer_subclassing():
model = EvalModelTemplate()
# First way of pulling out args from signature is to list them
class TrainerSubclass(Trainer):
def __init__(self, custom_arg, *args, custom_kwarg="test", **kwargs):
super().__init__(*args, **kwargs)
self.custom_arg = custom_arg
self.custom_kwarg = custom_kwarg
trainer = TrainerSubclass(123, custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_arg == 123
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# Second way is to pop from the dict
# It's a special case because Trainer does not have any positional args
class TrainerSubclass(Trainer):
def __init__(self, **kwargs):
self.custom_arg = kwargs.pop("custom_arg", 0)
self.custom_kwarg = kwargs.pop("custom_kwarg", "test")
super().__init__(**kwargs)
trainer = TrainerSubclass(custom_kwarg="custom", fast_dev_run=True)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.custom_kwarg == "custom"
assert trainer.fast_dev_run
# when we pass in an unknown arg, the base class should complain
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'abcdefg'"):
TrainerSubclass(abcdefg="unknown_arg")
@pytest.mark.parametrize(
"trainer_params", [OmegaConf.create(dict(max_epochs=1, gpus=1)), OmegaConf.create(dict(max_epochs=1, gpus=[0]))]
)
@RunIf(min_gpus=1)
def test_trainer_omegaconf(trainer_params):
Trainer(**trainer_params)
def test_trainer_pickle(tmpdir):
trainer = Trainer(max_epochs=1, default_root_dir=tmpdir)
pickle.dumps(trainer)
cloudpickle.dumps(trainer)
@pytest.mark.parametrize("stage", ("fit", "validate", "test"))
def test_trainer_setup_call(tmpdir, stage):
"""Test setup call gets the correct stage."""
class CurrentModel(BoringModel):
def setup(self, stage):
self.stage = stage
class CurrentCallback(Callback):
def setup(self, trainer, model, stage):
assert model is not None
self.stage = stage
model = CurrentModel()
callback = CurrentCallback()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, enable_checkpointing=False, callbacks=[callback])
if stage == "fit":
trainer.fit(model)
elif stage == "validate":
trainer.validate(model)
else:
trainer.test(model)
assert callback.stage == stage
assert model.stage == stage
@pytest.mark.parametrize("train_batches, max_steps, log_interval", [(10, 10, 1), (3, 10, 1), (3, 10, 5)])
@patch("pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics")
def test_log_every_n_steps(log_metrics_mock, tmpdir, train_batches, max_steps, log_interval):
class TestModel(BoringModel):
def training_step(self, *args, **kwargs):
self.log("foo", -1)
return super().training_step(*args, **kwargs)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
log_every_n_steps=log_interval,
flush_logs_every_n_steps=log_interval,
limit_train_batches=train_batches,
limit_val_batches=0,
max_steps=max_steps,
)
trainer.fit(model)
expected_calls = [call(metrics=ANY, step=s) for s in range(log_interval - 1, max_steps, log_interval)]
log_metrics_mock.assert_has_calls(expected_calls)
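# Sanity check of the expected_calls computation above (pure arithmetic): global steps are
# zero-based, so with log_every_n_steps=5 and max_steps=10 metrics land on steps 4 and 9.
assert list(range(5 - 1, 10, 5)) == [4, 9]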
class TestLightningDataModule(LightningDataModule):
def __init__(self, dataloaders):
super().__init__()
self._dataloaders = dataloaders
def test_dataloader(self):
return self._dataloaders
def predict_dataloader(self):
return self._dataloaders
class CustomPredictionWriter(BasePredictionWriter):
write_on_batch_end_called = False
write_on_epoch_end_called = False
def __init__(self, output_dir: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_dir = output_dir
def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, *args, **kwargs):
assert prediction.shape == torch.Size([1, 2])
assert len(batch_indices) == 1
self.write_on_batch_end_called = True
def write_on_epoch_end(self, trainer, pl_module, predictions, batch_indices):
expected = 1 if trainer.accelerator_connector.is_distributed else 2
assert len(predictions) == 2
assert len(predictions[0]) == expected
assert len(batch_indices) == 2
assert len(batch_indices[0]) == expected
self.write_on_epoch_end_called = True
def on_predict_epoch_end(self, trainer, pl_module, outputs):
if trainer.accelerator_connector.is_distributed:
for idx in range(2):
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler.sampler, UnrepeatedDistributedSampler)
assert isinstance(trainer.predict_dataloaders[idx].batch_sampler, IndexBatchSamplerWrapper)
super().on_predict_epoch_end(trainer, pl_module, outputs)
def predict(
tmpdir,
accelerator,
gpus,
num_processes,
model=None,
plugins=None,
datamodule=True,
enable_progress_bar=True,
use_callbacks=True,
):
dataloaders = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))]
model = model or BoringModel()
dm = TestLightningDataModule(dataloaders)
cb = CustomPredictionWriter(tmpdir, write_interval="batch")
cb_1 = CustomPredictionWriter(tmpdir, write_interval="epoch")
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
log_every_n_steps=1,
enable_model_summary=False,
accelerator=accelerator,
gpus=gpus,
num_processes=num_processes,
plugins=plugins,
enable_progress_bar=enable_progress_bar,
callbacks=[cb, cb_1] if use_callbacks else [],
)
if accelerator == "ddp_spawn":
with pytest.raises(MisconfigurationException):
trainer.predict(model, datamodule=dm, return_predictions=True)
if datamodule:
results = trainer.predict(model, datamodule=dm)
else:
results = trainer.predict(model, dataloaders=dataloaders)
if not isinstance(trainer.training_type_plugin, DDPSpawnPlugin):
if use_callbacks:
assert cb.write_on_batch_end_called
assert not cb.write_on_epoch_end_called
assert not cb_1.write_on_batch_end_called
assert cb_1.write_on_epoch_end_called
num_samples = 1 if accelerator == "ddp" else 2
assert len(results) == 2
assert len(results[0]) == num_samples
assert results[0][0].shape == torch.Size([1, 2])
def test_trainer_predict_no_return(tmpdir):
"""Test trainer.predict warns when nothing is returned."""
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
if (batch_idx + 1) % 2 == 0:
return
return super().predict_step(batch, batch_idx, dataloader_idx)
with pytest.warns(UserWarning, match="predict returned None"):
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
def test_trainer_predict_grad(tmpdir):
class CustomBoringModel(BoringModel):
def predict_step(self, batch, batch_idx, dataloader_idx=None):
assert batch.expand_as(batch).grad_fn is None
return super().predict_step(batch, batch_idx, dataloader_idx)
predict(tmpdir, None, None, 1, model=CustomBoringModel(), use_callbacks=False)
x = torch.zeros(1, requires_grad=True)
assert x.expand_as(x).grad_fn is not None
@pytest.mark.parametrize("enable_progress_bar", [False, True])
@pytest.mark.parametrize("datamodule", [False, True])
def test_trainer_predict_cpu(tmpdir, datamodule, enable_progress_bar):
predict(tmpdir, None, None, 1, datamodule=datamodule, enable_progress_bar=enable_progress_bar)
@RunIf(min_gpus=2, special=True)
@pytest.mark.parametrize("num_gpus", [1, 2])
def test_trainer_predict_dp(tmpdir, num_gpus):
predict(tmpdir, "dp", num_gpus, None)
@RunIf(min_gpus=2, special=True, fairscale=True)
def test_trainer_predict_ddp(tmpdir):
predict(tmpdir, "ddp", 2, None)
@RunIf(min_gpus=2, skip_windows=True, special=True)
def test_trainer_predict_ddp_spawn(tmpdir):
predict(tmpdir, "ddp_spawn", 2, None)
@RunIf(min_gpus=2, special=True)
def test_trainer_predict_1_gpu(tmpdir):
predict(tmpdir, None, 1, None)
@RunIf(skip_windows=True)
def test_trainer_predict_ddp_cpu(tmpdir):
predict(tmpdir, "ddp_cpu", 0, 2)
@pytest.mark.parametrize("dataset_cls", [RandomDataset, RandomIterableDatasetWithLen, RandomIterableDataset])
def test_index_batch_sampler_wrapper_with_iterable_dataset(dataset_cls, tmpdir):
ds = dataset_cls(32, 8)
loader = DataLoader(ds)
is_iterable_dataset = isinstance(ds, IterableDataset)
class CustomPredictionWriter(BasePredictionWriter):
def __init__(self, output_dir: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_dir = output_dir
def write_on_batch_end(self, trainer, pl_module, prediction, batch_indices, *args, **kwargs):
assert not batch_indices if is_iterable_dataset else batch_indices
cb = CustomPredictionWriter(tmpdir)
trainer = Trainer(default_root_dir=tmpdir, callbacks=cb)
predictions = trainer.predict(BoringModel(), dataloaders=loader)
assert len(predictions) == 8
@patch("torch.cuda.device_count", return_value=2)
@patch("torch.cuda.is_available", return_value=True)
def test_spawn_predict_return_predictions(*_):
"""Test that `return_predictions=True` raise a MisconfigurationException with spawn training type plugins."""
model = BoringModel()
def run(expected_plugin, **trainer_kwargs):
trainer = Trainer(**trainer_kwargs, fast_dev_run=True)
assert isinstance(trainer.training_type_plugin, expected_plugin)
with pytest.raises(MisconfigurationException, match="`return_predictions` should be set to `False`"):
trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=True)
run(DDPSpawnPlugin, accelerator="ddp_spawn", gpus=2)
run(DDPSpawnPlugin, accelerator="ddp_cpu", num_processes=2)
@pytest.mark.parametrize("return_predictions", [None, False, True])
@pytest.mark.parametrize("precision", [32, 64])
def test_predict_return_predictions_cpu(return_predictions, precision, tmpdir):
"""Test that `return_predictions=True`."""
seed_everything(42)
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, precision=precision)
preds = trainer.predict(model, dataloaders=model.train_dataloader(), return_predictions=return_predictions)
if return_predictions or return_predictions is None:
assert len(preds) == 1
assert preds[0].shape == torch.Size([1, 2])
assert preds[0].dtype == (torch.float64 if precision == 64 else torch.float32)
@pytest.mark.parametrize(
["limit_train_batches", "global_step", "num_training_batches", "current_epoch", "should_train"],
[(0.2, 0, 0, 0, False), (0.5, 10, 2, 4, True)],
)
def test_disabled_training_for_insufficient_limit_train_batches(
tmpdir, limit_train_batches, global_step, num_training_batches, current_epoch, should_train
):
"""Verify when `limit_train_batches` is float & between [0.0, 1.0] and.
`int(self.num_training_batches * self.limit_train_batches) == 0`, the training loop is disabled.
"""
class CurrentModel(BoringModel):
training_step_invoked = False
training_epoch_end_invoked = False
def training_step(self, *args, **kwargs):
self.training_step_invoked = True
return super().training_step(*args, **kwargs)
def training_epoch_end(self, *args, **kwargs):
self.training_epoch_end_invoked = True
return super().training_epoch_end(*args, **kwargs)
dataset_len = 100
batch_size = 25
train = RandomDataset(32, length=dataset_len)
train_loader = DataLoader(train, batch_size=batch_size)
model = CurrentModel()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=5, limit_train_batches=limit_train_batches)
trainer.fit(model, train_loader)
params_string = f"""`limit_train_batches={limit_train_batches}`, `dataset_len={dataset_len}`
& `batch_size={batch_size}` as
`num_training_batches={num_training_batches}`"""
if should_train:
error_string = f"should run with {params_string}"
else:
error_string = f"should not run with {params_string}"
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.global_step == global_step
assert trainer.num_training_batches == num_training_batches
assert trainer.current_epoch == current_epoch
assert model.training_step_invoked == should_train, f"`training_step` {error_string}"
assert model.training_epoch_end_invoked == should_train, f"`training_epoch_end` {error_string}"
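# Quick arithmetic behind the parametrization above: 100 samples with batch_size=25 give
# 4 training batches, so a fractional limit_train_batches of 0.2 truncates to 0 batches
# (training disabled) while 0.5 keeps 2 batches per epoch.
_num_batches = 100 // 25
assert int(_num_batches * 0.2) == 0
assert int(_num_batches * 0.5) == 2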
@pytest.mark.parametrize(["max_steps", "max_epochs", "global_step"], [(10, 5, 10), (20, None, 20)])
def test_repeated_fit_calls_with_max_epochs_and_steps(tmpdir, max_steps, max_epochs, global_step):
"""Ensure that the training loop is bound by `max_steps` and `max_epochs` for repeated calls of `trainer.fit`,
and disabled if the limit is reached."""
dataset_len = 200
batch_size = 10
train_data = DataLoader(RandomDataset(32, dataset_len), batch_size=batch_size)
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=max_steps, max_epochs=max_epochs)
trainer.fit(model, train_data)
assert trainer.global_step == global_step
trainer.fit(model, train_data)
assert trainer.global_step == global_step
def test_trainer_access_in_configure_optimizers(tmpdir):
"""Verify that the configure optimizer function can reference the trainer."""
class TestModel(BoringModel):
def configure_optimizers(self):
assert self.trainer is not None, "Expect to have access to the trainer within `configure_optimizers`"
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model, train_data)
@RunIf(min_gpus=1)
def test_setup_hook_move_to_device_correctly(tmpdir):
"""Verify that if a user defines a layer in the setup hook function, this is moved to the correct device."""
class TestModel(BoringModel):
def setup(self, stage: str) -> None:
self.new_layer = torch.nn.Linear(2, 2)
def training_step(self, batch, batch_idx):
output = self.layer(batch)
# will crash if not moved to correct device
output = self.new_layer(output)
loss = self.loss(batch, output)
return {"loss": loss}
# fake data
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, gpus=1)
trainer.fit(model, train_data)
def test_train_loop_system(tmpdir):
"""
Test the following methods are called in the order in automatic optimization.
1. optimizer.step (skip when gradient accumulation)
2. model.training_step
3. optimizer.zero_grad (run when the first batch of gradient accumulation)
4. model.backward
Note that the order is NOT `training_step`->`zero_grad`->`backward`->`step`.
This is because `optimizer.step(closure)` calls `closure()` which then calls
the three remaining methods `training_step`, `zero_grad` and `backward` inside.
"""
called_methods = []
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=5,
limit_val_batches=1,
limit_test_batches=1,
enable_progress_bar=False,
)
class TestOptimizer(SGD):
def step(self, *args, **kwargs):
called_methods.append("step")
return super().step(*args, **kwargs)
def zero_grad(self, *args, **kwargs):
called_methods.append("zero_grad")
return super().zero_grad(*args, **kwargs)
class TestModel(BoringModel):
def configure_optimizers(self):
return TestOptimizer(self.parameters(), lr=0.1)
def training_step(self, *args, **kwargs):
called_methods.append("training_step")
return super().training_step(*args, **kwargs)
def backward(self, *args, **kwargs):
called_methods.append("backward")
return super().backward(*args, **kwargs)
model = TestModel()
trainer = Trainer(**trainer_options)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == ["step", "training_step", "zero_grad", "backward"] * trainer.limit_train_batches
called_methods.clear()
trainer = Trainer(**trainer_options, accumulate_grad_batches=3)
# No methods are called yet.
assert called_methods == []
trainer.fit(model)
assert called_methods == [
# 0
"training_step",
"zero_grad",
"backward",
# 1
"training_step",
"backward",
# 2
"step",
"training_step",
"backward",
# 3
"training_step",
"zero_grad",
"backward",
# 4
"step",
"training_step",
"backward",
]
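# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# A minimal closure-based optimizer step in plain PyTorch, assuming arbitrary
# `model`, `optimizer` and `batch` objects. It shows why "step" is recorded first
# in the assertions above: `optimizer.step(closure)` is entered first, and only
# then runs the closure that performs the `training_step`-like forward pass,
# `zero_grad` and `backward`.
def _example_closure_step(model, optimizer, batch):
    def closure():
        loss = model(batch).sum()  # stands in for `training_step`
        optimizer.zero_grad()      # `zero_grad`
        loss.backward()            # `backward`
        return loss
    optimizer.step(closure)        # `step` wraps the three calls above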
def test_init_optimizers_resets_lightning_optimizers(tmpdir):
"""Test that the Trainer resets the `lightning_optimizers` list everytime new optimizers get initialized."""
def compare_optimizers():
assert trainer.lightning_optimizers[0].optimizer is trainer.optimizers[0]
model = BoringModel()
model.lr = 0.2
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, auto_lr_find=True)
trainer.tune(model)
compare_optimizers()
trainer.fit(model)
compare_optimizers()
trainer.fit_loop.max_epochs = 2 # simulate multiple fit calls
trainer.fit(model)
compare_optimizers()
def test_check_val_every_n_epoch_exception(tmpdir):
with pytest.raises(MisconfigurationException, match="should be an integer."):
Trainer(default_root_dir=tmpdir, max_epochs=1, check_val_every_n_epoch=1.2)
def test_trainer_attach_data_pipeline_to_model(tmpdir):
class DataPipeline:
pass
class TestDataModule(LightningDataModule):
data_pipeline = DataPipeline()
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return DataLoader(RandomDataset(32, 64))
class TestCallback(Callback):
def on_fit_start(self, trainer, pl_module: LightningModule) -> None:
"""Called when fit begins."""
assert isinstance(pl_module.data_pipeline, DataPipeline)
model = BoringModel()
dm = TestDataModule()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, callbacks=[TestCallback()])
trainer.fit(model, datamodule=dm)
def test_exception_when_testing_or_validating_with_fast_dev_run(tmpdir):
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = BoringModel()
trainer.fit(model)
with pytest.raises(MisconfigurationException, match=r"\.validate\(\)` with `fast_dev_run=True"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"\.test\(\)` with `fast_dev_run=True"):
trainer.test()
class TrainerStagesModel(BoringModel):
def on_train_start(self) -> None:
assert self.trainer.model.training
assert self.training
def on_validation_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_test_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
def on_predict_start(self) -> None:
assert not self.trainer.model.training
assert not self.training
@pytest.mark.parametrize(
"accelerator,num_processes", [(None, 1), pytest.param("ddp_cpu", 2, marks=RunIf(skip_windows=True))]
)
def test_model_in_correct_mode_during_stages(tmpdir, accelerator, num_processes):
model = TrainerStagesModel()
trainer = Trainer(default_root_dir=tmpdir, accelerator=accelerator, num_processes=num_processes, fast_dev_run=True)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, model.val_dataloader())
class TestDummyModelForCheckpoint(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("x", loss)
def validation_epoch_end(self, outputs) -> None:
pass
@RunIf(skip_windows=True)
def test_fit_test_synchronization(tmpdir):
"""Test that the trainer synchronizes processes before returning control back to the caller."""
tutils.set_random_master_port()
model = TestDummyModelForCheckpoint()
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="x", mode="min", save_top_k=1)
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=2, accelerator="ddp_cpu", num_processes=2, callbacks=[checkpoint]
)
trainer.fit(model)
assert os.path.exists(checkpoint.best_model_path), f"Could not find checkpoint at rank {trainer.global_rank}"
trainer.test()
class CustomCallbackOnLoadCheckpoint(Callback):
def on_save_checkpoint(self, trainer, pl_module, checkpoint) -> dict:
return {"a": None}
def test_on_load_checkpoint_missing_callbacks(tmpdir):
"""Test a warning appears when callbacks in the checkpoint don't match callbacks provided when resuming."""
model = BoringModel()
chk = ModelCheckpoint(dirpath=tmpdir, save_last=True)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, callbacks=[chk, CustomCallbackOnLoadCheckpoint()])
trainer.fit(model)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=5, resume_from_checkpoint=chk.last_model_path)
with pytest.warns(UserWarning, match="CustomCallbackOnLoadCheckpoint"):
trainer.fit(model)
def test_module_current_fx_attributes_reset(tmpdir):
"""Ensure that lightning module's attributes related to current fx are reset at the end of execution."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1, enable_checkpointing=False, logger=False)
trainer.fit(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
trainer.test(model)
assert model._current_fx_name is None
assert model._current_dataloader_idx is None
def test_exception_when_lightning_module_is_not_set_on_trainer():
trainer = Trainer()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*validate"):
trainer.validate()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*test"):
trainer.test()
with pytest.raises(MisconfigurationException, match=r"`model` must be provided.*predict"):
trainer.predict()
class CustomException(Exception):
pass
@RunIf(min_gpus=2, special=True)
def test_ddp_terminate_when_deadlock_is_detected(tmpdir):
"""Test that DDP kills the remaining processes when only one rank is throwing an exception."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
if batch_idx == 1 and self.trainer.is_global_zero:
# rank 0: raises an exception
# rank 1: continues training but will hang on the next barrier in the training loop
raise CustomException
return super().training_step(batch, batch_idx)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, limit_train_batches=5, num_sanity_val_steps=0, gpus=2, accelerator="ddp"
)
# simulate random failure in training_step on rank 0
with pytest.raises(DeadlockDetectedException, match="CustomException"):
trainer.fit(model)
@RunIf(min_gpus=1)
def test_multiple_trainer_constant_memory_allocated(tmpdir):
"""This tests ensures calling the trainer several times reset the memory back to 0."""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
loss = super().training_step(batch, batch_idx)
self.log("train_loss", loss["loss"])
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.layer.parameters(), lr=0.1)
class Check(Callback):
def on_epoch_start(self, trainer, *_):
assert isinstance(trainer.training_type_plugin.model, DistributedDataParallel)
def current_memory():
# before measuring the memory force release any leftover allocations, including CUDA tensors
gc.collect()
return torch.cuda.memory_allocated(0)
initial = current_memory()
model = TestModel()
trainer_kwargs = dict(
default_root_dir=tmpdir,
fast_dev_run=True,
gpus=1,
accelerator="ddp",
enable_progress_bar=False,
callbacks=Check(),
)
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
assert trainer.training_type_plugin.model is model
assert list(trainer.optimizers[0].state.values())[0]["exp_avg_sq"].device == torch.device("cpu")
assert trainer.callback_metrics["train_loss"].device == torch.device("cpu")
assert current_memory() <= initial
deepcopy(trainer)
assert current_memory() <= initial
trainer_2 = Trainer(**trainer_kwargs)
trainer_2.fit(model)
assert current_memory() <= initial
class TrainerStagesErrorsModel(BoringModel):
def on_train_start(self) -> None:
raise Exception("Error during train")
def on_validation_start(self) -> None:
raise Exception("Error during validation")
def on_test_start(self) -> None:
raise Exception("Error during test")
def on_predict_start(self) -> None:
raise Exception("Error during predict")
@pytest.mark.parametrize(
"accelerator,num_processes",
[
(None, 1),
pytest.param("ddp_cpu", 2, marks=RunIf(skip_windows=True)),
],
)
def test_error_handling_all_stages(tmpdir, accelerator, num_processes):
model = TrainerStagesErrorsModel()
trainer = Trainer(default_root_dir=tmpdir, accelerator=accelerator, num_processes=num_processes, fast_dev_run=True)
with pytest.raises(Exception, match=r"Error during train"), patch(
"pytorch_lightning.Trainer._on_exception"
) as exception_hook:
trainer.fit(model)
exception_hook.assert_called()
with pytest.raises(Exception, match=r"Error during validation"), patch(
"pytorch_lightning.Trainer._on_exception"
) as exception_hook:
trainer.validate(model)
exception_hook.assert_called()
with pytest.raises(Exception, match=r"Error during test"), patch(
"pytorch_lightning.Trainer._on_exception"
) as exception_hook:
trainer.test(model)
exception_hook.assert_called()
with pytest.raises(Exception, match=r"Error during predict"), patch(
"pytorch_lightning.Trainer._on_exception"
) as exception_hook:
trainer.predict(model, model.val_dataloader(), return_predictions=False)
exception_hook.assert_called()
def test_trainer_metrics_reset_before_each_task(tmpdir):
"""Test that callback, logged and progress bar metrics are reset before each task starts."""
class TestMetricRestartCallback(Callback):
def _make_assertions(self, trainer):
assert trainer.callback_metrics == {}
assert trainer.progress_bar_metrics == {}
assert trainer.logged_metrics == {}
def on_train_start(self, trainer, *args, **kwargs):
self._make_assertions(trainer)
def on_validation_start(self, trainer, *args, **kwargs):
if trainer.state.fn == TrainerFn.VALIDATING:
self._make_assertions(trainer)
def on_test_start(self, trainer, *args, **kwargs):
self._make_assertions(trainer)
def on_predict_start(self, trainer, *args, **kwargs):
self._make_assertions(trainer)
class CustomBoringModel(BoringModel):
def __init__(self):
super().__init__()
def training_step(self, *args, **kwargs):
self.log("train/metric", 7.0)
return super().training_step(*args, **kwargs)
def validation_step(self, *args, **kwargs):
self.log("val/metric", 14.0)
return super().validation_step(*args, **kwargs)
def test_step(self, *args, **kwargs):
self.log("test/metric", 21.0)
return super().test_step(*args, **kwargs)
model = CustomBoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=4, callbacks=[TestMetricRestartCallback()])
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
def test_detect_anomaly_nan(tmpdir):
class NanModel(BoringModel):
def training_step(self, batch, batch_idx):
output = super().training_step(batch, batch_idx)
output["loss"] = output["loss"] * torch.tensor(float("nan"))
return output
model = NanModel()
trainer = Trainer(default_root_dir=tmpdir, detect_anomaly=True)
with pytest.raises(RuntimeError, match=r"returned nan values in its 0th output."):
with pytest.warns(
UserWarning, match=r".*Error detected in.* Traceback of forward call that caused the error.*"
):
trainer.fit(model)
@pytest.mark.parametrize(
"trainer_kwargs,expected",
[
(
dict(strategy=None, gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy="dp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy="ddp", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy="ddp", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(strategy="ddp", num_nodes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy="ddp2", gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy=None, gpus=1),
dict(_distrib_type=None, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(strategy="dp", gpus=1),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(strategy="ddp", gpus=1),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(strategy="ddp_spawn", gpus=1),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(strategy="ddp2", gpus=1),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=1, num_processes=1),
),
(
dict(strategy=None, gpus=2),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(strategy="dp", gpus=2),
dict(_distrib_type=DistributedType.DP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
(
dict(strategy="ddp", gpus=2),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.GPU, num_gpus=2, num_processes=2),
),
(
dict(strategy="ddp2", gpus=2),
dict(_distrib_type=DistributedType.DDP2, _device_type=DeviceType.GPU, num_gpus=2, num_processes=1),
),
(
dict(strategy="ddp2", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(strategy="dp", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(strategy="ddp_spawn", num_processes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(strategy="ddp_spawn", num_processes=1, gpus=None),
dict(_distrib_type=None, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy="ddp_cpu", num_processes=1, num_nodes=1, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy="ddp_cpu", num_processes=2, num_nodes=1, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
(
dict(strategy="ddp_cpu", num_processes=1, num_nodes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP, _device_type=DeviceType.CPU, num_gpus=0, num_processes=1),
),
(
dict(strategy="ddp_cpu", num_processes=2, num_nodes=2, gpus=None),
dict(_distrib_type=DistributedType.DDP_SPAWN, _device_type=DeviceType.CPU, num_gpus=0, num_processes=2),
),
],
)
def test_trainer_config_strategy(trainer_kwargs, expected, monkeypatch):
if trainer_kwargs["gpus"] is not None:
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
monkeypatch.setattr(torch.cuda, "device_count", lambda: trainer_kwargs["gpus"])
trainer = Trainer(**trainer_kwargs)
assert len(expected) == 4
for k, v in expected.items():
assert getattr(trainer, k) == v, f"Failed {k}: {v}"
| 37.477148
| 120
| 0.691293
|
014add1f87871527bcbcfc051efd1917f820f5df
| 32,903
|
py
|
Python
|
salt/cloud/clouds/proxmox.py
|
nielsk/salt
|
be5d400d903e68d99c216fd63a7146d86a64a55d
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
salt/cloud/clouds/proxmox.py
|
nielsk/salt
|
be5d400d903e68d99c216fd63a7146d86a64a55d
|
[
"Apache-2.0"
] | null | null | null |
salt/cloud/clouds/proxmox.py
|
nielsk/salt
|
be5d400d903e68d99c216fd63a7146d86a64a55d
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
'''
Proxmox Cloud Module
======================
.. versionadded:: 2014.7.0
The Proxmox cloud module is used to control access to cloud providers using
the Proxmox system (KVM / OpenVZ / LXC).
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/proxmox.conf``:
.. code-block:: yaml
my-proxmox-config:
# Proxmox account information
user: myuser@pam or myuser@pve
password: mypassword
url: hypervisor.domain.tld
driver: proxmox
verify_ssl: True
:maintainer: Frank Klaassen <frank@cloudright.nl>
:depends: requests >= 2.2.1
:depends: IPy >= 0.81
'''
# Import python libs
from __future__ import absolute_import
import time
import pprint
import logging
import re
import json
# Import salt libs
import salt.ext.six as six
import salt.utils
# Import salt cloud libs
import salt.utils.cloud
import salt.config as config
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout
)
from salt.ext.six.moves import range
# Import Third Party Libs
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from IPy import IP
HAS_IPY = True
except ImportError:
HAS_IPY = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'proxmox'
def __virtual__():
'''
Check for PROXMOX configurations
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('user',)
)
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
deps = {
'requests': HAS_REQUESTS,
'IPy': HAS_IPY
}
return config.check_driver_dependencies(
__virtualname__,
deps
)
url = None
ticket = None
csrf = None
verify_ssl = None
api = None
def _authenticate():
'''
Retrieve CSRF and API tickets for the Proxmox API
'''
global url, ticket, csrf, verify_ssl
url = config.get_cloud_config_value(
'url', get_configured_provider(), __opts__, search_global=False
)
    username = config.get_cloud_config_value(
        'user', get_configured_provider(), __opts__, search_global=False
    )
passwd = config.get_cloud_config_value(
'password', get_configured_provider(), __opts__, search_global=False
)
verify_ssl = config.get_cloud_config_value(
'verify_ssl', get_configured_provider(), __opts__,
default=True, search_global=False
)
connect_data = {'username': username, 'password': passwd}
full_url = 'https://{0}:8006/api2/json/access/ticket'.format(url)
returned_data = requests.post(
full_url, verify=verify_ssl, data=connect_data).json()
ticket = {'PVEAuthCookie': returned_data['data']['ticket']}
csrf = str(returned_data['data']['CSRFPreventionToken'])
def query(conn_type, option, post_data=None):
'''
Execute the HTTP request to the API
'''
if ticket is None or csrf is None or url is None:
log.debug('Not authenticated yet, doing that now..')
_authenticate()
full_url = 'https://{0}:8006/api2/json/{1}'.format(url, option)
log.debug('{0}: {1} ({2})'.format(conn_type, full_url, post_data))
httpheaders = {'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'salt-cloud-proxmox'}
if conn_type == 'post':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.post(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'put':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.put(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'delete':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.delete(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'get':
response = requests.get(full_url, verify=verify_ssl,
cookies=ticket)
response.raise_for_status()
try:
returned_data = response.json()
if 'data' not in returned_data:
raise SaltCloudExecutionFailure
return returned_data['data']
except Exception:
log.error('Error in trying to process JSON')
log.error(response)
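# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A read-only call through `query`; the node name 'node1' is a placeholder.
# Authentication happens lazily inside `query` on the first call, and the
# returned value is the 'data' payload of the JSON response.
def _example_query_node_status():
    return query('get', 'nodes/node1/status')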
def _get_vm_by_name(name, allDetails=False):
'''
    Since Proxmox works based on IDs rather than names as identifiers, this
    requires some filtering to retrieve the required information.
'''
vms = get_resources_vms(includeConfig=allDetails)
if name in vms:
return vms[name]
log.info('VM with name "{0}" could not be found.'.format(name))
return False
def _get_vm_by_id(vmid, allDetails=False):
'''
Retrieve a VM based on the ID.
'''
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)):
if str(vm_details['vmid']) == str(vmid):
return vm_details
log.info('VM with ID "{0}" could not be found.'.format(vmid))
return False
def _get_next_vmid():
'''
    Proxmox allows the use of alternative IDs instead of auto-incrementing them.
    Because of that, it is required to query for the first available ID.
'''
return int(query('get', 'cluster/nextid'))
def _check_ip_available(ip_addr):
'''
Proxmox VMs refuse to start when the IP is already being used.
    This function can be used to prevent VMs from being created with duplicate
    IPs, or to generate a warning.
'''
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
vm_config = vm_details['config']
if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr:
log.debug('IP "{0}" is already defined'.format(ip_addr))
return False
log.debug('IP \'{0}\' is available to be defined'.format(ip_addr))
return True
def _parse_proxmox_upid(node, vm_=None):
'''
Upon requesting a task that runs for a longer period of time a UPID is given.
This includes information about the job and can be used to lookup information in the log.
'''
ret = {}
upid = node
# Parse node response
node = node.split(':')
if node[0] == 'UPID':
ret['node'] = str(node[1])
ret['pid'] = str(node[2])
ret['pstart'] = str(node[3])
ret['starttime'] = str(node[4])
ret['type'] = str(node[5])
ret['vmid'] = str(node[6])
ret['user'] = str(node[7])
# include the upid again in case we'll need it again
ret['upid'] = str(upid)
if vm_ is not None and 'technology' in vm_:
ret['technology'] = str(vm_['technology'])
return ret
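# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Shows how a raw UPID string maps onto the fields parsed above. The sample
# value below is invented purely for demonstration.
def _example_parse_upid():
    sample = 'UPID:node1:00001A2B:003C4D5E:5F3C2A10:vzcreate:101:root@pam:'
    parsed = _parse_proxmox_upid(sample)
    # parsed['node'] == 'node1', parsed['type'] == 'vzcreate',
    # parsed['vmid'] == '101', parsed['user'] == 'root@pam'
    return parsed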
def _lookup_proxmox_task(upid):
'''
    Retrieve the (latest) logs and the status for a UPID.
    This can be used to verify whether a task has completed.
'''
log.debug('Getting creation status for upid: {0}'.format(upid))
tasks = query('get', 'cluster/tasks')
if tasks:
for task in tasks:
if task['upid'] == upid:
log.debug('Found upid task: {0}'.format(task))
return task
return False
def get_resources_nodes(call=None, resFilter=None):
'''
Retrieve all hypervisors (nodes) available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_nodes my-proxmox-config
'''
log.debug('Getting resource: nodes.. (filter: {0})'.format(resFilter))
resources = query('get', 'cluster/resources')
ret = {}
for resource in resources:
if 'type' in resource and resource['type'] == 'node':
name = resource['node']
ret[name] = resource
if resFilter is not None:
log.debug('Filter given: {0}, returning requested '
'resource: nodes'.format(resFilter))
return ret[resFilter]
log.debug('Filter not given: {0}, returning all resource: nodes'.format(ret))
return ret
def get_resources_vms(call=None, resFilter=None, includeConfig=True):
'''
Retrieve all VMs available on this environment
CLI Example:
.. code-block:: bash
salt-cloud -f get_resources_vms my-proxmox-config
'''
log.debug('Getting resource: vms.. (filter: {0})'.format(resFilter))
resources = query('get', 'cluster/resources')
ret = {}
for resource in resources:
if 'type' in resource and resource['type'] in ['openvz', 'qemu', 'lxc']:
name = resource['name']
ret[name] = resource
if includeConfig:
# Requested to include the detailed configuration of a VM
ret[name]['config'] = get_vmconfig(
ret[name]['vmid'],
ret[name]['node'],
ret[name]['type']
)
if resFilter is not None:
        log.debug('Filter given: {0}, returning requested '
                  'resource: vms'.format(resFilter))
return ret[resFilter]
    log.debug('Filter not given: {0}, returning all resource: vms'.format(ret))
return ret
def script(vm_):
'''
Return the script deployment object
'''
script_name = config.get_cloud_config_value('script', vm_, __opts__)
if not script_name:
script_name = 'bootstrap-salt'
return salt.utils.cloud.os_script(
script_name,
vm_,
__opts__,
salt.utils.cloud.salt_config_to_yaml(
salt.utils.cloud.minion_config(__opts__, vm_)
)
)
def avail_locations(call=None):
'''
Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages
CLI Example:
.. code-block:: bash
salt-cloud --list-locations my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
# could also use the get_resources_nodes but speed is ~the same
nodes = query('get', 'nodes')
ret = {}
for node in nodes:
name = node['node']
ret[name] = node
return ret
def avail_images(call=None, location='local'):
'''
Return a list of the images that are on the provider
CLI Example:
.. code-block:: bash
salt-cloud --list-images my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
ret = {}
for host_name, host_details in six.iteritems(avail_locations()):
for item in query('get', 'nodes/{0}/storage/{1}/content'.format(host_name, location)):
ret[item['volid']] = item
return ret
def list_nodes(call=None):
'''
Return a list of the VMs that are managed by the provider
CLI Example:
.. code-block:: bash
salt-cloud -Q my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
log.debug('VM_Name: {0}'.format(vm_name))
log.debug('vm_details: {0}'.format(vm_details))
# Limit resultset on what Salt-cloud demands:
ret[vm_name] = {}
ret[vm_name]['id'] = str(vm_details['vmid'])
ret[vm_name]['image'] = str(vm_details['vmid'])
ret[vm_name]['size'] = str(vm_details['disk'])
ret[vm_name]['state'] = str(vm_details['status'])
# Figure out which is which to put it in the right column
private_ips = []
public_ips = []
if 'ip_address' in vm_details['config'] and vm_details['config']['ip_address'] != '-':
ips = vm_details['config']['ip_address'].split(' ')
for ip_ in ips:
if IP(ip_).iptype() == 'PRIVATE':
private_ips.append(str(ip_))
else:
public_ips.append(str(ip_))
ret[vm_name]['private_ips'] = private_ips
ret[vm_name]['public_ips'] = public_ips
return ret
def list_nodes_full(call=None):
'''
Return a list of the VMs that are on the provider
CLI Example:
.. code-block:: bash
salt-cloud -F my-proxmox-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
return get_resources_vms(includeConfig=True)
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields
CLI Example:
.. code-block:: bash
salt-cloud -S my-proxmox-config
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(), __opts__['query.selection'], call,
)
def create(vm_):
'''
Create a single VM from a data dict
CLI Example:
.. code-block:: bash
salt-cloud -p proxmox-ubuntu vmhostname
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'proxmox',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
ret = {}
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Creating Cloud VM {0}'.format(vm_['name']))
if 'use_dns' in vm_ and 'ip_address' not in vm_:
use_dns = vm_['use_dns']
if use_dns:
from socket import gethostbyname, gaierror
try:
ip_address = gethostbyname(str(vm_['name']))
except gaierror:
log.debug('Resolving of {hostname} failed'.format(hostname=str(vm_['name'])))
else:
vm_['ip_address'] = str(ip_address)
try:
newid = _get_next_vmid()
data = create_node(vm_, newid)
except Exception as exc:
log.error(
'Error creating {0} on PROXMOX\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
ret['creation_data'] = data
name = vm_['name'] # hostname which we know
if 'clone' in vm_ and vm_['clone'] is True:
vmid = newid
else:
vmid = data['vmid'] # vmid which we have received
host = data['node'] # host which we have received
nodeType = data['technology'] # VM tech (Qemu / OpenVZ)
# Determine which IP to use in order of preference:
if 'ip_address' in vm_:
ip_address = str(vm_['ip_address'])
elif 'public_ips' in data:
ip_address = str(data['public_ips'][0]) # first IP
elif 'private_ips' in data:
ip_address = str(data['private_ips'][0]) # first IP
else:
        raise SaltCloudExecutionFailure  # no usable IP address could be determined
log.debug('Using IP address {0}'.format(ip_address))
# wait until the vm has been created so we can start it
if not wait_for_created(data['upid'], timeout=300):
return {'Error': 'Unable to create {0}, command timed out'.format(name)}
# VM has been created. Starting..
if not start(name, vmid, call='action'):
log.error('Node {0} ({1}) failed to start!'.format(name, vmid))
raise SaltCloudExecutionFailure
# Wait until the VM has fully started
log.debug('Waiting for state "running" for vm {0} on {1}'.format(vmid, host))
if not wait_for_state(vmid, 'running'):
return {'Error': 'Unable to start {0}, command timed out'.format(name)}
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
ssh_password = config.get_cloud_config_value(
'password', vm_, __opts__,
)
ret['ip_address'] = ip_address
ret['username'] = ssh_username
ret['password'] = ssh_password
vm_['ssh_host'] = ip_address
vm_['password'] = ssh_password
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
# Report success!
log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
log.debug(
'\'{0[name]}\' VM creation details:\n{1}'.format(
vm_, pprint.pformat(data)
)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
)
return ret
def _import_api():
'''
Download https://<url>/pve-docs/api-viewer/apidoc.js
    Extract the content of the pveapi variable (JSON formatted)
    Load this JSON content into the global variable "api"
'''
global api
full_url = 'https://{0}:8006/pve-docs/api-viewer/apidoc.js'.format(url)
returned_data = requests.get(full_url, verify=verify_ssl)
re_filter = re.compile('(?<=pveapi =)(.*)(?=^;)', re.DOTALL | re.MULTILINE)
api_json = re_filter.findall(returned_data.text)[0]
api = json.loads(api_json)
def _get_properties(path="", method="GET", forced_params=None):
'''
Return the parameter list from api for defined path and HTTP method
'''
if api is None:
_import_api()
sub = api
path_levels = [level for level in path.split('/') if level != '']
search_path = ''
props = []
parameters = set([] if forced_params is None else forced_params)
# Browse all path elements but last
for elem in path_levels[:-1]:
search_path += '/' + elem
        # Look up the dictionary whose path matches the requested path in the list and return its children
        sub = next(item for item in sub if item["path"] == search_path)['children']
# Get leaf element in path
search_path += '/' + path_levels[-1]
sub = next((item for item in sub if item["path"] == search_path))
try:
# get list of properties for requested method
props = sub['info'][method]['parameters']['properties'].keys()
except KeyError as exc:
log.error('method not found: "{0}"'.format(str(exc)))
except:
raise
for prop in props:
numerical = re.match(r'(\w+)\[n\]', prop)
# generate (arbitrarily) 10 properties for duplicatable properties identified by:
# "prop[n]"
if numerical:
for i in range(10):
parameters.add(numerical.group(1) + str(i))
else:
parameters.add(prop)
return parameters
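# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Asking the downloaded API description which parameters a container creation
# accepts, while forcing a couple of extra names into the result set. A
# duplicatable property documented as e.g. "net[n]" is expanded to net0..net9
# by the loop above.
def _example_lxc_properties():
    return _get_properties('/nodes/{node}/lxc', 'POST',
                           forced_params=('hostname', 'ostemplate'))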
def create_node(vm_, newid):
'''
Build and submit the requestdata to create a new node
'''
newnode = {}
if 'technology' not in vm_:
vm_['technology'] = 'openvz' # default virt tech if none is given
if vm_['technology'] not in ['qemu', 'openvz', 'lxc']:
# Wrong VM type given
log.error('Wrong VM type. Valid options are: qemu, openvz (proxmox3) or lxc (proxmox4)')
raise SaltCloudExecutionFailure
if 'host' not in vm_:
# Use globally configured/default location
vm_['host'] = config.get_cloud_config_value(
'default_host', get_configured_provider(), __opts__, search_global=False
)
if vm_['host'] is None:
# No location given for the profile
log.error('No host given to create this VM on')
raise SaltCloudExecutionFailure
# Required by both OpenVZ and Qemu (KVM)
vmhost = vm_['host']
newnode['vmid'] = newid
for prop in 'cpuunits', 'description', 'memory', 'onboot':
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
if vm_['technology'] == 'openvz':
# OpenVZ related settings, using non-default names:
newnode['hostname'] = vm_['name']
newnode['ostemplate'] = vm_['image']
# optional VZ settings
for prop in 'cpus', 'disk', 'ip_address', 'nameserver', 'password', 'swap', 'poolid', 'storage':
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
elif vm_['technology'] == 'lxc':
# LXC related settings, using non-default names:
newnode['hostname'] = vm_['name']
newnode['ostemplate'] = vm_['image']
static_props = ('cpuunits', 'description', 'memory', 'onboot', 'net0',
'password', 'nameserver', 'swap', 'storage', 'rootfs')
for prop in _get_properties('/nodes/{node}/lxc',
'POST',
static_props):
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
# inform user the "disk" option is not supported for LXC hosts
if 'disk' in vm_:
log.warning('The "disk" option is not supported for LXC hosts and was ignored')
# LXC specific network config
# OpenVZ allowed specifying IP and gateway. To ease migration from
# Proxmox 3, I've mapped the ip_address and gw to a generic net0 config.
# If you need more control, please use the net0 option directly.
# This also assumes a /24 subnet.
if 'ip_address' in vm_ and 'net0' not in vm_:
newnode['net0'] = 'bridge=vmbr0,ip=' + vm_['ip_address'] + '/24,name=eth0,type=veth'
# gateway is optional and does not assume a default
if 'gw' in vm_:
newnode['net0'] = newnode['net0'] + ',gw=' + vm_['gw']
elif vm_['technology'] == 'qemu':
# optional Qemu settings
static_props = ('acpi', 'cores', 'cpu', 'pool', 'storage', 'sata0', 'ostype', 'ide2', 'net0')
for prop in _get_properties('/nodes/{node}/qemu',
'POST',
static_props):
if prop in vm_: # if the property is set, use it for the VM request
newnode[prop] = vm_[prop]
# The node is ready. Lets request it to be added
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', newnode, list(newnode)),
},
sock_dir=__opts__['sock_dir'],
)
log.debug('Preparing to generate a node using these parameters: {0} '.format(
newnode))
if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu':
postParams = {}
postParams['newid'] = newnode['vmid']
for prop in 'description', 'format', 'full', 'name':
if 'clone_' + prop in vm_: # if the property is set, use it for the VM request
postParams[prop] = vm_['clone_' + prop]
node = query('post', 'nodes/{0}/qemu/{1}/clone'.format(vmhost, vm_['clone_from']), postParams)
else:
node = query('post', 'nodes/{0}/{1}'.format(vmhost, vm_['technology']), newnode)
return _parse_proxmox_upid(node, vm_)
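# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Mirrors how `create_node` above translates the legacy OpenVZ-style profile
# keys `ip_address` and `gw` into an LXC net0 string. The addresses used here
# are placeholder values.
def _example_lxc_net0(ip_address='192.0.2.10', gw='192.0.2.1'):
    net0 = 'bridge=vmbr0,ip=' + ip_address + '/24,name=eth0,type=veth'
    if gw:
        net0 += ',gw=' + gw
    return net0  # 'bridge=vmbr0,ip=192.0.2.10/24,name=eth0,type=veth,gw=192.0.2.1'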
def show_instance(name, call=None):
'''
Show the details from Proxmox concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
__utils__['cloud.cache_node'](nodes[name], __active_provider_name__, __opts__)
return nodes[name]
def get_vmconfig(vmid, node=None, node_type='openvz'):
'''
Get VM configuration
'''
if node is None:
# We need to figure out which node this VM is on.
for host_name, host_details in six.iteritems(avail_locations()):
for item in query('get', 'nodes/{0}/{1}'.format(host_name, node_type)):
if item['vmid'] == vmid:
node = host_name
# If we reached this point, we have all the information we need
data = query('get', 'nodes/{0}/{1}/{2}/config'.format(node, node_type, vmid))
return data
def wait_for_created(upid, timeout=300):
'''
    Wait until the VM has been created successfully
'''
start_time = time.time()
info = _lookup_proxmox_task(upid)
if not info:
log.error('wait_for_created: No task information '
'retrieved based on given criteria.')
raise SaltCloudExecutionFailure
while True:
if 'status' in info and info['status'] == 'OK':
log.debug('Host has been created!')
return True
time.sleep(3) # Little more patience, we're not in a hurry
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for host to be created')
return False
info = _lookup_proxmox_task(upid)
def wait_for_state(vmid, state, timeout=300):
'''
Wait until a specific state has been reached on a node
'''
start_time = time.time()
node = get_vm_status(vmid=vmid)
if not node:
log.error('wait_for_state: No VM retrieved based on given criteria.')
raise SaltCloudExecutionFailure
while True:
if node['status'] == state:
log.debug('Host {0} is now in "{1}" state!'.format(
node['name'], state
))
return True
time.sleep(1)
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for {0} to '
'become {1}'.format(node['name'], state))
return False
node = get_vm_status(vmid=vmid)
log.debug('State for {0} is: "{1}" instead of "{2}"'.format(
node['name'], node['status'], state))
def destroy(name, call=None):
'''
Destroy a node.
CLI Example:
.. code-block:: bash
salt-cloud --destroy mymachine
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
vmobj = _get_vm_by_name(name)
if vmobj is not None:
# stop the vm
if get_vm_status(vmid=vmobj['vmid'])['status'] != 'stopped':
stop(name, vmobj['vmid'], 'action')
# wait until stopped
if not wait_for_state(vmobj['vmid'], 'stopped'):
return {'Error': 'Unable to stop {0}, command timed out'.format(name)}
# required to wait a bit here, otherwise the VM is sometimes
# still locked and destroy fails.
time.sleep(1)
query('delete', 'nodes/{0}/{1}'.format(
vmobj['node'], vmobj['id']
))
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return {'Destroyed': '{0} was destroyed.'.format(name)}
def set_vm_status(status, name=None, vmid=None):
'''
Convenience function for setting VM status
'''
log.debug('Set status to {0} for {1} ({2})'.format(status, name, vmid))
if vmid is not None:
log.debug('set_vm_status: via ID - VMID {0} ({1}): {2}'.format(
vmid, name, status))
vmobj = _get_vm_by_id(vmid)
else:
log.debug('set_vm_status: via name - VMID {0} ({1}): {2}'.format(
vmid, name, status))
vmobj = _get_vm_by_name(name)
if not vmobj or 'node' not in vmobj or 'type' not in vmobj or 'vmid' not in vmobj:
log.error('Unable to set status {0} for {1} ({2})'.format(
status, name, vmid))
raise SaltCloudExecutionTimeout
log.debug("VM_STATUS: Has desired info ({0}). Setting status..".format(vmobj))
data = query('post', 'nodes/{0}/{1}/{2}/status/{3}'.format(
vmobj['node'], vmobj['type'], vmobj['vmid'], status))
result = _parse_proxmox_upid(data, vmobj)
if result is not False and result is not None:
log.debug('Set_vm_status action result: {0}'.format(result))
return True
return False
def get_vm_status(vmid=None, name=None):
'''
Get the status for a VM, either via the ID or the hostname
'''
if vmid is not None:
log.debug('get_vm_status: VMID {0}'.format(vmid))
vmobj = _get_vm_by_id(vmid)
elif name is not None:
log.debug('get_vm_status: name {0}'.format(name))
vmobj = _get_vm_by_name(name)
else:
log.debug("get_vm_status: No ID or NAME given")
raise SaltCloudExecutionFailure
log.debug('VM found: {0}'.format(vmobj))
if vmobj is not None and 'node' in vmobj:
log.debug("VM_STATUS: Has desired info. Retrieving.. ({0})".format(
vmobj['name']))
data = query('get', 'nodes/{0}/{1}/{2}/status/current'.format(
vmobj['node'], vmobj['type'], vmobj['vmid']))
return data
log.error('VM or requested status not found..')
return False
def start(name, vmid=None, call=None):
'''
Start a node.
CLI Example:
.. code-block:: bash
salt-cloud -a start mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The start action must be called with -a or --action.'
)
log.debug('Start: {0} ({1}) = Start'.format(name, vmid))
if not set_vm_status('start', name, vmid=vmid):
log.error('Unable to bring VM {0} ({1}) up..'.format(name, vmid))
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'started'
return {'Started': '{0} was started.'.format(name)}
def stop(name, vmid=None, call=None):
'''
Stop a node ("pulling the plug").
CLI Example:
.. code-block:: bash
salt-cloud -a stop mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
if not set_vm_status('stop', name, vmid=vmid):
log.error('Unable to bring VM {0} ({1}) down..'.format(name, vmid))
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'stopped'
return {'Stopped': '{0} was stopped.'.format(name)}
def shutdown(name=None, vmid=None, call=None):
'''
Shutdown a node via ACPI.
CLI Example:
.. code-block:: bash
salt-cloud -a shutdown mymachine
'''
if call != 'action':
raise SaltCloudSystemExit(
'The shutdown action must be called with -a or --action.'
)
if not set_vm_status('shutdown', name, vmid=vmid):
log.error('Unable to shut VM {0} ({1}) down..'.format(name, vmid))
raise SaltCloudExecutionFailure
# xxx: TBD: Check here whether the status was actually changed to 'stopped'
return {'Shutdown': '{0} was shutdown.'.format(name)}
| 31.276616
| 109
| 0.595873
|
74b8e14774d13fd4425f022840e79457afdbfa4f
| 27,389
|
py
|
Python
|
datmo/core/controller/file/driver/tests/test_local.py
|
awesome-archive/datmo
|
72ea51c28a9947e24a464395bb0136b39eb6001a
|
[
"Apache-2.0"
] | 331
|
2018-03-30T14:33:59.000Z
|
2022-01-10T19:43:32.000Z
|
datmo/core/controller/file/driver/tests/test_local.py
|
KIMS-Github/datmo
|
a456d196006b67ce56af96cb4900682eab747bef
|
[
"MIT"
] | 274
|
2018-04-08T17:12:44.000Z
|
2020-07-29T02:45:22.000Z
|
datmo/core/controller/file/driver/tests/test_local.py
|
KIMS-Github/datmo
|
a456d196006b67ce56af96cb4900682eab747bef
|
[
"MIT"
] | 28
|
2018-05-03T21:57:22.000Z
|
2020-12-31T04:18:42.000Z
|
"""
Tests for local.py
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import tempfile
import platform
from io import TextIOWrapper
try:
def to_bytes(val):
return bytes(val)
to_bytes("test")
except TypeError:
def to_bytes(val):
return bytes(val, "utf-8")
to_bytes("test")
from datmo.core.util.misc_functions import get_datmo_temp_path
from datmo.core.controller.file.driver.local import LocalFileDriver
from datmo.core.util.exceptions import PathDoesNotExist
from datmo.config import Config
class TestLocalFileDriver():
# TODO: Add more cases for each test
"""
Checks all functions of the LocalFileDriver
"""
def setup_method(self):
# provide mountable tmp directory for docker
tempfile.tempdir = "/tmp" if not platform.system(
) == "Windows" else None
test_datmo_dir = os.environ.get('TEST_DATMO_DIR',
tempfile.gettempdir())
self.temp_dir = tempfile.mkdtemp(dir=test_datmo_dir)
Config().set_home(self.temp_dir)
self.local_file_driver = LocalFileDriver(
root=self.temp_dir, datmo_directory_name=".datmo")
def teardown_method(self):
pass
def test_initialize(self):
        assert self.local_file_driver is not None
# Static Method Tests
def test_get_safe_dst_filepath(self):
# Create first file to copy
relative_filepath = "test.json"
self.local_file_driver.create(relative_filepath)
# Create destination directory
relative_dirpath = "dest"
self.local_file_driver.create(relative_dirpath, directory=True)
# Create file within destination directory
relative_filename_2 = os.path.join(relative_dirpath, "test.json")
self.local_file_driver.create(relative_filename_2)
filepath = os.path.join(self.local_file_driver.root, relative_filepath)
dirpath = os.path.join(self.local_file_driver.root, relative_dirpath)
result = self.local_file_driver.\
get_safe_dst_filepath(filepath, dirpath)
assert result == os.path.join(dirpath, "test_0.json")
def test_copytree(self):
# Create source directory
relative_src_dirpath = "src"
self.local_file_driver.create(relative_src_dirpath, directory=True)
relative_src_filepath = os.path.join(relative_src_dirpath, "test.json")
self.local_file_driver.create(relative_src_filepath)
# Create destination directory
relative_dst_dirpath = "dst"
self.local_file_driver.create(relative_dst_dirpath, directory=True)
# Copy source directory to destination
src_dirpath = os.path.join(self.local_file_driver.root,
relative_src_dirpath)
        src_dirhash = self.local_file_driver.get_dirhash(src_dirpath)
        assert src_dirhash == "74be16979710d4c4e7c6647856088456"
dst_dirpath = os.path.join(self.local_file_driver.root,
relative_dst_dirpath)
self.local_file_driver.copytree(src_dirpath, dst_dirpath)
dst_dirhash = self.local_file_driver.get_dirhash(dst_dirpath)
assert dst_dirhash == "74be16979710d4c4e7c6647856088456"
dst_filepath = os.path.join(dst_dirpath, "test.json")
assert os.path.isdir(os.path.join(dst_dirpath)) and \
os.path.isfile(dst_filepath) == True
def test_copyfile(self):
# Create first file to copy
relative_filepath = "test.json"
self.local_file_driver.create(relative_filepath)
# Create destination directory
relative_dst_dirpath = "dest"
self.local_file_driver.create(relative_dst_dirpath, directory=True)
# Copy file to destination
filepath = os.path.join(self.local_file_driver.root, relative_filepath)
dst_dirpath = os.path.join(self.local_file_driver.root,
relative_dst_dirpath)
self.local_file_driver.copyfile(filepath, dst_dirpath)
assert os.path.isfile(os.path.join(dst_dirpath, relative_filepath))
# Property Method Test
def test_is_initialized(self):
self.local_file_driver.init()
assert self.local_file_driver.is_initialized == True
# Instance Method Tests
def test_init(self):
result = self.local_file_driver.init()
assert result and \
self.local_file_driver.is_initialized
def test_create(self):
temp_relative_filepath = "test.json"
temp_filepath = self.local_file_driver.create(temp_relative_filepath)
assert os.path.isfile(temp_filepath) == True
def test_exists(self):
temp_relative_filepath = "test.json"
result = self.local_file_driver.exists(temp_relative_filepath)
assert result == False
self.local_file_driver.create(temp_relative_filepath)
result = self.local_file_driver.exists(temp_relative_filepath)
assert result == True
def test_get(self):
# Test failure
temp_relative_filepath = "test.json"
failed = False
try:
self.local_file_driver.get(temp_relative_filepath)
except PathDoesNotExist:
failed = True
assert failed
# Test success with default mode
self.local_file_driver.create(temp_relative_filepath)
result = self.local_file_driver.get(temp_relative_filepath)
assert isinstance(result, TextIOWrapper)
# Test success with default mode and directory=True
# Create test directories to move
self.local_file_driver.create("dirpath1", directory=True)
self.local_file_driver.create(os.path.join("dirpath1", "filepath1"))
# Absolute file paths after added to collection (to test)
filepath1 = os.path.join(self.local_file_driver.root, "dirpath1",
"filepath1")
result = self.local_file_driver.get(
os.path.join("dirpath1"), directory=True)
assert len(result) == 1
assert isinstance(result[0], TextIOWrapper) and \
result[0].name == filepath1
def test_ensure(self):
temp_relative_filepath = "test.json"
self.local_file_driver.ensure(temp_relative_filepath)
assert os.path.isfile(
os.path.join(self.local_file_driver.root,
temp_relative_filepath)) == True
def test_delete(self):
temp_relative_filepath = "test.json"
self.local_file_driver.create(temp_relative_filepath)
filepath = os.path.join(self.local_file_driver.root,
temp_relative_filepath)
assert os.path.exists(filepath) == True
self.local_file_driver.delete(temp_relative_filepath)
assert os.path.exists(filepath) == False
# Hidden .datmo directory tests
def test_create_hidden_datmo_dir(self):
result = self.local_file_driver.create_hidden_datmo_dir()
assert result == True and \
os.path.isdir(self.local_file_driver.datmo_directory)
def test_exists_hidden_datmo_dir(self):
result = self.local_file_driver.exists_hidden_datmo_dir()
assert result == False
self.local_file_driver.create_hidden_datmo_dir()
result = self.local_file_driver.exists_hidden_datmo_dir()
assert result == True
def test_ensure_hidden_datmo_dir(self):
result = self.local_file_driver.ensure_hidden_datmo_dir()
assert result == True and \
os.path.isdir(self.local_file_driver.datmo_directory)
def test_delete_hidden_datmo_dir(self):
self.local_file_driver.create_hidden_datmo_dir()
result = self.local_file_driver.delete_hidden_datmo_dir()
assert result == True and \
not os.path.isdir(self.local_file_driver.datmo_directory)
# Template tests
# TODO : Add tests for code that handles various project templates
# Files directory tests
def test_create_files_dir(self):
files_path = os.path.join(self.local_file_driver.datmo_directory,
"files")
thrown = False
try:
self.local_file_driver.create_files_dir()
except Exception:
thrown = True
assert thrown == True and \
not os.path.isdir(files_path)
self.local_file_driver.init()
result = self.local_file_driver.create_files_dir()
assert result == True and \
os.path.isdir(files_path)
def test_exists_files_dir(self):
files_path = os.path.join(self.local_file_driver.datmo_directory,
"files")
result = self.local_file_driver.exists_files_dir()
assert result == False and \
not os.path.isdir(files_path)
self.local_file_driver.init()
self.local_file_driver.create_files_dir()
result = self.local_file_driver.exists_files_dir()
assert result == True and \
os.path.isdir(files_path)
def test_ensure_files_dir(self):
files_path = os.path.join(self.local_file_driver.datmo_directory,
"files")
result = self.local_file_driver.ensure_files_dir()
assert result == True and \
os.path.isdir(files_path)
def test_delete_files_dir(self):
files_path = os.path.join(self.local_file_driver.datmo_directory,
"files")
self.local_file_driver.init()
self.local_file_driver.create_files_dir()
result = self.local_file_driver.delete_files_dir()
assert result == True and \
not os.path.isdir(files_path)
# Collection directory tests
def test_create_collections_dir(self):
collections_path = os.path.join(self.local_file_driver.datmo_directory,
"collections")
thrown = False
try:
self.local_file_driver.create_collections_dir()
except Exception:
thrown = True
assert thrown == True and \
not os.path.isdir(collections_path)
self.local_file_driver.init()
result = self.local_file_driver.create_collections_dir()
assert result == True and \
os.path.isdir(collections_path)
def test_exists_collections_dir(self):
collections_path = os.path.join(self.local_file_driver.datmo_directory,
"collections")
result = self.local_file_driver.exists_collections_dir()
assert result == False and \
not os.path.isdir(collections_path)
self.local_file_driver.init()
self.local_file_driver.create_collections_dir()
result = self.local_file_driver.exists_collections_dir()
assert result == True and \
os.path.isdir(collections_path)
def test_ensure_collections_dir(self):
collections_path = os.path.join(self.local_file_driver.datmo_directory,
"collections")
result = self.local_file_driver.ensure_collections_dir()
assert result == True and \
os.path.isdir(collections_path)
def test_delete_collections_dir(self):
collections_path = os.path.join(self.local_file_driver.datmo_directory,
"collections")
self.local_file_driver.init()
self.local_file_driver.create_collections_dir()
result = self.local_file_driver.delete_collections_dir()
assert result == True and \
not os.path.isdir(collections_path)
# .datmo directory structure tests
def test_create_hidden_datmo_file_structure(self):
result = self.local_file_driver.create_hidden_datmo_file_structure()
assert result == True and \
os.path.isdir(self.local_file_driver.datmo_directory)
def test_exists_hidden_datmo_file_structure(self):
result = self.local_file_driver.exists_hidden_datmo_file_structure()
assert result == False
self.local_file_driver.ensure_hidden_datmo_file_structure()
result = self.local_file_driver.exists_hidden_datmo_file_structure()
assert result == True
def test_ensure_hidden_datmo_file_structure(self):
result = self.local_file_driver.ensure_hidden_datmo_file_structure()
assert result == True and \
os.path.isdir(self.local_file_driver.datmo_directory)
def test_delete_hidden_datmo_file_structure(self):
self.local_file_driver.create_hidden_datmo_file_structure()
result = self.local_file_driver.delete_hidden_datmo_file_structure()
assert result == True and \
not os.path.isdir(self.local_file_driver.datmo_directory)
# Other functions for collections
def test_create_collection(self):
self.local_file_driver.init()
collections_path = os.path.join(self.local_file_driver.datmo_directory,
"collections")
# Test empty file collection already exists
filehash_empty, _, _ = self.local_file_driver. \
create_collection([])
collection_path_empty = os.path.join(collections_path, filehash_empty)
assert os.path.isdir(collection_path_empty)
assert len(os.listdir(collections_path)) == 1
# Test creating another empty file collection (should not fail again)
filehash_empty, _, _ = self.local_file_driver. \
create_collection([])
collection_path_empty = os.path.join(collections_path, filehash_empty)
assert os.path.isdir(collection_path_empty)
assert len(os.listdir(collections_path)) == 1
# Create test directories to move
self.local_file_driver.create("dirpath1", directory=True)
self.local_file_driver.create("dirpath2", directory=True)
self.local_file_driver.create("filepath1")
dirpath1 = os.path.join(self.local_file_driver.root, "dirpath1")
dirpath2 = os.path.join(self.local_file_driver.root, "dirpath2")
filepath1 = os.path.join(self.local_file_driver.root, "filepath1")
filehash, _, _ = self.local_file_driver.\
create_collection([dirpath1, dirpath2, filepath1])
collection_path = os.path.join(collections_path, filehash)
assert os.path.isdir(collection_path)
assert len(os.listdir(collections_path)) == 2
# Run these for all platforms
assert os.path.isdir(os.path.join(collection_path, "dirpath1"))
assert os.path.isdir(os.path.join(collection_path, "dirpath2"))
assert os.path.isfile(os.path.join(collection_path, "filepath1"))
# Only assume success for non-Windows platforms
if not platform.system() == "Windows":
assert (oct(
os.stat(os.path.join(collection_path, "dirpath1")).st_mode &
0o777) == '0o755' or oct(
os.stat(os.path.join(collection_path, "dirpath1")).st_mode
& 0o777) == '0755')
assert (oct(
os.stat(os.path.join(collection_path, "dirpath2")).st_mode &
0o777) == '0o755' or oct(
os.stat(os.path.join(collection_path, "dirpath2")).st_mode
& 0o777) == '0755')
assert (oct(
os.stat(os.path.join(collection_path, "filepath1")).st_mode &
0o777) == '0o755' or oct(
os.stat(os.path.join(collection_path, "filepath1")).st_mode
& 0o777) == '0755')
# TODO: Create test for Windows platform
# else:
# assert (oct(
# os.stat(os.path.join(collection_path, "dirpath1")).st_mode &
# 0o777) == '0o777' or oct(
# os.stat(os.path.join(collection_path, "dirpath1")).st_mode
# & 0o777) == '0777')
# assert (oct(
# os.stat(os.path.join(collection_path, "dirpath2")).st_mode &
# 0o777) == '0o777' or oct(
# os.stat(os.path.join(collection_path, "dirpath2")).st_mode
# & 0o777) == '0777')
# assert (oct(
# os.stat(os.path.join(collection_path, "filepath1")).st_mode &
# 0o777) == '0o777' or oct(
# os.stat(os.path.join(collection_path, "filepath1")).st_mode
# & 0o777) == '0777')
self.local_file_driver.delete_collection(filehash)
def test_calculate_hash_paths_simple(self):
self.local_file_driver.init()
# Create test directories to move
self.local_file_driver.create("dirpath1", directory=True)
self.local_file_driver.create("dirpath2", directory=True)
self.local_file_driver.create("filepath1")
self.local_file_driver.create("filepath2")
dirpath1 = os.path.join(self.local_file_driver.root, "dirpath1")
dirpath2 = os.path.join(self.local_file_driver.root, "dirpath2")
filepath1 = os.path.join(self.local_file_driver.root, "filepath1")
filepath2 = os.path.join(self.local_file_driver.root, "filepath2")
# check with just 1 blank filepath
paths = [filepath1]
temp_dir = get_datmo_temp_path(self.local_file_driver.root)
result = self.local_file_driver.calculate_hash_paths(paths, temp_dir)
assert result == "74be16979710d4c4e7c6647856088456"
shutil.rmtree(temp_dir)
# check with 1 empty directory and 1 blank filepath (empty directories do NOT change hash)
paths = [filepath1, dirpath1]
temp_dir = get_datmo_temp_path(self.local_file_driver.root)
result = self.local_file_driver.calculate_hash_paths(paths, temp_dir)
assert result == "74be16979710d4c4e7c6647856088456"
shutil.rmtree(temp_dir)
# check with 2 empty directories and 1 blank filepath (empty directories do NOT change hash)
paths = [filepath1, dirpath1, dirpath2]
temp_dir = get_datmo_temp_path(self.local_file_driver.root)
result = self.local_file_driver.calculate_hash_paths(paths, temp_dir)
assert result == "74be16979710d4c4e7c6647856088456"
shutil.rmtree(temp_dir)
# check 2 blank filepaths (should be different)
paths = [filepath1, filepath2]
temp_dir = get_datmo_temp_path(self.local_file_driver.root)
result = self.local_file_driver.calculate_hash_paths(paths, temp_dir)
assert result == "020eb29b524d7ba672d9d48bc72db455"
shutil.rmtree(temp_dir)
# check 1 blank filepath with a different name (same because name not factored into hash)
paths = [filepath2]
temp_dir = get_datmo_temp_path(self.local_file_driver.root)
result = self.local_file_driver.calculate_hash_paths(paths, temp_dir)
assert result == "74be16979710d4c4e7c6647856088456"
shutil.rmtree(temp_dir)
def test_calculate_hash_paths_single_line(self):
self.local_file_driver.init()
# Create test directories to move
self.local_file_driver.create("filepath1")
filepath1 = os.path.join(self.local_file_driver.root, "filepath1")
paths = [filepath1]
# Add contents to the file in python and verify hash
temp_dir = get_datmo_temp_path(self.local_file_driver.root)
with open(filepath1, "wb") as f:
f.write(to_bytes("hello\n"))
result = self.local_file_driver.calculate_hash_paths(paths, temp_dir)
shutil.rmtree(temp_dir)
assert result == "57ae7aad8abe2f317e460c92d3ed1178"
def test_calculate_hash_paths_multiple_lines(self):
self.local_file_driver.init()
# Create test directories to move
self.local_file_driver.create("filepath1")
filepath1 = os.path.join(self.local_file_driver.root, "filepath1")
paths = [filepath1]
# Add contents to the file in python and verify hash
temp_dir = get_datmo_temp_path(self.local_file_driver.root)
with open(filepath1, "wb") as f:
f.write(to_bytes("FROM something:something\n"))
f.write(to_bytes("test multiple lines\n"))
result = self.local_file_driver.calculate_hash_paths(paths, temp_dir)
shutil.rmtree(temp_dir)
assert result == "a14de65c0fc13bc50cb246cc518195af"
def test_get_filehash(self):
filepath = os.path.join(self.temp_dir, "test.txt")
with open(filepath, "wb") as f:
f.write(to_bytes("hello\n"))
result = self.local_file_driver.get_filehash(filepath)
assert len(result) == 32
assert result == "b1946ac92492d2347c6235b4d2611184"
def test_get_dirhash(self):
temp_dir_1 = get_datmo_temp_path(self.temp_dir)
filepath = os.path.join(temp_dir_1, "test.txt")
with open(filepath, "wb") as f:
f.write(to_bytes("hello\n"))
result = self.local_file_driver.get_dirhash(temp_dir_1)
assert result == "57ae7aad8abe2f317e460c92d3ed1178"
temp_dir_2 = get_datmo_temp_path(self.temp_dir)
filepath_2 = os.path.join(temp_dir_2, "test.txt")
with open(filepath_2, "wb") as f:
f.write(to_bytes("hello\n"))
result_2 = self.local_file_driver.get_dirhash(temp_dir_2)
assert result == result_2
def test_get_absolute_collection_path(self):
self.local_file_driver.init()
filehash, _, _ = self.local_file_driver. \
create_collection([])
collection_path = os.path.join(self.local_file_driver.datmo_directory,
"collections", filehash)
returned_collection_path = self.local_file_driver.\
get_absolute_collection_path(filehash)
assert returned_collection_path == collection_path
def test_get_relative_collection_path(self):
self.local_file_driver.init()
filehash, _, _ = self.local_file_driver. \
create_collection([])
relative_collection_path = os.path.join(
self.local_file_driver.datmo_directory_name, "collections",
filehash)
returned_relative_collection_path = self.local_file_driver.\
get_relative_collection_path(filehash)
assert returned_relative_collection_path == relative_collection_path
def test_exists_collection(self):
self.local_file_driver.init()
filehash, _, _ = self.local_file_driver.create_collection([])
collection_path = os.path.join(self.local_file_driver.datmo_directory,
"collections", filehash)
result = self.local_file_driver.exists_collection(filehash)
assert result == True and \
os.path.isdir(collection_path)
def test_get_collection_files(self):
self.local_file_driver.init()
# Test empty file collection default mode
filehash_empty, _, _ = self.local_file_driver. \
create_collection([])
result = self.local_file_driver.get_collection_files(filehash_empty)
assert not result
# Create test directories to move
self.local_file_driver.create("dirpath1", directory=True)
self.local_file_driver.create("dirpath2", directory=True)
self.local_file_driver.create(os.path.join("dirpath1", "filepath1"))
self.local_file_driver.create(os.path.join("dirpath2", "filepath2"))
self.local_file_driver.create("filepath3")
# Absolute file paths to add to collection
dirpath1 = os.path.join(self.local_file_driver.root, "dirpath1")
dirpath2 = os.path.join(self.local_file_driver.root, "dirpath2")
filepath3 = os.path.join(self.local_file_driver.root, "filepath3")
filehash, _, _ = self.local_file_driver. \
create_collection([dirpath1, dirpath2, filepath3])
# Absolute file paths after added to collection (to test)
filepath1_after = os.path.join(self.local_file_driver.datmo_directory,
"collections", filehash, "dirpath1",
"filepath1")
filepath2_after = os.path.join(self.local_file_driver.datmo_directory,
"collections", filehash, "dirpath2",
"filepath2")
filepath3_after = os.path.join(self.local_file_driver.datmo_directory,
"collections", filehash, "filepath3")
paths_list = [filepath1_after, filepath2_after, filepath3_after]
result = self.local_file_driver.get_collection_files(filehash)
assert len(result) == 3
assert isinstance(result[0], TextIOWrapper) and \
result[0].name in paths_list
assert isinstance(result[1], TextIOWrapper) and \
result[1].name in paths_list
assert isinstance(result[2], TextIOWrapper) and \
result[2].name in paths_list
def test_delete_collection(self):
self.local_file_driver.init()
filehash, _, _ = self.local_file_driver.create_collection([])
collection_path = os.path.join(self.local_file_driver.datmo_directory,
"collections", filehash)
result = self.local_file_driver.delete_collection(filehash)
assert result == True and \
not os.path.isdir(collection_path)
def test_list_file_collections(self):
self.local_file_driver.init()
filehash_1, _, _ = self.local_file_driver.create_collection([])
self.local_file_driver.create("filepath1")
filepath1 = os.path.join(self.local_file_driver.root, "filepath1")
filehash_2, _, _ = self.local_file_driver.create_collection(
[filepath1])
collection_list = self.local_file_driver.list_file_collections()
assert filehash_1 in collection_list and \
filehash_2 in collection_list
def test_transfer_collection(self):
# Create test directories to move
self.local_file_driver.create("dirpath1", directory=True)
self.local_file_driver.create("dirpath2", directory=True)
self.local_file_driver.create("filepath1")
dirpath1 = os.path.join(self.local_file_driver.root, "dirpath1")
dirpath2 = os.path.join(self.local_file_driver.root, "dirpath2")
filepath1 = os.path.join(self.local_file_driver.root, "filepath1")
self.local_file_driver.init()
filehash, _, _ = self.local_file_driver. \
create_collection([dirpath1, dirpath2, filepath1])
dst_dirpath = os.path.join(self.temp_dir, "new_dir")
self.local_file_driver.create(dst_dirpath, directory=True)
result = self.local_file_driver.transfer_collection(
filehash, dst_dirpath)
assert result == True and \
os.path.isdir(os.path.join(dst_dirpath,
"dirpath1")) and \
os.path.isdir(os.path.join(dst_dirpath,
"dirpath2")) and \
os.path.isfile(os.path.join(dst_dirpath,
"filepath1"))
| 44.033762
| 100
| 0.654934
|
20cdfb4b265d6680a5f650353db3eef5eacae3c6
| 1,077
|
py
|
Python
|
dogia/server/reconnect_task.py
|
dogia-coin/dogia-blockchain
|
16ea0a09777354905150c216e1fab60141296473
|
[
"Apache-2.0"
] | null | null | null |
dogia/server/reconnect_task.py
|
dogia-coin/dogia-blockchain
|
16ea0a09777354905150c216e1fab60141296473
|
[
"Apache-2.0"
] | null | null | null |
dogia/server/reconnect_task.py
|
dogia-coin/dogia-blockchain
|
16ea0a09777354905150c216e1fab60141296473
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import socket
from dogia.server.server import DogiaServer
from dogia.types.peer_info import PeerInfo
def start_reconnect_task(server: DogiaServer, peer_info_arg: PeerInfo, log, auth: bool):
"""
Start a background task that checks connection and reconnects periodically to a peer.
"""
peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)
async def connection_check():
while True:
peer_retry = True
for _, connection in server.all_connections.items():
if connection.get_peer_info() == peer_info or connection.get_peer_info() == peer_info_arg:
peer_retry = False
if peer_retry:
log.info(f"Reconnecting to peer {peer_info}")
try:
await server.start_client(peer_info, None, auth=auth)
except Exception as e:
log.info(f"Failed to connect to {peer_info} {e}")
await asyncio.sleep(3)
return asyncio.create_task(connection_check())
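# Illustrative sketch (not part of dogia): the same "poll connections, then
# reconnect" loop reduced to plain asyncio so it is self-contained. The
# callables and the three-second interval below are assumptions made only for
# illustration; the real task above depends on DogiaServer and PeerInfo.
async def _example_reconnect_loop(is_connected, connect, log, interval: float = 3.0):
    while True:
        if not is_connected():
            try:
                await connect()
            except Exception as e:
                log.info(f"Failed to reconnect: {e}")
        await asyncio.sleep(interval)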
| 37.137931
| 106
| 0.645311
|
968951c5f2844f875d4799cbc1a0d0e36ff4b20e
| 399
|
py
|
Python
|
advisor_api/asgi.py
|
MarikIshtar007/User-Advisor-Booking-API
|
b5b400199fab9572d62b929bbc2a3c89d09feb23
|
[
"MIT"
] | null | null | null |
advisor_api/asgi.py
|
MarikIshtar007/User-Advisor-Booking-API
|
b5b400199fab9572d62b929bbc2a3c89d09feb23
|
[
"MIT"
] | null | null | null |
advisor_api/asgi.py
|
MarikIshtar007/User-Advisor-Booking-API
|
b5b400199fab9572d62b929bbc2a3c89d09feb23
|
[
"MIT"
] | null | null | null |
"""
ASGI config for advisor_api project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advisor_api.settings')
application = get_asgi_application()
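# Usage note (illustrative, not part of the original file): an ASGI server is
# pointed at the callable above, e.g. `uvicorn advisor_api.asgi:application`
# (assuming uvicorn is installed).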
| 23.470588
| 78
| 0.789474
|
be834726af20441f3f34784d6b8b7f9b46026234
| 4,747
|
py
|
Python
|
dashboard/dashboard/pinpoint/models/results2.py
|
xinghun61/catapult
|
a120c4f6e011a9830eefdb783b818ab7113aacfd
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/pinpoint/models/results2.py
|
xinghun61/catapult
|
a120c4f6e011a9830eefdb783b818ab7113aacfd
|
[
"BSD-3-Clause"
] | 5
|
2020-09-07T12:36:46.000Z
|
2022-03-02T05:49:30.000Z
|
dashboard/dashboard/pinpoint/models/results2.py
|
brave-experiments/catapult
|
0d8246fe06fb598577f2344efcbc4b4e5b3aa323
|
[
"BSD-3-Clause"
] | 1
|
2020-07-25T00:02:48.000Z
|
2020-07-25T00:02:48.000Z
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import cloudstorage
import logging
import os
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from dashboard.pinpoint.models.quest import read_value
from tracing_build import render_histograms_viewer
from tracing.value import gtest_json_converter
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
class Results2Error(Exception):
pass
class CachedResults2(ndb.Model):
"""Stores data on when a results2 was generated."""
updated = ndb.DateTimeProperty(required=True, auto_now_add=True)
job_id = ndb.StringProperty()
class _GcsFileStream(object):
"""Wraps a gcs file providing a FileStream like api."""
# pylint: disable=invalid-name
def __init__(self, *args, **kwargs):
self._gcs_file = cloudstorage.open(*args, **kwargs)
def seek(self, _):
pass
def truncate(self):
pass
def write(self, data):
self._gcs_file.write(data)
def close(self):
self._gcs_file.close()
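# Note (added for clarity): seek() and truncate() above are deliberate no-ops,
# presumably because the caller expects a seekable file-like object while the
# cloudstorage file only supports sequential writes.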
def _GetCloudStorageName(job_id):
return '/results2-public/%s.html' % job_id
def GetCachedResults2(job):
filename = _GetCloudStorageName(job.job_id)
results = cloudstorage.listbucket(filename)
for _ in results:
return 'https://storage.cloud.google.com' + filename
return None
def ScheduleResults2Generation(job):
logging.debug('Job [%s]: ScheduleResults2Generation', job.job_id)
try:
# Don't want several tasks creating results2, so create task with specific
# name to deduplicate.
task_name = 'results2-public-%s' % job.job_id
taskqueue.add(
queue_name='job-queue', url='/api/generate-results2/' + job.job_id,
name=task_name)
except taskqueue.TombstonedTaskError:
return False
except taskqueue.TaskAlreadyExistsError:
pass
return True
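# Note (added for clarity): with named tasks, TombstonedTaskError means a task
# with this name already ran recently and the name cannot be reused yet, so
# scheduling is reported as unsuccessful; TaskAlreadyExistsError means an
# identical task is still queued, which is treated as already scheduled.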
def GenerateResults2(job):
logging.debug('Job [%s]: GenerateResults2', job.job_id)
histogram_dicts = _FetchHistograms(job)
vulcanized_html = _ReadVulcanizedHistogramsViewer()
CachedResults2(job_id=job.job_id).put()
filename = _GetCloudStorageName(job.job_id)
gcs_file = _GcsFileStream(
filename, 'w', content_type='text/html',
retry_params=cloudstorage.RetryParams(backoff_factor=1.1))
render_histograms_viewer.RenderHistogramsViewer(
histogram_dicts, gcs_file,
reset_results=True, vulcanized_html=vulcanized_html)
gcs_file.close()
def _ReadVulcanizedHistogramsViewer():
viewer_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'vulcanized_histograms_viewer', 'vulcanized_histograms_viewer.html')
with open(viewer_path, 'r') as f:
return f.read()
def _FetchHistograms(job):
for change in _ChangeList(job):
for attempt in job.state._attempts[change]:
for execution in attempt.executions:
if isinstance(
execution, read_value._ReadHistogramsJsonValueExecution):
# The histogram sets are very big. Since we have limited
# memory, delete the histogram sets as we go along.
histogram_set = _JsonFromExecution(execution)
for histogram in histogram_set:
yield histogram
del histogram_set
elif isinstance(
execution, read_value._ReadGraphJsonValueExecution):
graphjson_results = _JsonFromExecution(execution)
hs = gtest_json_converter.ConvertGtestJson(graphjson_results)
hs.AddSharedDiagnosticToAllHistograms(
reserved_infos.LABELS.name,
generic_set.GenericSet([str(change)]))
hs = hs.AsDicts()
for histogram in hs:
yield histogram
del hs
def _ChangeList(job):
# If there are differences, only include Changes with differences.
changes = []
for change_a, change_b in job.state.Differences():
if change_a not in changes:
changes.append(change_a)
if change_b not in changes:
changes.append(change_b)
if changes:
return changes
return job.state._changes
def _JsonFromExecution(execution):
if hasattr(execution, '_isolate_server'):
isolate_server = execution._isolate_server
else:
isolate_server = 'https://isolateserver.appspot.com'
isolate_hash = execution._isolate_hash
if hasattr(execution, '_results_filename'):
results_filename = execution._results_filename
else:
results_filename = 'chartjson-output.json'
return read_value._RetrieveOutputJson(
isolate_server, isolate_hash, results_filename)
| 28.255952
| 78
| 0.728671
|
b28b27c762e7af7c1e66b06f8ac663671d4bd5ad
| 1,045
|
py
|
Python
|
tests/kubernetes/checks/test_ApiServerAuditLogMaxAge.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
tests/kubernetes/checks/test_ApiServerAuditLogMaxAge.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
tests/kubernetes/checks/test_ApiServerAuditLogMaxAge.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
from checkov.kubernetes.checks.resource.k8s.ApiServerAuditLogMaxAge import check
from checkov.kubernetes.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestApiServerAuditLogMaxAge(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files_dir = current_dir + "/example_ApiServerAuditLogMaxAge"
report = runner.run(root_folder=test_files_dir,runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
self.assertEqual(summary['passed'], 1)
self.assertEqual(summary['failed'], 1)
self.assertEqual(summary['skipped'], 0)
self.assertEqual(summary['parsing_errors'], 0)
for failed in report.failed_checks:
self.assertIn("should-fail", failed.resource)
for passed in report.passed_checks:
self.assertIn("should-pass", passed.resource)
if __name__ == '__main__':
unittest.main()
| 32.65625
| 101
| 0.712919
|
4032d8d30f8a5cd1862da5df1603b1230846ef62
| 574
|
py
|
Python
|
lab/refactoring-pep8/indentation2.py
|
stark276/SPD-2.31-Testing-and-Architecture1
|
8ec4f3a2968a04584f581354925e927ea1bab34d
|
[
"MIT"
] | null | null | null |
lab/refactoring-pep8/indentation2.py
|
stark276/SPD-2.31-Testing-and-Architecture1
|
8ec4f3a2968a04584f581354925e927ea1bab34d
|
[
"MIT"
] | null | null | null |
lab/refactoring-pep8/indentation2.py
|
stark276/SPD-2.31-Testing-and-Architecture1
|
8ec4f3a2968a04584f581354925e927ea1bab34d
|
[
"MIT"
] | null | null | null |
# by Kami Bigdely
# Indentation
def write_to_db():
print('person info are written into db.')
def set_person_info(first_name, last_name, gender, date_of_birth, photo, nationality, place_of_birth):
if not first_name:
print('first name is empty.')
if not last_name:
print('last name is empty.')
# ...
write_to_db()
photo_path = "https://en.wikipedia.org/wiki/Tim_Hunt#/media/File:Tim_Hunt_at_UCSF_05_2009_(4).jpg"
set_person_info('Tim', 'Hunt', 'male', '19 February 1943', photo_path, 'United Kingdom', 'Neston, Cheshire, England')
| 33.764706
| 115
| 0.69338
|
b6129c5422f4c389138bc08a7c9f4df33dfe6255
| 104,579
|
py
|
Python
|
tests/test_s3/test_s3.py
|
sblumin/moto
|
6bc07360a1b44b43bc7d22005559168299d2c1da
|
[
"Apache-2.0"
] | null | null | null |
tests/test_s3/test_s3.py
|
sblumin/moto
|
6bc07360a1b44b43bc7d22005559168299d2c1da
|
[
"Apache-2.0"
] | null | null | null |
tests/test_s3/test_s3.py
|
sblumin/moto
|
6bc07360a1b44b43bc7d22005559168299d2c1da
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
from functools import wraps
from gzip import GzipFile
from io import BytesIO
import zlib
import pickle
import json
import boto
import boto3
from botocore.client import ClientError
import botocore.exceptions
from boto.exception import S3CreateError, S3ResponseError
from botocore.handlers import disable_signing
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from freezegun import freeze_time
import six
import requests
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
import sure # noqa
from moto import settings, mock_s3, mock_s3_deprecated
import moto.s3.models as s3model
if settings.TEST_SERVER_MODE:
REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE
EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"'
else:
REDUCED_PART_SIZE = 256
EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"'
def reduced_min_part_size(f):
""" speed up tests by temporarily making the multipart minimum part size
small
"""
orig_size = s3model.UPLOAD_PART_MIN_SIZE
@wraps(f)
def wrapped(*args, **kwargs):
try:
s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
return f(*args, **kwargs)
finally:
s3model.UPLOAD_PART_MIN_SIZE = orig_size
return wrapped
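# Usage note (illustrative, not part of the original file): decorating a test
# with @reduced_min_part_size swaps s3model.UPLOAD_PART_MIN_SIZE for the small
# REDUCED_PART_SIZE for the duration of that test only; the finally block
# restores the original value even if the test raises.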
class MyModel(object):
def __init__(self, name, value):
self.name = name
self.value = value
def save(self):
s3 = boto3.client('s3', region_name='us-east-1')
s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value)
@mock_s3
def test_keys_are_pickleable():
"""Keys must be pickleable due to boto3 implementation details."""
key = s3model.FakeKey('name', b'data!')
assert key.value == b'data!'
pickled = pickle.dumps(key)
loaded = pickle.loads(pickled)
assert loaded.value == key.value
@mock_s3
def test_append_to_value__basic():
key = s3model.FakeKey('name', b'data!')
assert key.value == b'data!'
assert key.size == 5
key.append_to_value(b' And even more data')
assert key.value == b'data! And even more data'
assert key.size == 24
@mock_s3
def test_append_to_value__nothing_added():
key = s3model.FakeKey('name', b'data!')
assert key.value == b'data!'
assert key.size == 5
key.append_to_value(b'')
assert key.value == b'data!'
assert key.size == 5
@mock_s3
def test_append_to_value__empty_key():
key = s3model.FakeKey('name', b'')
assert key.value == b''
assert key.size == 0
key.append_to_value(b'stuff')
assert key.value == b'stuff'
assert key.size == 5
@mock_s3
def test_my_model_save():
# Create Bucket so that test can run
conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket(Bucket='mybucket')
####################################
model_instance = MyModel('steve', 'is awesome')
model_instance.save()
body = conn.Object('mybucket', 'steve').get()['Body'].read().decode()
assert body == 'is awesome'
@mock_s3
def test_key_etag():
conn = boto3.resource('s3', region_name='us-east-1')
conn.create_bucket(Bucket='mybucket')
model_instance = MyModel('steve', 'is awesome')
model_instance.save()
conn.Bucket('mybucket').Object('steve').e_tag.should.equal(
'"d32bda93738f7e03adb22e66c90fbc04"')
@mock_s3_deprecated
def test_multipart_upload_too_small():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
multipart.upload_part_from_file(BytesIO(b'hello'), 1)
multipart.upload_part_from_file(BytesIO(b'world'), 2)
# Multipart with total size under 5MB is refused
multipart.complete_upload.should.throw(S3ResponseError)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# last part, can be less than 5 MB
part2 = b'1'
multipart.upload_part_from_file(BytesIO(part2), 2)
multipart.complete_upload()
# we should get both parts as the key contents
bucket.get_key(
"the-key").get_contents_as_string().should.equal(part1 + part2)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_out_of_order():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
# last part, can be less than 5 MB
part2 = b'1'
multipart.upload_part_from_file(BytesIO(part2), 4)
part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 2)
multipart.complete_upload()
# we should get both parts as the key contents
bucket.get_key(
"the-key").get_contents_as_string().should.equal(part1 + part2)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_with_headers():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload(
"the-key", metadata={"foo": "bar"})
part1 = b'0' * 10
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.complete_upload()
key = bucket.get_key("the-key")
key.metadata.should.equal({"foo": "bar"})
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_with_copy_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "original-key"
key.set_contents_from_string("key_value")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3)
multipart.complete_upload()
bucket.get_key(
"the-key").get_contents_as_string().should.equal(part1 + b"key_")
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_cancel():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.cancel_upload()
# TODO we really need some sort of assertion here, but we don't currently
    # have the ability to list multipart uploads for a bucket.
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_etag():
# Create Bucket so that test can run
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('mybucket')
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# last part, can be less than 5 MB
part2 = b'1'
multipart.upload_part_from_file(BytesIO(part2), 2)
multipart.complete_upload()
# we should get both parts as the key contents
bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_invalid_order():
# Create Bucket so that test can run
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('mybucket')
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b'0' * 5242880
etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
# last part, can be less than 5 MB
part2 = b'1'
etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
xml = xml.format(2, etag2) + xml.format(1, etag1)
xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
bucket.complete_multipart_upload.when.called_with(
multipart.key_name, multipart.id, xml).should.throw(S3ResponseError)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_etag_quotes_stripped():
# Create Bucket so that test can run
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('mybucket')
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b'0' * REDUCED_PART_SIZE
etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
# last part, can be less than 5 MB
part2 = b'1'
etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
# Strip quotes from etags
etag1 = etag1.replace('"','')
etag2 = etag2.replace('"','')
xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
xml = xml.format(1, etag1) + xml.format(2, etag2)
xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
bucket.complete_multipart_upload.when.called_with(
multipart.key_name, multipart.id, xml).should_not.throw(S3ResponseError)
# we should get both parts as the key contents
bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_duplicate_upload():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# same part again
multipart.upload_part_from_file(BytesIO(part1), 1)
part2 = b'1' * 1024
multipart.upload_part_from_file(BytesIO(part2), 2)
multipart.complete_upload()
# We should get only one copy of part 1.
bucket.get_key(
"the-key").get_contents_as_string().should.equal(part1 + part2)
@mock_s3_deprecated
def test_list_multiparts():
# Create Bucket so that test can run
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('mybucket')
multipart1 = bucket.initiate_multipart_upload("one-key")
multipart2 = bucket.initiate_multipart_upload("two-key")
uploads = bucket.get_all_multipart_uploads()
uploads.should.have.length_of(2)
dict([(u.key_name, u.id) for u in uploads]).should.equal(
{'one-key': multipart1.id, 'two-key': multipart2.id})
multipart2.cancel_upload()
uploads = bucket.get_all_multipart_uploads()
uploads.should.have.length_of(1)
uploads[0].key_name.should.equal("one-key")
multipart1.cancel_upload()
uploads = bucket.get_all_multipart_uploads()
uploads.should.be.empty
@mock_s3_deprecated
def test_key_save_to_missing_bucket():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.get_bucket('mybucket', validate=False)
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string.when.called_with(
"foobar").should.throw(S3ResponseError)
@mock_s3_deprecated
def test_missing_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
bucket.get_key("the-key").should.equal(None)
@mock_s3_deprecated
def test_missing_key_urllib2():
conn = boto.connect_s3('the_key', 'the_secret')
conn.create_bucket("foobar")
urlopen.when.called_with(
"http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError)
@mock_s3_deprecated
def test_empty_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("")
key = bucket.get_key("the-key")
key.size.should.equal(0)
key.get_contents_as_string().should.equal(b'')
@mock_s3_deprecated
def test_empty_key_set_on_existing_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar")
key = bucket.get_key("the-key")
key.size.should.equal(6)
key.get_contents_as_string().should.equal(b'foobar')
key.set_contents_from_string("")
bucket.get_key("the-key").get_contents_as_string().should.equal(b'')
@mock_s3_deprecated
def test_large_key_save():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar" * 100000)
bucket.get_key(
"the-key").get_contents_as_string().should.equal(b'foobar' * 100000)
@mock_s3_deprecated
def test_copy_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
bucket.copy_key('new-key', 'foobar', 'the-key')
bucket.get_key(
"the-key").get_contents_as_string().should.equal(b"some value")
bucket.get_key(
"new-key").get_contents_as_string().should.equal(b"some value")
@mock_s3_deprecated
def test_copy_key_with_unicode():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-unicode-💩-key"
key.set_contents_from_string("some value")
bucket.copy_key('new-key', 'foobar', 'the-unicode-💩-key')
bucket.get_key(
"the-unicode-💩-key").get_contents_as_string().should.equal(b"some value")
bucket.get_key(
"new-key").get_contents_as_string().should.equal(b"some value")
@mock_s3_deprecated
def test_copy_key_with_version():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
bucket.configure_versioning(versioning=True)
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
key.set_contents_from_string("another value")
key = [
key.version_id
for key in bucket.get_all_versions()
if not key.is_latest
][0]
bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id=key)
bucket.get_key(
"the-key").get_contents_as_string().should.equal(b"another value")
bucket.get_key(
"new-key").get_contents_as_string().should.equal(b"some value")
@mock_s3_deprecated
def test_set_metadata():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = 'the-key'
key.set_metadata('md', 'Metadatastring')
key.set_contents_from_string("Testval")
bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
@mock_s3_deprecated
def test_copy_key_replace_metadata():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_metadata('md', 'Metadatastring')
key.set_contents_from_string("some value")
bucket.copy_key('new-key', 'foobar', 'the-key',
metadata={'momd': 'Mometadatastring'})
bucket.get_key("new-key").get_metadata('md').should.be.none
bucket.get_key(
"new-key").get_metadata('momd').should.equal('Mometadatastring')
@freeze_time("2012-01-01 12:00:00")
@mock_s3_deprecated
def test_last_modified():
# See https://github.com/boto/boto/issues/466
conn = boto.connect_s3()
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
rs = bucket.get_all_keys()
rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z')
bucket.get_key(
"the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
@mock_s3_deprecated
def test_missing_bucket():
conn = boto.connect_s3('the_key', 'the_secret')
conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError)
@mock_s3_deprecated
def test_bucket_with_dash():
conn = boto.connect_s3('the_key', 'the_secret')
conn.get_bucket.when.called_with(
'mybucket-test').should.throw(S3ResponseError)
@mock_s3_deprecated
def test_create_existing_bucket():
"Trying to create a bucket that already exists should raise an Error"
conn = boto.s3.connect_to_region("us-west-2")
conn.create_bucket("foobar")
with assert_raises(S3CreateError):
conn.create_bucket('foobar')
@mock_s3_deprecated
def test_create_existing_bucket_in_us_east_1():
"Trying to create a bucket that already exists in us-east-1 returns the bucket"
""""
http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
Your previous request to create the named bucket succeeded and you already
own it. You get this error in all AWS regions except US Standard,
    us-east-1. In the us-east-1 region, you will get 200 OK, but it is a no-op
    (if the bucket exists, Amazon S3 will not do anything).
"""
conn = boto.s3.connect_to_region("us-east-1")
conn.create_bucket("foobar")
bucket = conn.create_bucket("foobar")
bucket.name.should.equal("foobar")
@mock_s3_deprecated
def test_other_region():
conn = S3Connection(
'key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com')
conn.create_bucket("foobar")
list(conn.get_bucket("foobar").get_all_keys()).should.equal([])
@mock_s3_deprecated
def test_bucket_deletion():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
# Try to delete a bucket that still has keys
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
bucket.delete_key("the-key")
conn.delete_bucket("foobar")
# Get non-existing bucket
conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
    # Delete non-existent bucket
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
@mock_s3_deprecated
def test_get_all_buckets():
conn = boto.connect_s3('the_key', 'the_secret')
conn.create_bucket("foobar")
conn.create_bucket("foobar2")
buckets = conn.get_all_buckets()
buckets.should.have.length_of(2)
@mock_s3
@mock_s3_deprecated
def test_post_to_bucket():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
requests.post("https://foobar.s3.amazonaws.com/", {
'key': 'the-key',
'file': 'nothing'
})
bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing')
@mock_s3
@mock_s3_deprecated
def test_post_with_metadata_to_bucket():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
requests.post("https://foobar.s3.amazonaws.com/", {
'key': 'the-key',
'file': 'nothing',
'x-amz-meta-test': 'metadata'
})
bucket.get_key('the-key').get_metadata('test').should.equal('metadata')
@mock_s3_deprecated
def test_delete_missing_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('foobar')
deleted_key = bucket.delete_key("foobar")
deleted_key.key.should.equal("foobar")
@mock_s3_deprecated
def test_delete_keys():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('foobar')
Key(bucket=bucket, name='file1').set_contents_from_string('abc')
Key(bucket=bucket, name='file2').set_contents_from_string('abc')
Key(bucket=bucket, name='file3').set_contents_from_string('abc')
Key(bucket=bucket, name='file4').set_contents_from_string('abc')
result = bucket.delete_keys(['file2', 'file3'])
result.deleted.should.have.length_of(2)
result.errors.should.have.length_of(0)
keys = bucket.get_all_keys()
keys.should.have.length_of(2)
keys[0].name.should.equal('file1')
@mock_s3_deprecated
def test_delete_keys_with_invalid():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('foobar')
Key(bucket=bucket, name='file1').set_contents_from_string('abc')
Key(bucket=bucket, name='file2').set_contents_from_string('abc')
Key(bucket=bucket, name='file3').set_contents_from_string('abc')
Key(bucket=bucket, name='file4').set_contents_from_string('abc')
result = bucket.delete_keys(['abc', 'file3'])
result.deleted.should.have.length_of(1)
result.errors.should.have.length_of(1)
keys = bucket.get_all_keys()
keys.should.have.length_of(3)
keys[0].name.should.equal('file1')
@mock_s3_deprecated
def test_bucket_name_with_dot():
conn = boto.connect_s3()
bucket = conn.create_bucket('firstname.lastname')
k = Key(bucket, 'somekey')
k.set_contents_from_string('somedata')
@mock_s3_deprecated
def test_key_with_special_characters():
conn = boto.connect_s3()
bucket = conn.create_bucket('test_bucket_name')
key = Key(bucket, 'test_list_keys_2/x?y')
key.set_contents_from_string('value1')
key_list = bucket.list('test_list_keys_2/', '/')
keys = [x for x in key_list]
keys[0].name.should.equal("test_list_keys_2/x?y")
@mock_s3_deprecated
def test_unicode_key_with_slash():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "/the-key-unîcode/test"
key.set_contents_from_string("value")
key = bucket.get_key("/the-key-unîcode/test")
key.get_contents_as_string().should.equal(b'value')
@mock_s3_deprecated
def test_bucket_key_listing_order():
conn = boto.connect_s3()
bucket = conn.create_bucket('test_bucket')
prefix = 'toplevel/'
def store(name):
k = Key(bucket, prefix + name)
k.set_contents_from_string('somedata')
names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
for name in names:
store(name)
delimiter = None
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal([
'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
])
delimiter = '/'
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal([
'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
])
# Test delimiter with no prefix
delimiter = '/'
keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
keys.should.equal(['toplevel/'])
delimiter = None
keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
keys.should.equal(
[u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key'])
delimiter = '/'
keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
keys.should.equal([u'toplevel/x/'])
@mock_s3_deprecated
def test_key_with_reduced_redundancy():
conn = boto.connect_s3()
bucket = conn.create_bucket('test_bucket_name')
key = Key(bucket, 'test_rr_key')
key.set_contents_from_string('value1', reduced_redundancy=True)
# we use the bucket iterator because of:
    # https://github.com/boto/boto/issues/1173
list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY')
@mock_s3_deprecated
def test_copy_key_reduced_redundancy():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
bucket.copy_key('new-key', 'foobar', 'the-key',
storage_class='REDUCED_REDUNDANCY')
# we use the bucket iterator because of:
    # https://github.com/boto/boto/issues/1173
keys = dict([(k.name, k) for k in bucket])
keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
keys['the-key'].storage_class.should.equal("STANDARD")
@freeze_time("2012-01-01 12:00:00")
@mock_s3_deprecated
def test_restore_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
list(bucket)[0].ongoing_restore.should.be.none
key.restore(1)
key = bucket.get_key('the-key')
key.ongoing_restore.should_not.be.none
key.ongoing_restore.should.be.false
key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
key.restore(2)
key = bucket.get_key('the-key')
key.ongoing_restore.should_not.be.none
key.ongoing_restore.should.be.false
key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT")
@freeze_time("2012-01-01 12:00:00")
@mock_s3_deprecated
def test_restore_key_headers():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
key.restore(1, headers={'foo': 'bar'})
key = bucket.get_key('the-key')
key.ongoing_restore.should_not.be.none
key.ongoing_restore.should.be.false
key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
@mock_s3_deprecated
def test_get_versioning_status():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('foobar')
d = bucket.get_versioning_status()
d.should.be.empty
bucket.configure_versioning(versioning=True)
d = bucket.get_versioning_status()
d.shouldnt.be.empty
d.should.have.key('Versioning').being.equal('Enabled')
bucket.configure_versioning(versioning=False)
d = bucket.get_versioning_status()
d.should.have.key('Versioning').being.equal('Suspended')
@mock_s3_deprecated
def test_key_version():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('foobar')
bucket.configure_versioning(versioning=True)
versions = []
key = Key(bucket)
key.key = 'the-key'
key.version_id.should.be.none
key.set_contents_from_string('some string')
versions.append(key.version_id)
key.set_contents_from_string('some string')
versions.append(key.version_id)
set(versions).should.have.length_of(2)
key = bucket.get_key('the-key')
key.version_id.should.equal(versions[-1])
@mock_s3_deprecated
def test_list_versions():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('foobar')
bucket.configure_versioning(versioning=True)
key_versions = []
key = Key(bucket, 'the-key')
key.version_id.should.be.none
key.set_contents_from_string("Version 1")
key_versions.append(key.version_id)
key.set_contents_from_string("Version 2")
key_versions.append(key.version_id)
key_versions.should.have.length_of(2)
versions = list(bucket.list_versions())
versions.should.have.length_of(2)
versions[0].name.should.equal('the-key')
versions[0].version_id.should.equal(key_versions[0])
versions[0].get_contents_as_string().should.equal(b"Version 1")
versions[1].name.should.equal('the-key')
versions[1].version_id.should.equal(key_versions[1])
versions[1].get_contents_as_string().should.equal(b"Version 2")
key = Key(bucket, 'the2-key')
key.set_contents_from_string("Version 1")
keys = list(bucket.list())
keys.should.have.length_of(2)
versions = list(bucket.list_versions(prefix='the2-'))
versions.should.have.length_of(1)
@mock_s3_deprecated
def test_acl_setting():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
content = b'imafile'
keyname = 'test.txt'
key = Key(bucket, name=keyname)
key.content_type = 'text/plain'
key.set_contents_from_string(content)
key.make_public()
key = bucket.get_key(keyname)
assert key.get_contents_as_string() == content
grants = key.get_acl().acl.grants
assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3_deprecated
def test_acl_setting_via_headers():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
content = b'imafile'
keyname = 'test.txt'
key = Key(bucket, name=keyname)
key.content_type = 'text/plain'
key.set_contents_from_string(content, headers={
'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
})
key = bucket.get_key(keyname)
assert key.get_contents_as_string() == content
grants = key.get_acl().acl.grants
assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'FULL_CONTROL' for g in grants), grants
@mock_s3_deprecated
def test_acl_switching():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
content = b'imafile'
keyname = 'test.txt'
key = Key(bucket, name=keyname)
key.content_type = 'text/plain'
key.set_contents_from_string(content, policy='public-read')
key.set_acl('private')
grants = key.get_acl().acl.grants
assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3_deprecated
def test_bucket_acl_setting():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
bucket.make_public()
grants = bucket.get_acl().acl.grants
assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3_deprecated
def test_bucket_acl_switching():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
bucket.make_public()
bucket.set_acl('private')
grants = bucket.get_acl().acl.grants
assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3
def test_s3_object_in_public_bucket():
s3 = boto3.resource('s3')
bucket = s3.Bucket('test-bucket')
bucket.create(ACL='public-read')
bucket.put_object(Body=b'ABCD', Key='file.txt')
s3_anonymous = boto3.resource('s3')
s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read()
contents.should.equal(b'ABCD')
bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt')
with assert_raises(ClientError) as exc:
s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()
exc.exception.response['Error']['Code'].should.equal('403')
params = {'Bucket': 'test-bucket', 'Key': 'file.txt'}
presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900)
response = requests.get(presigned_url)
assert response.status_code == 200
@mock_s3
def test_s3_object_in_private_bucket():
s3 = boto3.resource('s3')
bucket = s3.Bucket('test-bucket')
bucket.create(ACL='private')
bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt')
s3_anonymous = boto3.resource('s3')
s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
with assert_raises(ClientError) as exc:
s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()
exc.exception.response['Error']['Code'].should.equal('403')
bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt')
contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read()
contents.should.equal(b'ABCD')
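# Note (added for clarity): registering botocore's disable_signing handler on
# 'choose-signer.s3.*', as done in the two tests above, makes the client send
# unsigned requests, i.e. it behaves like an anonymous caller.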
@mock_s3_deprecated
def test_unicode_key():
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = u'こんにちは.jpg'
key.set_contents_from_string('Hello world!')
assert [listed_key.key for listed_key in bucket.list()] == [key.key]
fetched_key = bucket.get_key(key.key)
assert fetched_key.key == key.key
assert fetched_key.get_contents_as_string().decode("utf-8") == 'Hello world!'
@mock_s3_deprecated
def test_unicode_value():
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = 'some_key'
key.set_contents_from_string(u'こんにちは.jpg')
list(bucket.list())
key = bucket.get_key(key.key)
assert key.get_contents_as_string().decode("utf-8") == u'こんにちは.jpg'
@mock_s3_deprecated
def test_setting_content_encoding():
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = bucket.new_key("keyname")
key.set_metadata("Content-Encoding", "gzip")
compressed_data = "abcdef"
key.set_contents_from_string(compressed_data)
key = bucket.get_key("keyname")
key.content_encoding.should.equal("gzip")
@mock_s3_deprecated
def test_bucket_location():
conn = boto.s3.connect_to_region("us-west-2")
bucket = conn.create_bucket('mybucket')
bucket.get_location().should.equal("us-west-2")
@mock_s3
def test_bucket_location_us_east_1():
cli = boto3.client('s3')
bucket_name = 'mybucket'
# No LocationConstraint ==> us-east-1
cli.create_bucket(Bucket=bucket_name)
cli.get_bucket_location(Bucket=bucket_name)['LocationConstraint'].should.equal(None)
@mock_s3_deprecated
def test_ranged_get():
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = 'bigkey'
rep = b"0123456789"
key.set_contents_from_string(rep * 10)
# Implicitly bounded range requests.
key.get_contents_as_string(
headers={'Range': 'bytes=0-'}).should.equal(rep * 10)
key.get_contents_as_string(
headers={'Range': 'bytes=50-'}).should.equal(rep * 5)
key.get_contents_as_string(
headers={'Range': 'bytes=99-'}).should.equal(b'9')
# Explicitly bounded range requests starting from the first byte.
key.get_contents_as_string(
headers={'Range': 'bytes=0-0'}).should.equal(b'0')
key.get_contents_as_string(
headers={'Range': 'bytes=0-49'}).should.equal(rep * 5)
key.get_contents_as_string(
headers={'Range': 'bytes=0-99'}).should.equal(rep * 10)
key.get_contents_as_string(
headers={'Range': 'bytes=0-100'}).should.equal(rep * 10)
key.get_contents_as_string(
headers={'Range': 'bytes=0-700'}).should.equal(rep * 10)
    # Explicitly bounded range requests starting from a middle byte.
key.get_contents_as_string(
headers={'Range': 'bytes=50-54'}).should.equal(rep[:5])
key.get_contents_as_string(
headers={'Range': 'bytes=50-99'}).should.equal(rep * 5)
key.get_contents_as_string(
headers={'Range': 'bytes=50-100'}).should.equal(rep * 5)
key.get_contents_as_string(
headers={'Range': 'bytes=50-700'}).should.equal(rep * 5)
# Explicitly bounded range requests starting from the last byte.
key.get_contents_as_string(
headers={'Range': 'bytes=99-99'}).should.equal(b'9')
key.get_contents_as_string(
headers={'Range': 'bytes=99-100'}).should.equal(b'9')
key.get_contents_as_string(
headers={'Range': 'bytes=99-700'}).should.equal(b'9')
# Suffix range requests.
key.get_contents_as_string(
headers={'Range': 'bytes=-1'}).should.equal(b'9')
key.get_contents_as_string(
headers={'Range': 'bytes=-60'}).should.equal(rep * 6)
key.get_contents_as_string(
headers={'Range': 'bytes=-100'}).should.equal(rep * 10)
key.get_contents_as_string(
headers={'Range': 'bytes=-101'}).should.equal(rep * 10)
key.get_contents_as_string(
headers={'Range': 'bytes=-700'}).should.equal(rep * 10)
key.size.should.equal(100)
@mock_s3_deprecated
def test_policy():
conn = boto.connect_s3()
bucket_name = 'mybucket'
bucket = conn.create_bucket(bucket_name)
policy = json.dumps({
"Version": "2012-10-17",
"Id": "PutObjPolicy",
"Statement": [
{
"Sid": "DenyUnEncryptedObjectUploads",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::{bucket_name}/*".format(bucket_name=bucket_name),
"Condition": {
"StringNotEquals": {
"s3:x-amz-server-side-encryption": "aws:kms"
}
}
}
]
})
with assert_raises(S3ResponseError) as err:
bucket.get_policy()
ex = err.exception
ex.box_usage.should.be.none
ex.error_code.should.equal('NoSuchBucketPolicy')
ex.message.should.equal('The bucket policy does not exist')
ex.reason.should.equal('Not Found')
ex.resource.should.be.none
ex.status.should.equal(404)
ex.body.should.contain(bucket_name)
ex.request_id.should_not.be.none
bucket.set_policy(policy).should.be.true
bucket = conn.get_bucket(bucket_name)
bucket.get_policy().decode('utf-8').should.equal(policy)
bucket.delete_policy()
with assert_raises(S3ResponseError) as err:
bucket.get_policy()
@mock_s3_deprecated
def test_website_configuration_xml():
conn = boto.connect_s3()
bucket = conn.create_bucket('test-bucket')
bucket.set_website_configuration_xml(TEST_XML)
bucket.get_website_configuration_xml().should.equal(TEST_XML)
@mock_s3_deprecated
def test_key_with_trailing_slash_in_ordinary_calling_format():
conn = boto.connect_s3(
'access_key',
'secret_key',
calling_format=boto.s3.connection.OrdinaryCallingFormat()
)
bucket = conn.create_bucket('test_bucket_name')
key_name = 'key_with_slash/'
key = Key(bucket, key_name)
key.set_contents_from_string('some value')
[k.name for k in bucket.get_all_keys()].should.contain(key_name)
"""
boto3
"""
@mock_s3
def test_boto3_key_etag():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome')
resp = s3.get_object(Bucket='mybucket', Key='steve')
resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"')
@mock_s3
def test_website_redirect_location():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome')
resp = s3.get_object(Bucket='mybucket', Key='steve')
resp.get('WebsiteRedirectLocation').should.be.none
url = 'https://github.com/spulec/moto'
s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome', WebsiteRedirectLocation=url)
resp = s3.get_object(Bucket='mybucket', Key='steve')
resp['WebsiteRedirectLocation'].should.equal(url)
@mock_s3
def test_boto3_list_keys_xml_escaped():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
key_name = 'Q&A.txt'
s3.put_object(Bucket='mybucket', Key=key_name, Body=b'is awesome')
resp = s3.list_objects_v2(Bucket='mybucket', Prefix=key_name)
assert resp['Contents'][0]['Key'] == key_name
assert resp['KeyCount'] == 1
assert resp['MaxKeys'] == 1000
assert resp['Prefix'] == key_name
assert resp['IsTruncated'] == False
assert 'Delimiter' not in resp
assert 'StartAfter' not in resp
assert 'NextContinuationToken' not in resp
assert 'Owner' not in resp['Contents'][0]
@mock_s3
def test_boto3_list_objects_v2_common_prefix_pagination():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
max_keys = 1
keys = ['test/{i}/{i}'.format(i=i) for i in range(3)]
for key in keys:
s3.put_object(Bucket='mybucket', Key=key, Body=b'v')
prefixes = []
args = {"Bucket": 'mybucket', "Delimiter": "/", "Prefix": "test/", "MaxKeys": max_keys}
resp = {"IsTruncated": True}
while resp.get("IsTruncated", False):
if "NextContinuationToken" in resp:
args["ContinuationToken"] = resp["NextContinuationToken"]
resp = s3.list_objects_v2(**args)
if "CommonPrefixes" in resp:
assert len(resp["CommonPrefixes"]) == max_keys
prefixes.extend(i["Prefix"] for i in resp["CommonPrefixes"])
assert prefixes == [k[:k.rindex('/') + 1] for k in keys]
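# Note (illustrative, not part of the original test): the manual
# NextContinuationToken loop above mirrors what boto3's built-in paginator
# does, e.g.
#     paginator = s3.get_paginator('list_objects_v2')
#     for page in paginator.paginate(Bucket='mybucket', Prefix='test/', Delimiter='/'):
#         ...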
@mock_s3
def test_boto3_list_objects_v2_truncated_response():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
s3.put_object(Bucket='mybucket', Key='one', Body=b'1')
s3.put_object(Bucket='mybucket', Key='two', Body=b'22')
s3.put_object(Bucket='mybucket', Key='three', Body=b'333')
# First list
resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1)
listed_object = resp['Contents'][0]
assert listed_object['Key'] == 'one'
assert resp['MaxKeys'] == 1
assert resp['Prefix'] == ''
assert resp['KeyCount'] == 1
assert resp['IsTruncated'] == True
assert 'Delimiter' not in resp
assert 'StartAfter' not in resp
assert 'Owner' not in listed_object # owner info was not requested
next_token = resp['NextContinuationToken']
# Second list
resp = s3.list_objects_v2(
Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token)
listed_object = resp['Contents'][0]
assert listed_object['Key'] == 'three'
assert resp['MaxKeys'] == 1
assert resp['Prefix'] == ''
assert resp['KeyCount'] == 1
assert resp['IsTruncated'] == True
assert 'Delimiter' not in resp
assert 'StartAfter' not in resp
assert 'Owner' not in listed_object
next_token = resp['NextContinuationToken']
# Third list
resp = s3.list_objects_v2(
Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token)
listed_object = resp['Contents'][0]
assert listed_object['Key'] == 'two'
assert resp['MaxKeys'] == 1
assert resp['Prefix'] == ''
assert resp['KeyCount'] == 1
assert resp['IsTruncated'] == False
assert 'Delimiter' not in resp
assert 'Owner' not in listed_object
assert 'StartAfter' not in resp
assert 'NextContinuationToken' not in resp
@mock_s3
def test_boto3_list_objects_v2_truncated_response_start_after():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
s3.put_object(Bucket='mybucket', Key='one', Body=b'1')
s3.put_object(Bucket='mybucket', Key='two', Body=b'22')
s3.put_object(Bucket='mybucket', Key='three', Body=b'333')
# First list
resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one')
listed_object = resp['Contents'][0]
assert listed_object['Key'] == 'three'
assert resp['MaxKeys'] == 1
assert resp['Prefix'] == ''
assert resp['KeyCount'] == 1
assert resp['IsTruncated'] == True
assert resp['StartAfter'] == 'one'
assert 'Delimiter' not in resp
assert 'Owner' not in listed_object
next_token = resp['NextContinuationToken']
# Second list
# The ContinuationToken must take precedence over StartAfter.
resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one',
ContinuationToken=next_token)
listed_object = resp['Contents'][0]
assert listed_object['Key'] == 'two'
assert resp['MaxKeys'] == 1
assert resp['Prefix'] == ''
assert resp['KeyCount'] == 1
assert resp['IsTruncated'] == False
# When ContinuationToken is given, StartAfter is ignored. This also means
# AWS does not return it in the response.
assert 'StartAfter' not in resp
assert 'Delimiter' not in resp
assert 'Owner' not in listed_object
@mock_s3
def test_boto3_list_objects_v2_fetch_owner():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
s3.put_object(Bucket='mybucket', Key='one', Body=b'11')
resp = s3.list_objects_v2(Bucket='mybucket', FetchOwner=True)
owner = resp['Contents'][0]['Owner']
assert 'ID' in owner
assert 'DisplayName' in owner
assert len(owner.keys()) == 2
@mock_s3
def test_boto3_bucket_create():
s3 = boto3.resource('s3', region_name='us-east-1')
s3.create_bucket(Bucket="blah")
s3.Object('blah', 'hello.txt').put(Body="some text")
s3.Object('blah', 'hello.txt').get()['Body'].read().decode(
"utf-8").should.equal("some text")
@mock_s3
def test_bucket_create_duplicate():
s3 = boto3.resource('s3', region_name='us-west-2')
s3.create_bucket(Bucket="blah", CreateBucketConfiguration={
'LocationConstraint': 'us-west-2',
})
with assert_raises(ClientError) as exc:
s3.create_bucket(
Bucket="blah",
CreateBucketConfiguration={
'LocationConstraint': 'us-west-2',
}
)
exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists')
@mock_s3
def test_bucket_create_force_us_east_1():
s3 = boto3.resource('s3', region_name='us-east-1')
with assert_raises(ClientError) as exc:
s3.create_bucket(Bucket="blah", CreateBucketConfiguration={
'LocationConstraint': 'us-east-1',
})
exc.exception.response['Error']['Code'].should.equal('InvalidLocationConstraint')
@mock_s3
def test_boto3_bucket_create_eu_central():
s3 = boto3.resource('s3', region_name='eu-central-1')
s3.create_bucket(Bucket="blah")
s3.Object('blah', 'hello.txt').put(Body="some text")
s3.Object('blah', 'hello.txt').get()['Body'].read().decode(
"utf-8").should.equal("some text")
@mock_s3
def test_boto3_head_object():
s3 = boto3.resource('s3', region_name='us-east-1')
s3.create_bucket(Bucket="blah")
s3.Object('blah', 'hello.txt').put(Body="some text")
s3.Object('blah', 'hello.txt').meta.client.head_object(
Bucket='blah', Key='hello.txt')
with assert_raises(ClientError) as e:
s3.Object('blah', 'hello2.txt').meta.client.head_object(
Bucket='blah', Key='hello_bad.txt')
e.exception.response['Error']['Code'].should.equal('404')
@mock_s3
def test_boto3_bucket_deletion():
cli = boto3.client('s3', region_name='us-east-1')
cli.create_bucket(Bucket="foobar")
cli.put_object(Bucket="foobar", Key="the-key", Body="some value")
# Try to delete a bucket that still has keys
cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(
cli.exceptions.ClientError,
('An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: '
'The bucket you tried to delete is not empty'))
cli.delete_object(Bucket="foobar", Key="the-key")
cli.delete_bucket(Bucket="foobar")
# Get non-existing bucket
cli.head_bucket.when.called_with(Bucket="foobar").should.throw(
cli.exceptions.ClientError,
"An error occurred (404) when calling the HeadBucket operation: Not Found")
# Delete non-existing bucket
cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(cli.exceptions.NoSuchBucket)
@mock_s3
def test_boto3_get_object():
s3 = boto3.resource('s3', region_name='us-east-1')
s3.create_bucket(Bucket="blah")
s3.Object('blah', 'hello.txt').put(Body="some text")
s3.Object('blah', 'hello.txt').meta.client.head_object(
Bucket='blah', Key='hello.txt')
with assert_raises(ClientError) as e:
s3.Object('blah', 'hello2.txt').get()
e.exception.response['Error']['Code'].should.equal('NoSuchKey')
@mock_s3
def test_boto3_head_object_with_versioning():
s3 = boto3.resource('s3', region_name='us-east-1')
bucket = s3.create_bucket(Bucket='blah')
bucket.Versioning().enable()
old_content = 'some text'
new_content = 'some new text'
s3.Object('blah', 'hello.txt').put(Body=old_content)
s3.Object('blah', 'hello.txt').put(Body=new_content)
versions = list(s3.Bucket('blah').object_versions.all())
latest = list(filter(lambda item: item.is_latest, versions))[0]
oldest = list(filter(lambda item: not item.is_latest, versions))[0]
head_object = s3.Object('blah', 'hello.txt').meta.client.head_object(
Bucket='blah', Key='hello.txt')
head_object['VersionId'].should.equal(latest.id)
head_object['ContentLength'].should.equal(len(new_content))
old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object(
Bucket='blah', Key='hello.txt', VersionId=oldest.id)
old_head_object['VersionId'].should.equal(oldest.id)
old_head_object['ContentLength'].should.equal(len(old_content))
old_head_object['VersionId'].should_not.equal(head_object['VersionId'])
@mock_s3
def test_boto3_copy_object_with_versioning():
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'})
client.put_object(Bucket='blah', Key='test1', Body=b'test1')
client.put_object(Bucket='blah', Key='test2', Body=b'test2')
obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId']
obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId']
client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2')
obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId']
# Version should be different to previous version
obj2_version_new.should_not.equal(obj2_version)
client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test2', 'VersionId': obj2_version}, Bucket='blah', Key='test3')
obj3_version_new = client.get_object(Bucket='blah', Key='test3')['VersionId']
obj3_version_new.should_not.equal(obj2_version_new)
# Copy file that doesn't exist
with assert_raises(ClientError) as e:
client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test4', 'VersionId': obj2_version}, Bucket='blah', Key='test5')
e.exception.response['Error']['Code'].should.equal('404')
response = client.create_multipart_upload(Bucket='blah', Key='test4')
upload_id = response['UploadId']
response = client.upload_part_copy(Bucket='blah', Key='test4', CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new},
UploadId=upload_id, PartNumber=1)
etag = response["CopyPartResult"]["ETag"]
client.complete_multipart_upload(
Bucket='blah', Key='test4', UploadId=upload_id,
MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': 1}]})
response = client.get_object(Bucket='blah', Key='test4')
data = response["Body"].read()
data.should.equal(b'test2')
@mock_s3
def test_boto3_copy_object_from_unversioned_to_versioned_bucket():
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='src', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
client.create_bucket(Bucket='dest', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
client.put_bucket_versioning(Bucket='dest', VersioningConfiguration={'Status': 'Enabled'})
client.put_object(Bucket='src', Key='test', Body=b'content')
obj2_version_new = client.copy_object(CopySource={'Bucket': 'src', 'Key': 'test'}, Bucket='dest', Key='test') \
.get('VersionId')
# VersionId should be present in the response
obj2_version_new.should_not.equal(None)
@mock_s3
def test_boto3_deleted_versionings_list():
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='blah')
client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'})
client.put_object(Bucket='blah', Key='test1', Body=b'test1')
client.put_object(Bucket='blah', Key='test2', Body=b'test2')
client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]})
listed = client.list_objects_v2(Bucket='blah')
assert len(listed['Contents']) == 1
@mock_s3
def test_boto3_delete_versioned_bucket():
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='blah')
client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'})
resp = client.put_object(Bucket='blah', Key='test1', Body=b'test1')
client.delete_object(Bucket='blah', Key='test1', VersionId=resp["VersionId"])
client.delete_bucket(Bucket='blah')
@mock_s3
def test_boto3_get_object_if_modified_since():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = "blah"
s3.create_bucket(Bucket=bucket_name)
key = 'hello.txt'
s3.put_object(
Bucket=bucket_name,
Key=key,
Body='test'
)
with assert_raises(botocore.exceptions.ClientError) as err:
s3.get_object(
Bucket=bucket_name,
Key=key,
IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1)
)
e = err.exception
e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'})
@mock_s3
def test_boto3_head_object_if_modified_since():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = "blah"
s3.create_bucket(Bucket=bucket_name)
key = 'hello.txt'
s3.put_object(
Bucket=bucket_name,
Key=key,
Body='test'
)
with assert_raises(botocore.exceptions.ClientError) as err:
s3.head_object(
Bucket=bucket_name,
Key=key,
IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1)
)
e = err.exception
e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'})
@mock_s3
@reduced_min_part_size
def test_boto3_multipart_etag():
# Create Bucket so that test can run
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
upload_id = s3.create_multipart_upload(
Bucket='mybucket', Key='the-key')['UploadId']
part1 = b'0' * REDUCED_PART_SIZE
etags = []
etags.append(
s3.upload_part(Bucket='mybucket', Key='the-key', PartNumber=1,
UploadId=upload_id, Body=part1)['ETag'])
# last part, can be less than 5 MB
part2 = b'1'
etags.append(
s3.upload_part(Bucket='mybucket', Key='the-key', PartNumber=2,
UploadId=upload_id, Body=part2)['ETag'])
s3.complete_multipart_upload(
Bucket='mybucket', Key='the-key', UploadId=upload_id,
MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': i}
for i, etag in enumerate(etags, 1)]})
# we should get both parts as the key contents
resp = s3.get_object(Bucket='mybucket', Key='the-key')
resp['ETag'].should.equal(EXPECTED_ETAG)
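# Illustrative sketch (not used by the tests above): one way the multipart ETag
# compared against EXPECTED_ETAG could be computed locally. This assumes the usual
# S3 convention of hashing the concatenated binary MD5 digests of the parts and
# appending "-<number of parts>"; hashlib is imported locally because this file's
# module-level imports are defined elsewhere.
def _example_multipart_etag(parts):
    import hashlib
    digests = b''.join(hashlib.md5(part).digest() for part in parts)
    return '"{}-{}"'.format(hashlib.md5(digests).hexdigest(), len(parts))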
@mock_s3
@reduced_min_part_size
def test_boto3_multipart_part_size():
s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='mybucket')
mpu = s3.create_multipart_upload(Bucket='mybucket', Key='the-key')
mpu_id = mpu["UploadId"]
parts = []
n_parts = 10
for i in range(1, n_parts + 1):
part_size = 5 * 1024 * 1024
body = b'1' * part_size
part = s3.upload_part(
Bucket='mybucket',
Key='the-key',
PartNumber=i,
UploadId=mpu_id,
Body=body,
ContentLength=len(body),
)
parts.append({"PartNumber": i, "ETag": part["ETag"]})
s3.complete_multipart_upload(
Bucket='mybucket',
Key='the-key',
UploadId=mpu_id,
MultipartUpload={"Parts": parts},
)
for i in range(1, n_parts + 1):
obj = s3.head_object(Bucket='mybucket', Key='the-key', PartNumber=i)
assert obj["ContentLength"] == part_size
@mock_s3
def test_boto3_put_object_with_tagging():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-tags'
s3.create_bucket(Bucket=bucket_name)
s3.put_object(
Bucket=bucket_name,
Key=key,
Body='test',
Tagging='foo=bar',
)
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp['TagSet'].should.contain({'Key': 'foo', 'Value': 'bar'})
@mock_s3
def test_boto3_put_bucket_tagging():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
# With 1 tag:
resp = s3.put_bucket_tagging(Bucket=bucket_name,
Tagging={
"TagSet": [
{
"Key": "TagOne",
"Value": "ValueOne"
}
]
})
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
# With multiple tags:
resp = s3.put_bucket_tagging(Bucket=bucket_name,
Tagging={
"TagSet": [
{
"Key": "TagOne",
"Value": "ValueOne"
},
{
"Key": "TagTwo",
"Value": "ValueTwo"
}
]
})
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
# No tags is also OK:
resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={
"TagSet": []
})
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
# With duplicate tag keys:
with assert_raises(ClientError) as err:
resp = s3.put_bucket_tagging(Bucket=bucket_name,
Tagging={
"TagSet": [
{
"Key": "TagOne",
"Value": "ValueOne"
},
{
"Key": "TagOne",
"Value": "ValueOneAgain"
}
]
})
e = err.exception
e.response["Error"]["Code"].should.equal("InvalidTag")
e.response["Error"]["Message"].should.equal("Cannot provide multiple Tags with the same key")
@mock_s3
def test_boto3_get_bucket_tagging():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_tagging(Bucket=bucket_name,
Tagging={
"TagSet": [
{
"Key": "TagOne",
"Value": "ValueOne"
},
{
"Key": "TagTwo",
"Value": "ValueTwo"
}
]
})
# Get the tags for the bucket:
resp = s3.get_bucket_tagging(Bucket=bucket_name)
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
len(resp["TagSet"]).should.equal(2)
# With no tags:
s3.put_bucket_tagging(Bucket=bucket_name, Tagging={
"TagSet": []
})
with assert_raises(ClientError) as err:
s3.get_bucket_tagging(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchTagSet")
e.response["Error"]["Message"].should.equal("The TagSet does not exist")
@mock_s3
def test_boto3_delete_bucket_tagging():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_tagging(Bucket=bucket_name,
Tagging={
"TagSet": [
{
"Key": "TagOne",
"Value": "ValueOne"
},
{
"Key": "TagTwo",
"Value": "ValueTwo"
}
]
})
resp = s3.delete_bucket_tagging(Bucket=bucket_name)
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204)
with assert_raises(ClientError) as err:
s3.get_bucket_tagging(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchTagSet")
e.response["Error"]["Message"].should.equal("The TagSet does not exist")
@mock_s3
def test_boto3_put_bucket_cors():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
resp = s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={
"CORSRules": [
{
"AllowedOrigins": [
"*"
],
"AllowedMethods": [
"GET",
"POST"
],
"AllowedHeaders": [
"Authorization"
],
"ExposeHeaders": [
"x-amz-request-id"
],
"MaxAgeSeconds": 123
},
{
"AllowedOrigins": [
"*"
],
"AllowedMethods": [
"PUT"
],
"AllowedHeaders": [
"Authorization"
],
"ExposeHeaders": [
"x-amz-request-id"
],
"MaxAgeSeconds": 123
}
]
})
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
with assert_raises(ClientError) as err:
s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={
"CORSRules": [
{
"AllowedOrigins": [
"*"
],
"AllowedMethods": [
"NOTREAL",
"POST"
]
}
]
})
e = err.exception
e.response["Error"]["Code"].should.equal("InvalidRequest")
e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. "
"Unsupported method is NOTREAL")
with assert_raises(ClientError) as err:
s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={
"CORSRules": []
})
e = err.exception
e.response["Error"]["Code"].should.equal("MalformedXML")
# And 101:
many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101
with assert_raises(ClientError) as err:
s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={
"CORSRules": many_rules
})
e = err.exception
e.response["Error"]["Code"].should.equal("MalformedXML")
@mock_s3
def test_boto3_get_bucket_cors():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
# Without CORS:
with assert_raises(ClientError) as err:
s3.get_bucket_cors(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration")
e.response["Error"]["Message"].should.equal("The CORS configuration does not exist")
s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={
"CORSRules": [
{
"AllowedOrigins": [
"*"
],
"AllowedMethods": [
"GET",
"POST"
],
"AllowedHeaders": [
"Authorization"
],
"ExposeHeaders": [
"x-amz-request-id"
],
"MaxAgeSeconds": 123
},
{
"AllowedOrigins": [
"*"
],
"AllowedMethods": [
"PUT"
],
"AllowedHeaders": [
"Authorization"
],
"ExposeHeaders": [
"x-amz-request-id"
],
"MaxAgeSeconds": 123
}
]
})
resp = s3.get_bucket_cors(Bucket=bucket_name)
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
len(resp["CORSRules"]).should.equal(2)
@mock_s3
def test_boto3_delete_bucket_cors():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={
"CORSRules": [
{
"AllowedOrigins": [
"*"
],
"AllowedMethods": [
"GET"
]
}
]
})
resp = s3.delete_bucket_cors(Bucket=bucket_name)
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204)
# Verify deletion:
with assert_raises(ClientError) as err:
s3.get_bucket_cors(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration")
e.response["Error"]["Message"].should.equal("The CORS configuration does not exist")
@mock_s3
def test_put_bucket_acl_body():
s3 = boto3.client("s3", region_name="us-east-1")
s3.create_bucket(Bucket="bucket")
bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"]
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group"
},
"Permission": "WRITE"
},
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group"
},
"Permission": "READ_ACP"
}
],
"Owner": bucket_owner
})
result = s3.get_bucket_acl(Bucket="bucket")
assert len(result["Grants"]) == 2
for g in result["Grants"]:
assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery"
assert g["Grantee"]["Type"] == "Group"
assert g["Permission"] in ["WRITE", "READ_ACP"]
# With one:
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group"
},
"Permission": "WRITE"
}
],
"Owner": bucket_owner
})
result = s3.get_bucket_acl(Bucket="bucket")
assert len(result["Grants"]) == 1
# With no owner:
with assert_raises(ClientError) as err:
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group"
},
"Permission": "WRITE"
}
]
})
assert err.exception.response["Error"]["Code"] == "MalformedACLError"
# With incorrect permission:
with assert_raises(ClientError) as err:
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group"
},
"Permission": "lskjflkasdjflkdsjfalisdjflkdsjf"
}
],
"Owner": bucket_owner
})
assert err.exception.response["Error"]["Code"] == "MalformedACLError"
# Clear the ACLs:
result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner})
assert not result.get("Grants")
@mock_s3
def test_put_bucket_notification():
s3 = boto3.client("s3", region_name="us-east-1")
s3.create_bucket(Bucket="bucket")
# With no configuration:
result = s3.get_bucket_notification(Bucket="bucket")
assert not result.get("TopicConfigurations")
assert not result.get("QueueConfigurations")
assert not result.get("LambdaFunctionConfigurations")
# Place proper topic configuration:
s3.put_bucket_notification_configuration(Bucket="bucket",
NotificationConfiguration={
"TopicConfigurations": [
{
"TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic",
"Events": [
"s3:ObjectCreated:*",
"s3:ObjectRemoved:*"
]
},
{
"TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic",
"Events": [
"s3:ObjectCreated:*"
],
"Filter": {
"Key": {
"FilterRules": [
{
"Name": "prefix",
"Value": "images/"
},
{
"Name": "suffix",
"Value": "png"
}
]
}
}
}
]
})
# Verify to completion:
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["TopicConfigurations"]) == 2
assert not result.get("QueueConfigurations")
assert not result.get("LambdaFunctionConfigurations")
assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic"
assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic"
assert len(result["TopicConfigurations"][0]["Events"]) == 2
assert len(result["TopicConfigurations"][1]["Events"]) == 1
assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*"
assert result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*"
assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*"
assert result["TopicConfigurations"][0]["Id"]
assert result["TopicConfigurations"][1]["Id"]
assert not result["TopicConfigurations"][0].get("Filter")
assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2
assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix"
assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/"
assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix"
assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png"
# Place proper queue configuration:
s3.put_bucket_notification_configuration(Bucket="bucket",
NotificationConfiguration={
"QueueConfigurations": [
{
"Id": "SomeID",
"QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue",
"Events": ["s3:ObjectCreated:*"],
"Filter": {
"Key": {
"FilterRules": [
{
"Name": "prefix",
"Value": "images/"
}
]
}
}
}
]
})
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["QueueConfigurations"]) == 1
assert not result.get("TopicConfigurations")
assert not result.get("LambdaFunctionConfigurations")
assert result["QueueConfigurations"][0]["Id"] == "SomeID"
assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue"
assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*"
assert len(result["QueueConfigurations"][0]["Events"]) == 1
assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1
assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix"
assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/"
# Place proper Lambda configuration:
s3.put_bucket_notification_configuration(Bucket="bucket",
NotificationConfiguration={
"LambdaFunctionConfigurations": [
{
"LambdaFunctionArn":
"arn:aws:lambda:us-east-1:012345678910:function:lambda",
"Events": ["s3:ObjectCreated:*"],
"Filter": {
"Key": {
"FilterRules": [
{
"Name": "prefix",
"Value": "images/"
}
]
}
}
}
]
})
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["LambdaFunctionConfigurations"]) == 1
assert not result.get("TopicConfigurations")
assert not result.get("QueueConfigurations")
assert result["LambdaFunctionConfigurations"][0]["Id"]
assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \
"arn:aws:lambda:us-east-1:012345678910:function:lambda"
assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*"
assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1
assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1
assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix"
assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/"
# And with all 3 set:
s3.put_bucket_notification_configuration(Bucket="bucket",
NotificationConfiguration={
"TopicConfigurations": [
{
"TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic",
"Events": [
"s3:ObjectCreated:*",
"s3:ObjectRemoved:*"
]
}
],
"LambdaFunctionConfigurations": [
{
"LambdaFunctionArn":
"arn:aws:lambda:us-east-1:012345678910:function:lambda",
"Events": ["s3:ObjectCreated:*"]
}
],
"QueueConfigurations": [
{
"QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue",
"Events": ["s3:ObjectCreated:*"]
}
]
})
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["LambdaFunctionConfigurations"]) == 1
assert len(result["TopicConfigurations"]) == 1
assert len(result["QueueConfigurations"]) == 1
# And clear it out:
s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={})
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert not result.get("TopicConfigurations")
assert not result.get("QueueConfigurations")
assert not result.get("LambdaFunctionConfigurations")
@mock_s3
def test_put_bucket_notification_errors():
s3 = boto3.client("s3", region_name="us-east-1")
s3.create_bucket(Bucket="bucket")
# With incorrect ARNs:
for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]:
with assert_raises(ClientError) as err:
s3.put_bucket_notification_configuration(Bucket="bucket",
NotificationConfiguration={
"{}Configurations".format(tech): [
{
"{}Arn".format(tech):
"arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj",
"Events": ["s3:ObjectCreated:*"]
}
]
})
assert err.exception.response["Error"]["Code"] == "InvalidArgument"
assert err.exception.response["Error"]["Message"] == "The ARN is not well formed"
# Region not the same as the bucket:
with assert_raises(ClientError) as err:
s3.put_bucket_notification_configuration(Bucket="bucket",
NotificationConfiguration={
"QueueConfigurations": [
{
"QueueArn":
"arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj",
"Events": ["s3:ObjectCreated:*"]
}
]
})
assert err.exception.response["Error"]["Code"] == "InvalidArgument"
assert err.exception.response["Error"]["Message"] == \
"The notification destination service region is not valid for the bucket location constraint"
# Invalid event name:
with assert_raises(ClientError) as err:
s3.put_bucket_notification_configuration(Bucket="bucket",
NotificationConfiguration={
"QueueConfigurations": [
{
"QueueArn":
"arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj",
"Events": ["notarealeventname"]
}
]
})
assert err.exception.response["Error"]["Code"] == "InvalidArgument"
assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications"
@mock_s3
def test_boto3_put_bucket_logging():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucket"
log_bucket = "logbucket"
wrong_region_bucket = "wrongregionlogbucket"
s3.create_bucket(Bucket=bucket_name)
s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later...
s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"})
# No logging config:
result = s3.get_bucket_logging(Bucket=bucket_name)
assert not result.get("LoggingEnabled")
# A log-bucket that doesn't exist:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": "IAMNOTREAL",
"TargetPrefix": ""
}
})
assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
# A log-bucket that's missing the proper ACLs for LogDelivery:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": ""
}
})
assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
assert "log-delivery" in err.exception.response["Error"]["Message"]
# Add the proper "log-delivery" ACL to the log buckets:
bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"]
for bucket in [log_bucket, wrong_region_bucket]:
s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group"
},
"Permission": "WRITE"
},
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group"
},
"Permission": "READ_ACP"
},
{
"Grantee": {
"Type": "CanonicalUser",
"ID": bucket_owner["ID"]
},
"Permission": "FULL_CONTROL"
}
],
"Owner": bucket_owner
})
# A log-bucket that's in the wrong region:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": wrong_region_bucket,
"TargetPrefix": ""
}
})
assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted"
# Correct logging:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name)
}
})
result = s3.get_bucket_logging(Bucket=bucket_name)
assert result["LoggingEnabled"]["TargetBucket"] == log_bucket
assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name)
assert not result["LoggingEnabled"].get("TargetGrants")
# And disabling:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={})
assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled")
# And enabling with multiple target grants:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name),
"TargetGrants": [
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser"
},
"Permission": "READ"
},
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser"
},
"Permission": "WRITE"
}
]
}
})
result = s3.get_bucket_logging(Bucket=bucket_name)
assert len(result["LoggingEnabled"]["TargetGrants"]) == 2
assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \
"SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274"
# Test with just 1 grant:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name),
"TargetGrants": [
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser"
},
"Permission": "READ"
}
]
}
})
result = s3.get_bucket_logging(Bucket=bucket_name)
assert len(result["LoggingEnabled"]["TargetGrants"]) == 1
# With an invalid grant:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name),
"TargetGrants": [
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser"
},
"Permission": "NOTAREALPERM"
}
]
}
})
assert err.exception.response["Error"]["Code"] == "MalformedXML"
@mock_s3
def test_boto3_put_object_tagging():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-tags'
s3.create_bucket(Bucket=bucket_name)
with assert_raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={'TagSet': [
{'Key': 'item1', 'Value': 'foo'},
{'Key': 'item2', 'Value': 'bar'},
]}
)
e = err.exception
e.response['Error'].should.equal({
'Code': 'NoSuchKey',
'Message': 'The specified key does not exist.',
'RequestID': '7a62c49f-347e-4fc4-9331-6e8eEXAMPLE',
})
s3.put_object(
Bucket=bucket_name,
Key=key,
Body='test'
)
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={'TagSet': [
{'Key': 'item1', 'Value': 'foo'},
{'Key': 'item2', 'Value': 'bar'},
]}
)
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
@mock_s3
def test_boto3_put_object_tagging_with_single_tag():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-tags'
s3.create_bucket(Bucket=bucket_name)
s3.put_object(
Bucket=bucket_name,
Key=key,
Body='test'
)
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={'TagSet': [
{'Key': 'item1', 'Value': 'foo'}
]}
)
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
@mock_s3
def test_boto3_get_object_tagging():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-tags'
s3.create_bucket(Bucket=bucket_name)
s3.put_object(
Bucket=bucket_name,
Key=key,
Body='test'
)
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp['TagSet'].should.have.length_of(0)
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={'TagSet': [
{'Key': 'item1', 'Value': 'foo'},
{'Key': 'item2', 'Value': 'bar'},
]}
)
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp['TagSet'].should.have.length_of(2)
resp['TagSet'].should.contain({'Key': 'item1', 'Value': 'foo'})
resp['TagSet'].should.contain({'Key': 'item2', 'Value': 'bar'})
@mock_s3
def test_boto3_list_object_versions():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-versions'
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'Status': 'Enabled'
}
)
items = (six.b('v1'), six.b('v2'))
for body in items:
s3.put_object(
Bucket=bucket_name,
Key=key,
Body=body
)
response = s3.list_object_versions(
Bucket=bucket_name
)
# Two object versions should be returned
len(response['Versions']).should.equal(2)
keys = set([item['Key'] for item in response['Versions']])
keys.should.equal({key})
# Test latest object version is returned
response = s3.get_object(Bucket=bucket_name, Key=key)
response['Body'].read().should.equal(items[-1])
@mock_s3
def test_boto3_list_object_versions_with_versioning_disabled():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-versions'
s3.create_bucket(Bucket=bucket_name)
items = (six.b('v1'), six.b('v2'))
for body in items:
s3.put_object(
Bucket=bucket_name,
Key=key,
Body=body
)
response = s3.list_object_versions(
Bucket=bucket_name
)
# One object version should be returned
len(response['Versions']).should.equal(1)
response['Versions'][0]['Key'].should.equal(key)
# The version id should be the string null
response['Versions'][0]['VersionId'].should.equal('null')
# Test latest object version is returned
response = s3.get_object(Bucket=bucket_name, Key=key)
response['Body'].read().should.equal(items[-1])
@mock_s3
def test_boto3_list_object_versions_with_versioning_enabled_late():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-versions'
s3.create_bucket(Bucket=bucket_name)
items = (six.b('v1'), six.b('v2'))
s3.put_object(
Bucket=bucket_name,
Key=key,
Body=six.b('v1')
)
s3.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'Status': 'Enabled'
}
)
s3.put_object(
Bucket=bucket_name,
Key=key,
Body=six.b('v2')
)
response = s3.list_object_versions(
Bucket=bucket_name
)
# Two object versions should be returned
len(response['Versions']).should.equal(2)
keys = set([item['Key'] for item in response['Versions']])
keys.should.equal({key})
# There should still be a null version id.
versionsId = set([item['VersionId'] for item in response['Versions']])
versionsId.should.contain('null')
# Test latest object version is returned
response = s3.get_object(Bucket=bucket_name, Key=key)
response['Body'].read().should.equal(items[-1])
@mock_s3
def test_boto3_bad_prefix_list_object_versions():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-versions'
bad_prefix = 'key-that-does-not-exist'
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'Status': 'Enabled'
}
)
items = (six.b('v1'), six.b('v2'))
for body in items:
s3.put_object(
Bucket=bucket_name,
Key=key,
Body=body
)
response = s3.list_object_versions(
Bucket=bucket_name,
Prefix=bad_prefix,
)
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response.should_not.contain('Versions')
response.should_not.contain('DeleteMarkers')
@mock_s3
def test_boto3_delete_markers():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = u'key-with-versions-and-unicode-ó'
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'Status': 'Enabled'
}
)
items = (six.b('v1'), six.b('v2'))
for body in items:
s3.put_object(
Bucket=bucket_name,
Key=key,
Body=body
)
s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]})
with assert_raises(ClientError) as e:
s3.get_object(
Bucket=bucket_name,
Key=key
)
e.exception.response['Error']['Code'].should.equal('NoSuchKey')
response = s3.list_object_versions(
Bucket=bucket_name
)
response['Versions'].should.have.length_of(2)
response['DeleteMarkers'].should.have.length_of(1)
s3.delete_object(
Bucket=bucket_name,
Key=key,
VersionId=response['DeleteMarkers'][0]['VersionId']
)
response = s3.get_object(
Bucket=bucket_name,
Key=key
)
response['Body'].read().should.equal(items[-1])
response = s3.list_object_versions(
Bucket=bucket_name
)
response['Versions'].should.have.length_of(2)
    # We've asserted there are only 2 records, so one is the newest and one is the oldest
latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
# Double check ordering of version ID's
latest['VersionId'].should_not.equal(oldest['VersionId'])
# Double check the name is still unicode
latest['Key'].should.equal('key-with-versions-and-unicode-ó')
oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
@mock_s3
def test_boto3_multiple_delete_markers():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = u'key-with-versions-and-unicode-ó'
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'Status': 'Enabled'
}
)
items = (six.b('v1'), six.b('v2'))
for body in items:
s3.put_object(
Bucket=bucket_name,
Key=key,
Body=body
)
# Delete the object twice to add multiple delete markers
s3.delete_object(Bucket=bucket_name, Key=key)
s3.delete_object(Bucket=bucket_name, Key=key)
response = s3.list_object_versions(Bucket=bucket_name)
response['DeleteMarkers'].should.have.length_of(2)
with assert_raises(ClientError) as e:
s3.get_object(
Bucket=bucket_name,
Key=key
)
    e.exception.response['Error']['Code'].should.equal('404')
# Remove both delete markers to restore the object
s3.delete_object(
Bucket=bucket_name,
Key=key,
VersionId=response['DeleteMarkers'][0]['VersionId']
)
s3.delete_object(
Bucket=bucket_name,
Key=key,
VersionId=response['DeleteMarkers'][1]['VersionId']
)
response = s3.get_object(
Bucket=bucket_name,
Key=key
)
response['Body'].read().should.equal(items[-1])
response = s3.list_object_versions(Bucket=bucket_name)
response['Versions'].should.have.length_of(2)
    # We've asserted there are only 2 records, so one is the newest and one is the oldest
latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
# Double check ordering of version ID's
latest['VersionId'].should_not.equal(oldest['VersionId'])
# Double check the name is still unicode
latest['Key'].should.equal('key-with-versions-and-unicode-ó')
oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
@mock_s3
def test_get_stream_gzipped():
payload = b"this is some stuff here"
s3_client = boto3.client("s3", region_name='us-east-1')
s3_client.create_bucket(Bucket='moto-tests')
buffer_ = BytesIO()
with GzipFile(fileobj=buffer_, mode='w') as f:
f.write(payload)
payload_gz = buffer_.getvalue()
s3_client.put_object(
Bucket='moto-tests',
Key='keyname',
Body=payload_gz,
ContentEncoding='gzip',
)
obj = s3_client.get_object(
Bucket='moto-tests',
Key='keyname',
)
res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS)
assert res == payload
TEST_XML = """\
<?xml version="1.0" encoding="UTF-8"?>
<ns0:WebsiteConfiguration xmlns:ns0="http://s3.amazonaws.com/doc/2006-03-01/">
<ns0:IndexDocument>
<ns0:Suffix>index.html</ns0:Suffix>
</ns0:IndexDocument>
<ns0:RoutingRules>
<ns0:RoutingRule>
<ns0:Condition>
<ns0:KeyPrefixEquals>test/testing</ns0:KeyPrefixEquals>
</ns0:Condition>
<ns0:Redirect>
<ns0:ReplaceKeyWith>test.txt</ns0:ReplaceKeyWith>
</ns0:Redirect>
</ns0:RoutingRule>
</ns0:RoutingRules>
</ns0:WebsiteConfiguration>
"""
@mock_s3
def test_boto3_bucket_name_too_long():
s3 = boto3.client('s3', region_name='us-east-1')
with assert_raises(ClientError) as exc:
s3.create_bucket(Bucket='x'*64)
exc.exception.response['Error']['Code'].should.equal('InvalidBucketName')
@mock_s3
def test_boto3_bucket_name_too_short():
s3 = boto3.client('s3', region_name='us-east-1')
with assert_raises(ClientError) as exc:
s3.create_bucket(Bucket='x'*2)
exc.exception.response['Error']['Code'].should.equal('InvalidBucketName')
@mock_s3
def test_accelerated_none_when_unspecified():
bucket_name = 'some_bucket'
s3 = boto3.client('s3')
s3.create_bucket(Bucket=bucket_name)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.shouldnt.have.key('Status')
@mock_s3
def test_can_enable_bucket_acceleration():
bucket_name = 'some_bucket'
s3 = boto3.client('s3')
s3.create_bucket(Bucket=bucket_name)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Enabled'},
)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.should.have.key('Status')
resp['Status'].should.equal('Enabled')
@mock_s3
def test_can_suspend_bucket_acceleration():
bucket_name = 'some_bucket'
s3 = boto3.client('s3')
s3.create_bucket(Bucket=bucket_name)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Enabled'},
)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Suspended'},
)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.should.have.key('Status')
resp['Status'].should.equal('Suspended')
@mock_s3
def test_suspending_acceleration_on_not_configured_bucket_does_nothing():
bucket_name = 'some_bucket'
s3 = boto3.client('s3')
s3.create_bucket(Bucket=bucket_name)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Suspended'},
)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.shouldnt.have.key('Status')
@mock_s3
def test_accelerate_configuration_status_validation():
bucket_name = 'some_bucket'
s3 = boto3.client('s3')
s3.create_bucket(Bucket=bucket_name)
with assert_raises(ClientError) as exc:
s3.put_bucket_accelerate_configuration(
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'bad_status'},
)
exc.exception.response['Error']['Code'].should.equal('MalformedXML')
@mock_s3
def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots():
bucket_name = 'some.bucket.with.dots'
s3 = boto3.client('s3')
s3.create_bucket(Bucket=bucket_name)
with assert_raises(ClientError) as exc:
s3.put_bucket_accelerate_configuration(
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Enabled'},
)
exc.exception.response['Error']['Code'].should.equal('InvalidRequest')
| 35.081852 | 144 | 0.594307 |
87f1fe5256280adb430ea2f4bd1ca062c5e49d47 | 5,267 | py | Python | arch/api/utils/upload.py | chanzhennan/FATE | 26c4285146e249f7746571011c6f9326c5845a57 | ["Apache-2.0"] | 1 | 2019-07-29T13:22:36.000Z | 2019-07-29T13:22:36.000Z | arch/api/utils/upload.py | chanzhennan/FATE | 26c4285146e249f7746571011c6f9326c5845a57 | ["Apache-2.0"] | null | null | null | arch/api/utils/upload.py | chanzhennan/FATE | 26c4285146e249f7746571011c6f9326c5845a57 | ["Apache-2.0"] | null | null | null |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import argparse
import os
import traceback
import csv
import sys
import time
from arch.api import eggroll
from arch.api.storage import save_data
CSV = 'csv'
LOAD_DATA_COUNT = 10000
MAX_PARTITION_NUM = 1024
def list_to_str(input_list):
str1 = ''
size = len(input_list)
for i in range(size):
if i == size - 1:
str1 += str(input_list[i])
else:
str1 += str(input_list[i]) + ','
return str1
def read_data(input_file='', head=True):
split_file_name = input_file.split('.')
if CSV in split_file_name:
with open(input_file) as csv_file:
csv_reader = csv.reader(csv_file)
if head is True:
csv_head = next(csv_reader)
for row in csv_reader:
yield (row[0], list_to_str(row[1:]))
else:
with open(input_file, 'r') as fin:
if head is True:
head = fin.readline()
lines = fin.readlines()
for line in lines:
values = line.replace("\n", "").replace("\t", ",").split(",")
yield (values[0], list_to_str(values[1:]))
def generate_table_name(input_file_path):
local_time = time.localtime(time.time())
str_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
file_name = input_file_path.split(".")[0]
file_name = file_name.split("/")[-1]
return file_name,str_time
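# For reference, a hypothetical config file for this script might look like the
# following (the keys mirror what is parsed in __main__ below; the values are
# purely illustrative):
# {
#     "file": "examples/data/breast_a.csv",
#     "head": 1,
#     "partition": 10,
#     "work_mode": 0,
#     "table_name": "breast_a",
#     "namespace": "fate_flow_test"
# }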
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', required=False, type=str, help="you should provide a path of configure file with json format")
try:
args = parser.parse_args()
if not args.config:
print("Can not find the parameter -c")
sys.exit()
job_config = {}
try:
args.config = os.path.abspath(args.config)
input_file_path = None
head = True
table_name = None
namespace = None
with open(args.config, 'r') as f:
job_config = json.load(f)
try:
input_file_path = job_config['file']
except:
traceback.print_exc()
try:
read_head = job_config['head']
if read_head == 0:
head = False
elif read_head == 1:
head = True
except:
print("'head' in .json should be 0 or 1, set head to 1")
try:
partition = job_config['partition']
if partition <= 0 or partition > MAX_PARTITION_NUM:
print("Error number of partition, it should between %d and %d" %(0, MAX_PARTITION_NUM))
sys.exit()
except:
print("set partition to 1")
partition = 1
try:
table_name = job_config['table_name']
except:
print("not setting table_name or setting error, set table_name according to current time")
try:
namespace = job_config['namespace']
except:
print("not setting namespace or setting error, set namespace according to input file name")
work_mode = job_config.get('work_mode')
if work_mode is None:
work_mode = 0
if not os.path.exists(input_file_path):
print("%s is not exist, please check the configure" % (input_file_path))
sys.exit()
input_data = read_data(input_file_path, head)
_namespace, _table_name = generate_table_name(input_file_path)
if namespace is None:
namespace = _namespace
if table_name is None:
table_name = _table_name
eggroll.init(mode=work_mode)
data_table = save_data(input_data, name=table_name, namespace=namespace, partition=partition)
print("------------load data finish!-----------------")
print("file: {}".format(input_file_path))
print("total data_count: {}".format(data_table.count()))
print("table name: {}, table namespace: {}".format(table_name, namespace))
except ValueError:
print('json parse error')
exit(-102)
except IOError:
print('read file error')
exit(-103)
except:
traceback.print_exc()
| 34.424837 | 136 | 0.550598 |
0b6ec0e554f2e765d0bb81952bf59aed2071456b | 4,152 | py | Python | audio_part.py | Mridul9451/Proctoring | 5615ae3191449d302b0d4c42965a665bcc81d407 | ["MIT"] | 1 | 2021-07-23T05:58:36.000Z | 2021-07-23T05:58:36.000Z | audio_part.py | Mridul9451/Proctoring | 5615ae3191449d302b0d4c42965a665bcc81d407 | ["MIT"] | null | null | null | audio_part.py | Mridul9451/Proctoring | 5615ae3191449d302b0d4c42965a665bcc81d407 | ["MIT"] | null | null | null |
import speech_recognition as sr
import pyaudio
import wave
import time
import threading
import os
def read_audio(stream, filename):
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 2
fs = 44100 # Record at 44100 samples per second
seconds = 10 # Number of seconds to record at once
filename = filename
frames = [] # Initialize array to store frames
for i in range(0, int(fs / chunk * seconds)):
data = stream.read(chunk)
frames.append(data)
# Save the recorded data as a WAV file
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
wf.close()
# Stop and close the stream
stream.stop_stream()
stream.close()
def convert(i):
if i >= 0:
sound = 'record' + str(i) +'.wav'
r = sr.Recognizer()
with sr.AudioFile(sound) as source:
r.adjust_for_ambient_noise(source)
print("Converting Audio To Text and saving to file..... ")
audio = r.listen(source)
try:
value = r.recognize_google(audio) ##### API call to google for speech recognition
os.remove(sound)
if str is bytes:
result = u"{}".format(value).encode("utf-8")
else:
result = "{}".format(value)
with open("test.txt","a") as f:
f.write(result)
f.write(" ")
f.close()
except sr.UnknownValueError:
print("")
except sr.RequestError as e:
print("{0}".format(e))
except KeyboardInterrupt:
pass
p = pyaudio.PyAudio() # Create an interface to PortAudio
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 2
fs = 44100
def save_audios(i):
stream = p.open(format=sample_format,channels=channels,rate=fs,
frames_per_buffer=chunk,input=True)
filename = 'record'+str(i)+'.wav'
read_audio(stream, filename)
flag = False  # make sure the flag exists even if the loop below never reaches i == 2
for i in range(30//10): # Number of total seconds to record / Number of seconds per recording
t1 = threading.Thread(target=save_audios, args=[i])
x = i-1
t2 = threading.Thread(target=convert, args=[x]) # send one earlier than being recorded
t1.start()
t2.start()
t1.join()
t2.join()
if i==2:
flag = True
if flag:
convert(i)
p.terminate()
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
file = open("test.txt") ## Student speech file
data = file.read()
file.close()
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(data) ######### tokenizing sentence
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens: ####### Removing stop words
if w not in stop_words:
filtered_sentence.append(w)
####### creating a final file
f=open('final.txt','w')
for ele in filtered_sentence:
f.write(ele+' ')
f.close()
##### checking whether proctor needs to be alerted or not
file = open("paper.txt") ## Question file
data = file.read()
file.close()
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(data) ######### tokenizing sentence
filtered_questions = [w for w in word_tokens if not w in stop_words]
filtered_questions = []
for w in word_tokens: ####### Removing stop words
if w not in stop_words:
filtered_questions.append(w)
def common_member(a, b):
a_set = set(a)
b_set = set(b)
# check length
if len(a_set.intersection(b_set)) > 0:
return(a_set.intersection(b_set))
else:
return([])
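# Quick illustrative check of common_member (hypothetical word lists, unrelated
# to the question paper or the transcribed speech):
#   common_member(['integral', 'matrix'], ['matrix', 'vector']) -> {'matrix'}
#   common_member(['integral'], ['vector']) -> []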
comm = common_member(filtered_questions, filtered_sentence)
print('Number of common elements:', len(comm))
print(comm)
| 30.755556 | 94 | 0.601879 |
a89b87c86466700a9e157285d1762b71af0639ef | 11,422 | py | Python | map_merge/launch/tb3_simulation/multi_tb3_simulation_launch.py | LuisLechugaRuiz/m-explore-ros2 | f808bc404a35fb47569b2907caac2338f943deca | ["BSD-3-Clause"] | 28 | 2021-08-04T06:00:03.000Z | 2022-03-17T18:14:42.000Z | map_merge/launch/tb3_simulation/multi_tb3_simulation_launch.py | LuisLechugaRuiz/m-explore-ros2 | f808bc404a35fb47569b2907caac2338f943deca | ["BSD-3-Clause"] | 8 | 2021-11-09T11:33:40.000Z | 2022-02-24T21:54:56.000Z | map_merge/launch/tb3_simulation/multi_tb3_simulation_launch.py | LuisLechugaRuiz/m-explore-ros2 | f808bc404a35fb47569b2907caac2338f943deca | ["BSD-3-Clause"] | 4 | 2021-11-09T11:30:19.000Z | 2022-03-12T05:30:09.000Z |
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example for spawning multiple robots in Gazebo.
This is an example of how to create a launch file for spawning multiple robots into Gazebo
and launching multiple instances of the navigation stack, each controlling one robot.
The robots co-exist in a shared environment and are controlled by independent nav stacks.
"""
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription, condition
from launch.actions import (
DeclareLaunchArgument,
ExecuteProcess,
GroupAction,
IncludeLaunchDescription,
LogInfo,
)
from launch.conditions import IfCondition, UnlessCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, TextSubstitution
def generate_launch_description():
# Get the launch directory
bringup_dir = get_package_share_directory("nav2_bringup")
launch_dir = os.path.join(bringup_dir, "launch")
    # Get the launch directory for multirobot_map_merge, where we have modified launch files
map_merge_dir = get_package_share_directory("multirobot_map_merge")
launch_dir_map_merge = os.path.join(map_merge_dir, "launch", "tb3_simulation")
# Names and poses of the robots for known poses demo
robots_known_poses = [
{"name": "robot1", "x_pose": 0.0, "y_pose": 0.5, "z_pose": 0.01},
{"name": "robot2", "x_pose": -3.0, "y_pose": 1.5, "z_pose": 0.01},
]
    # Names and poses of the robots for the unknown poses demo; they must start very close together
robots_unknown_poses = [
{"name": "robot1", "x_pose": -2.0, "y_pose": 0.5, "z_pose": 0.01},
{"name": "robot2", "x_pose": -3.0, "y_pose": 0.5, "z_pose": 0.01},
]
# Simulation settings
world = LaunchConfiguration("world")
simulator = LaunchConfiguration("simulator")
# On this example all robots are launched with the same settings
map_yaml_file = LaunchConfiguration("map")
autostart = LaunchConfiguration("autostart")
rviz_config_file = LaunchConfiguration("rviz_config")
use_robot_state_pub = LaunchConfiguration("use_robot_state_pub")
use_rviz = LaunchConfiguration("use_rviz")
log_settings = LaunchConfiguration("log_settings", default="true")
known_init_poses = LaunchConfiguration("known_init_poses")
declare_known_init_poses_cmd = DeclareLaunchArgument(
"known_init_poses",
default_value="True",
description="Known initial poses of the robots. If so don't forget to declare them in the params.yaml file",
)
# Declare the launch arguments
declare_world_cmd = DeclareLaunchArgument(
"world",
default_value=os.path.join(launch_dir_map_merge, "worlds", "world_only.model"),
description="Full path to world file to load",
)
declare_simulator_cmd = DeclareLaunchArgument(
"simulator",
default_value="gazebo",
description="The simulator to use (gazebo or gzserver)",
)
declare_map_yaml_cmd = DeclareLaunchArgument(
"map",
default_value=os.path.join(bringup_dir, "maps", "turtlebot3_world.yaml"),
description="Full path to map file to load",
)
declare_robot1_params_file_cmd = DeclareLaunchArgument(
"robot1_params_file",
default_value=os.path.join(
launch_dir_map_merge, "config", "nav2_multirobot_params_1.yaml"
),
description="Full path to the ROS2 parameters file to use for robot1 launched nodes",
)
declare_robot2_params_file_cmd = DeclareLaunchArgument(
"robot2_params_file",
default_value=os.path.join(
launch_dir_map_merge, "config", "nav2_multirobot_params_2.yaml"
),
description="Full path to the ROS2 parameters file to use for robot2 launched nodes",
)
declare_autostart_cmd = DeclareLaunchArgument(
"autostart",
default_value="true",
description="Automatically startup the stacks",
)
declare_rviz_config_file_cmd = DeclareLaunchArgument(
"rviz_config",
default_value=os.path.join(bringup_dir, "rviz", "nav2_namespaced_view.rviz"),
description="Full path to the RVIZ config file to use.",
)
declare_use_robot_state_pub_cmd = DeclareLaunchArgument(
"use_robot_state_pub",
default_value="True",
description="Whether to start the robot state publisher",
)
declare_use_rviz_cmd = DeclareLaunchArgument(
"use_rviz", default_value="True", description="Whether to start RVIZ"
)
slam_toolbox = LaunchConfiguration("slam_toolbox")
slam_gmapping = LaunchConfiguration("slam_gmapping")
declare_slam_toolbox_cmd = DeclareLaunchArgument(
"slam_toolbox", default_value="False", description="Whether run a SLAM toolbox"
)
declare_slam_gmapping_cmd = DeclareLaunchArgument(
"slam_gmapping",
default_value="False",
description="Whether run a SLAM gmapping",
)
    # Start Gazebo with a plugin providing the robot spawning service
start_gazebo_cmd = ExecuteProcess(
cmd=[
simulator,
"--verbose",
"-s",
"libgazebo_ros_init.so",
"-s",
"libgazebo_ros_factory.so",
world,
],
output="screen",
)
    # Define commands for spawning the robots into Gazebo
spawn_robots_cmds = []
for robot_known, robot_unknown in zip(robots_known_poses, robots_unknown_poses):
spawn_robots_cmds.append(
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(bringup_dir, "launch", "spawn_tb3_launch.py")
),
launch_arguments={
"x_pose": TextSubstitution(text=str(robot_known["x_pose"])),
"y_pose": TextSubstitution(text=str(robot_known["y_pose"])),
"z_pose": TextSubstitution(text=str(robot_known["z_pose"])),
"robot_name": robot_known["name"],
"turtlebot_type": TextSubstitution(text="waffle"),
}.items(),
condition=IfCondition(known_init_poses),
)
)
spawn_robots_cmds.append(
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(bringup_dir, "launch", "spawn_tb3_launch.py")
),
launch_arguments={
"x_pose": TextSubstitution(text=str(robot_unknown["x_pose"])),
"y_pose": TextSubstitution(text=str(robot_unknown["y_pose"])),
"z_pose": TextSubstitution(text=str(robot_unknown["z_pose"])),
"robot_name": robot_unknown["name"],
"turtlebot_type": TextSubstitution(text="waffle"),
}.items(),
condition=UnlessCondition(known_init_poses),
)
)
# Define commands for launching the navigation instances
nav_instances_cmds = []
for robot in robots_known_poses:
params_file = LaunchConfiguration(f"{robot['name']}_params_file")
group = GroupAction(
[
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(launch_dir, "rviz_launch.py")
),
condition=IfCondition(use_rviz),
launch_arguments={
"namespace": TextSubstitution(text=robot["name"]),
"use_namespace": "True",
"rviz_config": rviz_config_file,
}.items(),
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(launch_dir_map_merge, "tb3_simulation_launch.py")
),
launch_arguments={
"namespace": robot["name"],
"use_namespace": "True",
"map": map_yaml_file,
"use_sim_time": "True",
"params_file": params_file,
"autostart": autostart,
"use_rviz": "False",
"use_simulator": "False",
"headless": "False",
"slam": "True",
"slam_toolbox": slam_toolbox,
"slam_gmapping": slam_gmapping,
"use_robot_state_pub": use_robot_state_pub,
}.items(),
),
LogInfo(
condition=IfCondition(log_settings),
msg=["Launching ", robot["name"]],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " map yaml: ", map_yaml_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " params yaml: ", params_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " rviz config file: ", rviz_config_file],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[
robot["name"],
" using robot state pub: ",
use_robot_state_pub,
],
),
LogInfo(
condition=IfCondition(log_settings),
msg=[robot["name"], " autostart: ", autostart],
),
]
)
nav_instances_cmds.append(group)
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_simulator_cmd)
ld.add_action(declare_world_cmd)
ld.add_action(declare_map_yaml_cmd)
ld.add_action(declare_robot1_params_file_cmd)
ld.add_action(declare_robot2_params_file_cmd)
ld.add_action(declare_use_rviz_cmd)
ld.add_action(declare_autostart_cmd)
ld.add_action(declare_rviz_config_file_cmd)
ld.add_action(declare_use_robot_state_pub_cmd)
ld.add_action(declare_slam_toolbox_cmd)
ld.add_action(declare_slam_gmapping_cmd)
ld.add_action(declare_known_init_poses_cmd)
# Add the actions to start gazebo, robots and simulations
ld.add_action(start_gazebo_cmd)
for spawn_robot_cmd in spawn_robots_cmds:
ld.add_action(spawn_robot_cmd)
for simulation_instance_cmd in nav_instances_cmds:
ld.add_action(simulation_instance_cmd)
return ld
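# Usage sketch (added; the installed launch-file name below is an assumption and may
# differ in the multirobot_map_merge package):
#   ros2 launch multirobot_map_merge multi_tb3_simulation_launch.py \
#       slam_toolbox:=True known_init_poses:=False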
| 38.85034
| 116
| 0.616354
|
818a4e8c8eb20798bf419df35f38eff5ed222e7b
| 4,463
|
py
|
Python
|
.history/instagram/settings_20201122035354.py
|
Nyash-Mauro/Instagram-clone
|
39f3b660b3bdd59996fa88816712c906d0e3fba5
|
[
"MIT"
] | null | null | null |
.history/instagram/settings_20201122035354.py
|
Nyash-Mauro/Instagram-clone
|
39f3b660b3bdd59996fa88816712c906d0e3fba5
|
[
"MIT"
] | null | null | null |
.history/instagram/settings_20201122035354.py
|
Nyash-Mauro/Instagram-clone
|
39f3b660b3bdd59996fa88816712c906d0e3fba5
|
[
"MIT"
] | null | null | null |
"""
Django settings for instagram project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config, Csv
from pathlib import Path
import cloudinary
import cloudinary.uploader
import cloudinary.api
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#ne#dd-1kjq%d%n8+)7c43n^#ijlnwy8+mq$@)8oq3z%a_)3xy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# 'tinymce',
# 'vote',
# 'gram',
# 'cloudinary',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'instagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'instagram.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
cloudinary.config(
cloud_name="dapwcit3i",
api_key="917726294659896",
api_secret="PeLRKhRoA2E-r-5ykRTpuEBNcH4"
)
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE') == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
django_heroku.settings(locals())
SENDGRID_API_KEY = os.getenv('SENDGRID_API_KEY')
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = 'apikey'  # SendGrid expects the literal string 'apikey' as the SMTP username
EMAIL_HOST_PASSWORD = SENDGRID_API_KEY
EMAIL_PORT = 587
EMAIL_USE_TLS = True
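# Illustrative .env sketch (added) for the decouple config() calls above; every value
# is a placeholder, not a real credential:
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DB_NAME=instagram
#   DB_USER=postgres
#   DB_PASSWORD=change-me
#   DB_HOST=127.0.0.1
#   ALLOWED_HOSTS=localhost,127.0.0.1
#   SENDGRID_API_KEY=replace-me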
| 25.797688
| 91
| 0.691015
|
e9b9f23254a8327a46a4a1fde875c4649f0e6792
| 1,376
|
py
|
Python
|
Advance_Python/Database_Connectivity/Update_Data_Using_Parameterized_Query.py
|
siddharth-143/Python
|
293f4643a3a13e3b82d23fd8922db54dbb0f12bc
|
[
"MIT"
] | null | null | null |
Advance_Python/Database_Connectivity/Update_Data_Using_Parameterized_Query.py
|
siddharth-143/Python
|
293f4643a3a13e3b82d23fd8922db54dbb0f12bc
|
[
"MIT"
] | null | null | null |
Advance_Python/Database_Connectivity/Update_Data_Using_Parameterized_Query.py
|
siddharth-143/Python
|
293f4643a3a13e3b82d23fd8922db54dbb0f12bc
|
[
"MIT"
] | null | null | null |
# Update data in table parametrized query
import mysql.connector
def student_data(id, nm, ro, fe):
try:
conn = mysql.connector.connect(
user="root",
password="password",
host="localhost",
database="pdb",
port=3306
)
if conn.is_connected():
print("Connect Successfully")
except:
print("Unable To Connect")
# sql = 'UPDATE student SET name=%s, roll=%s, fees=%s WHERE stu_id=%s' # Using tuple
sql1 = "UPDATE student SET name=%(n)s, roll = %(r)s, fees=%(f)s WHERE stu_id=%(i)s" # using dictionary
myc = conn.cursor()
# update_val = (nm, ro, fe, id)
update_val1 = {"i": id, "n": nm, "r": ro, "f": fe}
try:
myc.execute(sql1, update_val1)
conn.commit() # Committing the change
print(myc.rowcount, "Row Update")
except:
conn.rollback() # Rollback the change
print("Unable to process data")
myc.close() # close cursor
conn.close() # close connection
while True:
id = int(input("Enter student id to update : "))
nm = input("Enter Name : ")
ro = int(input("Enter Roll No : "))
fe = int(input("Enter Fees : "))
student_data(id, nm, ro, fe)
ans = input("Do You Want To Exit (y/n) : ")
if ans == "y":
break
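# Related sketch (added, commented out): the same dictionary-style placeholders work
# for reads as well, e.g. to verify the update:
#   myc.execute("SELECT name, roll, fees FROM student WHERE stu_id = %(i)s", {"i": id})
#   print(myc.fetchone())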
| 29.913043
| 110
| 0.539244
|
08a87560906fb8f4a3a1e0466bbdc770e08cdaaa
| 6,243
|
py
|
Python
|
tools/export_tarball/export_tarball.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2019-11-28T10:46:52.000Z
|
2019-11-28T10:46:52.000Z
|
tools/export_tarball/export_tarball.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/export_tarball/export_tarball.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-03-27T11:15:39.000Z
|
2016-08-17T14:19:56.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool creates a tarball with all the sources, but without .svn directories.
It can also remove files which are not strictly required for build, so that
the resulting tarball can be reasonably small (last time it was ~110 MB).
Example usage:
export_tarball.py /foo/bar
The above will create file /foo/bar.tar.bz2.
"""
import optparse
import os
import subprocess
import sys
import tarfile
NONESSENTIAL_DIRS = (
'breakpad/src/processor/testdata',
'chrome/browser/resources/tracing/tests',
'chrome/common/extensions/docs',
'chrome/tools/test/reference_build',
'courgette/testdata',
'data',
'native_client/src/trusted/service_runtime/testdata',
'src/chrome/test/data',
'o3d/documentation',
'o3d/samples',
'o3d/tests',
'ppapi/examples',
'ppapi/native_client/tests',
'third_party/angle/samples/gles2_book',
'third_party/findbugs',
'third_party/hunspell_dictionaries',
'third_party/hunspell/tests',
'third_party/lighttpd',
'third_party/sqlite/src/test',
'third_party/sqlite/test',
'third_party/vc_80',
'third_party/xdg-utils/tests',
'third_party/yasm/source/patched-yasm/modules/arch/x86/tests',
'third_party/yasm/source/patched-yasm/modules/dbgfmts/dwarf2/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/bin/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/coff/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/elf/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/macho/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/rdf/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/win32/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/win64/tests',
'third_party/yasm/source/patched-yasm/modules/objfmts/xdf/tests',
'third_party/WebKit/LayoutTests',
'third_party/WebKit/Source/JavaScriptCore/tests',
'third_party/WebKit/Source/WebCore/ChangeLog',
'third_party/WebKit/Source/WebKit2',
'third_party/WebKit/Tools/Scripts',
'tools/gyp/test',
'v8/test',
'webkit/data/layout_tests',
'webkit/tools/test/reference_build',
)
TESTDIRS = (
'chrome/test/data',
'content/test/data',
'media/test/data',
'net/data',
)
def GetSourceDirectory():
return os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
# Workaround lack of the exclude parameter in add method in python-2.4.
# TODO(phajdan.jr): remove the workaround when it's not needed on the bot.
class MyTarFile(tarfile.TarFile):
def set_remove_nonessential_files(self, remove):
self.__remove_nonessential_files = remove
def set_verbose(self, verbose):
self.__verbose = verbose
def __report_skipped(self, name):
if self.__verbose:
print 'D\t%s' % name
def __report_added(self, name):
if self.__verbose:
print 'A\t%s' % name
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
head, tail = os.path.split(name)
if tail in ('.svn', '.git'):
self.__report_skipped(name)
return
if self.__remove_nonessential_files:
# WebKit change logs take quite a lot of space. This saves ~10 MB
# in a bzip2-compressed tarball.
if 'ChangeLog' in name:
self.__report_skipped(name)
return
# Remove contents of non-essential directories, but preserve gyp files,
# so that build/gyp_chromium can work.
for nonessential_dir in (NONESSENTIAL_DIRS + TESTDIRS):
dir_path = os.path.join(GetSourceDirectory(), nonessential_dir)
if (name.startswith(dir_path) and
os.path.isfile(name) and
'gyp' not in name):
self.__report_skipped(name)
return
self.__report_added(name)
tarfile.TarFile.add(self, name, arcname=arcname, recursive=recursive)
def main(argv):
parser = optparse.OptionParser()
parser.add_option("--basename")
parser.add_option("--remove-nonessential-files",
dest="remove_nonessential_files",
action="store_true", default=False)
parser.add_option("--test-data", action="store_true")
# TODO(phajdan.jr): Remove --xz option when it's not needed for compatibility.
parser.add_option("--xz", action="store_true")
parser.add_option("--verbose", action="store_true", default=False)
options, args = parser.parse_args(argv)
if len(args) != 1:
print 'You must provide only one argument: output file name'
print '(without .tar.xz extension).'
return 1
if not os.path.exists(GetSourceDirectory()):
print 'Cannot find the src directory ' + GetSourceDirectory()
return 1
# These two commands are from src/DEPS; please keep them in sync.
if subprocess.call(['python', 'build/util/lastchange.py', '-o',
'build/util/LASTCHANGE'], cwd=GetSourceDirectory()) != 0:
print 'Could not run build/util/lastchange.py to update LASTCHANGE.'
return 1
if subprocess.call(['python', 'build/util/lastchange.py', '-s',
'third_party/WebKit', '-o',
'build/util/LASTCHANGE.blink'],
cwd=GetSourceDirectory()) != 0:
print 'Could not run build/util/lastchange.py to update LASTCHANGE.blink.'
return 1
output_fullname = args[0] + '.tar'
output_basename = options.basename or os.path.basename(args[0])
archive = MyTarFile.open(output_fullname, 'w')
archive.set_remove_nonessential_files(options.remove_nonessential_files)
archive.set_verbose(options.verbose)
try:
if options.test_data:
for directory in TESTDIRS:
archive.add(os.path.join(GetSourceDirectory(), directory),
arcname=os.path.join(output_basename, directory))
else:
archive.add(GetSourceDirectory(), arcname=output_basename)
finally:
archive.close()
if subprocess.call(['xz', '-9', output_fullname]) != 0:
print 'xz -9 failed!'
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 33.564516
| 80
| 0.690533
|
42defece52744a4c0c59320aec5a356c66428f1b
| 27,615
|
py
|
Python
|
ample/util/pyrvapi_results.py
|
FilomenoSanchez/ample
|
e985cdffcb93e574cde782ce579c3bfdcd8b1830
|
[
"BSD-3-Clause"
] | null | null | null |
ample/util/pyrvapi_results.py
|
FilomenoSanchez/ample
|
e985cdffcb93e574cde782ce579c3bfdcd8b1830
|
[
"BSD-3-Clause"
] | null | null | null |
ample/util/pyrvapi_results.py
|
FilomenoSanchez/ample
|
e985cdffcb93e574cde782ce579c3bfdcd8b1830
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env ccp4-python
__author__ = "Jens Thomas"
__date__ = "03 Mar 2015"
__version__ = "1.0"
import logging
import json
import os
import subprocess
import sys
import traceback
if sys.version_info.major < 3:
from urlparse import urljoin
else:
from urllib.parse import urljoin
import uuid
from ample import ensembler
from ample.util import mrbump_util, reference_manager
try:
import pyrvapi
except:
pyrvapi = None
# Hack to use Andre's pyrvapi API
try:
import pyrvapi_ext as API
except ImportError:
API = None
logger = logging.getLogger(__name__)
# Utility functions
def have_files(results_dict, *keylist, **kwargs):
"""Check if files in dictionary exist.
if kwarg check is 'all' (default) return True if all exist,
if check is 'any' return True of any exist
"""
check = 'all'
if 'check' in kwargs:
assert kwargs['check'] in ['all', 'any']
check = kwargs['check']
found = 0
for k in keylist:
if k in results_dict and os.path.isfile(str(results_dict[k])):
if check == 'any':
return True
found += 1
if check == 'all' and len(keylist) == found:
return True
return False
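# Illustrative usage sketch (added; not part of the original module). The keys below
# are MRBUMP result keys that appear elsewhere in this file:
#   have_files(r, 'PHASER_pdbout', 'PHASER_mtzout')                 # True only if both files exist
#   have_files(r, 'PHASER_logfile', 'SHELXE_logfile', check='any')  # True if at least one exists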
class AmpleOutput(object):
"""Display the output of an AMPLE job."""
_ensemble_tooltips = {
"Name": "Ensemble name - used to name the pdb file and the directory where mrbump carries out molecular replacement.",
"Cluster": "The SPICKER cluster that this ensemble was derived from.",
"Truncation Level": "Percentage of the model remaining after the varying residues were pruned away",
"Variance Threshold (A^2)": "THESEUS variance score for the most variable residue that remains in this ensemble",
"No. Residues": "Number of residues for each model in the ensemble",
"Radius Threshold (A)": "Radius threshold (1,2 or 3 A) used for subclustering the models in a truncation level",
"No. Decoys": "Number of models within this ensemble",
"Number of Atoms": "Number of atoms for each model in the ensemble",
"Sidechain Treatment": "allatom - all sidechains were retained, reliable - MET, ASP, PRO, GLN, LYS, ARG, GLU, SER were retained, polyAla - all sidechains were stripped back to polyalanine",
}
_mrbump_tooltips = {
"ensemble_name": "The identifier of the AMPLE ensemble search model",
"MR_program": "Molecular replacement program",
"Solution_Type": "MRBUMP categorisation of the solution",
"PHASER_LLG": "PHASER Log-likelihood gain for the Molecular Replacement solution",
"PHASER_TFZ": "PHASER Translation Function Z-score for the Molecular Replacement solution",
"REFMAC_Rfact": "Rfact score for REFMAC refinement of the Molecular Replacement solution",
"REFMAC_Rfree": "Rfree score for REFMAC refinement of the Molecular Replacement solution",
"BUCC_final_Rfact": "Rfact score for BUCCANEER rebuild of the Molecular Replacement solution",
"BUCC_final_Rfree": "Rfree score for BUCCANEER rebuild of the Molecular Replacement solution",
"ARP_final_Rfact": "Rfact score for ARPWARP rebuild of the Molecular Replacement solution",
"ARP_final_Rfree": "Rfree score for ARPWARP rebuild of the Molecular Replacement solution",
"SHELXE_CC": "SHELXE Correlation Coefficient score after C-alpha trace",
"SHELXE_ACL": "Average Chain Length of the fragments of the SHELXE C-alpha trace",
"SXRBUCC_final_Rfact": "Rfact score for BUCCANEER rebuild of the SHELXE C-alpha trace",
"SXRBUCC_final_Rfree": "Rfree score for BUCCANEER rebuild of the SHELXE C-alpha trace",
"SXRARP_final_Rfact": "Rfact score for ARPWARP rebuild of the SHELXE C-alpha trace",
"SXRAP_final_Rfree": "Rfree score for ARPWARP rebuild of the SHELXE C-alpha trace",
}
def __init__(self, amopt):
self.header = False
self.jsrview_dir = None
self.citation_tab_id = None
self.log_tab_id = None
self.old_mrbump_results = None
self.results_tab_id = None
self.results_tab_sections = []
self.summary_tab_id = None
self.summary_tab_ensemble_sec_id = None
self.summary_tab_results_sec_id = None
self.summary_tab_survey_sec_id = None
self.summary_tab_results_sec_table_id = None
self.summary_tab_pending_sec_id = None
# Process variables from amopt
ccp4i2_xml = amopt.get('ccp4i2_xml')
rvapi_document = amopt.get('rvapi_document')
work_dir = amopt['work_dir']
run_dir = amopt['run_dir']
show_gui = bool(amopt['show_gui'])
webserver_uri = amopt['webserver_uri']
# Process modes and set flags
self.ccp4i2 = bool(ccp4i2_xml) # Indicate we are running under CCP4I2
self.jscofe = bool(rvapi_document) # Indicate we are running under jscofe
# Show the gui if we are under ccp4i2, jscofe or show_gui has been specified (e.g. ccp4i)
self.generate_output = self.ccp4i2 | self.jscofe | show_gui
# No log tab with jscofe or ccp4i2
self.own_log_tab = not (self.ccp4i2 or self.jscofe)
# For running under old CCP4online
if webserver_uri:
# don't start browser and setup variables for the path on the webserver
self._webserver_start = len(run_dir) + 1
self.webserver_uri = webserver_uri
else:
self.webserver_uri = None
            self._webserver_start = None
self.setup(work_dir=work_dir, ccp4i2_xml=ccp4i2_xml, rvapi_document=rvapi_document, show_gui=show_gui)
return
def setup(self, work_dir=None, ccp4i2_xml=None, rvapi_document=None, show_gui=False):
if not pyrvapi or not self.generate_output:
return
title = "AMPLE Results"
logger.debug("Using Andre's Pyrvapi" if API else "COULD NOT FIND Andre's API!")
if rvapi_document:
logger.debug("Restoring document: %s", rvapi_document)
pyrvapi.rvapi_restore_document2(rvapi_document)
self.jsrview_dir = os.path.dirname(rvapi_document)
else:
# Quick hack to init with Andre's stuff - can switch out for Felix's API when done
logger.debug("Starting with xml %s", ccp4i2_xml)
self.jsrview_dir = os.path.join(work_dir, "jsrview")
if not os.path.isdir(self.jsrview_dir):
os.mkdir(self.jsrview_dir)
kwargs = dict(
wintitle=title,
reportdir=self.jsrview_dir,
xml=ccp4i2_xml,
abspaths=False,
# bug in jsrview:
# layout = 4 if i1 else 7,
)
API.document.newdoc(**kwargs)
if not self.own_log_tab:
self.log_tab_id = pyrvapi.rvapi_get_meta()
if show_gui:
# We start our own browser
jsrview = os.path.join(os.environ["CCP4"], "libexec", "jsrview")
subprocess.Popen([jsrview, os.path.join(self.jsrview_dir, "index.html")])
return
def create_citation_tab(self, ample_dict):
if self.citation_tab_id:
return
self.citation_tab_id = "citation_tab"
pyrvapi.rvapi_insert_tab(self.citation_tab_id, "Citation", self.log_tab_id, False)
refMgr = reference_manager.ReferenceManager(ample_dict)
bibtex_file = refMgr.save_citations_to_file(ample_dict)
if self.ccp4i2:
# The horror of ccp4i2 means that this all gets dumped into xml so we can't use any markup tags
tdata = refMgr.citations_as_text
else:
tdata = refMgr.methods_as_html
tdata += refMgr.citations_as_html
tdata += '<hr><p>A bibtex file with the relevant citations has been saved to: {}</p>'.format(bibtex_file)
pyrvapi.rvapi_add_text(tdata, self.citation_tab_id, 0, 0, 1, 1)
if not self.ccp4i2:
pyrvapi.rvapi_add_data(
"bibtex_file",
"Citations as BIBTEX",
self.fix_path(bibtex_file),
"text",
self.citation_tab_id,
2,
0,
1,
1,
True,
)
return self.citation_tab_id
def create_log_tab(self, ample_dict):
if not self.own_log_tab or self.log_tab_id:
return
logfile = ample_dict['ample_log']
if not os.path.isfile(logfile):
return False
self.log_tab_id = "log_tab"
logurl = self.fix_path(logfile)
pyrvapi.rvapi_add_tab(self.log_tab_id, "Log file", True)
pyrvapi.rvapi_append_content(logurl, True, self.log_tab_id)
return self.log_tab_id
def create_results_tab(self, ample_dict):
if self.ccp4i2 or not self.summary_tab_id or not self._got_mrbump_results(ample_dict):
return
mrb_results = ample_dict.get('mrbump_results')
if mrb_results == self.old_mrbump_results:
return
self.old_mrbump_results = mrb_results
if not self.results_tab_id:
self.results_tab_id = "results_tab"
pyrvapi.rvapi_insert_tab(self.results_tab_id, "Results", self.summary_tab_id, False)
# Delete old sections:
pyrvapi.rvapi_flush()
for section_id in self.results_tab_sections:
pyrvapi.rvapi_remove_widget(section_id)
pyrvapi.rvapi_flush()
self.results_tab_sections = []
        ensemble_results = ample_dict['ensembles_data'] if 'ensembles_data' in ample_dict else None
mrbsum = mrbump_util.ResultsSummary(results=mrb_results[0 : min(len(mrb_results), mrbump_util.TOP_KEEP)])
mrbsum.sortResults(prioritise="SHELXE_CC")
self.results_section(
self.results_tab_id, mrbsum.results, ensemble_results, "Top {0} SHELXE Results".format(mrbump_util.TOP_KEEP)
)
mrbsum.sortResults(prioritise="PHASER_TFZ")
        # Add separator between results - doesn't work as it's not deleted on refresh
# pyrvapi.rvapi_add_text("<br/><hr/><br/>", self.results_tab_id, 0, 0, 1, 1)
self.results_section(
self.results_tab_id, mrbsum.results, ensemble_results, "Top {0} PHASER Results".format(mrbump_util.TOP_KEEP)
)
return self.results_tab_id
def results_section(self, results_tab_id, mrb_results, ensemble_results, section_title):
"""Results Tab"""
if not mrb_results:
return
# Create unique identifier for this section by using the id
# All ids will have this appended to avoid clashes
uid = str(uuid.uuid4())
section_id = section_title.replace(" ", "_") + uid
self.results_tab_sections.append(section_id) # Add to list so we can remove if we update
pyrvapi.rvapi_add_panel(section_id, results_tab_id, 0, 0, 1, 1)
pyrvapi.rvapi_add_text("<h3>{0}</h3>".format(section_title), section_id, 0, 0, 1, 1)
results_tree = "results_tree" + section_id
pyrvapi.rvapi_add_tree_widget(results_tree, section_title, section_id, 0, 0, 1, 1)
for r in mrb_results:
ensemble_name = r['ensemble_name']
container_id = "sec_{0}".format(ensemble_name) + uid
pyrvapi.rvapi_add_panel(container_id, results_tree, 0, 0, 1, 1)
header = "<h3>Results for ensemble: {0}</h3>".format(ensemble_name)
pyrvapi.rvapi_add_text(header, container_id, 0, 0, 1, 1)
sec_table = "sec_table_{0}".format(ensemble_name) + uid
title = "Results table: {0}".format(ensemble_name)
title = "Summary"
pyrvapi.rvapi_add_section(sec_table, title, container_id, 0, 0, 1, 1, True)
table_id = "table_{0}".format(ensemble_name) + uid
pyrvapi.rvapi_add_table(table_id, "", sec_table, 1, 0, 1, 1, False)
tdata = mrbump_util.ResultsSummary().results_table([r])
self.fill_table(table_id, tdata, tooltips=self._mrbump_tooltips)
# Ensemble
if ensemble_results:
epdb = self.ensemble_pdb(r, ensemble_results)
if epdb:
sec_ensemble = "sec_ensemble_{0}".format(ensemble_name) + uid
pyrvapi.rvapi_add_section(sec_ensemble, "Ensemble Search Model", container_id, 0, 0, 1, 1, False)
data_ensemble = "data_ensemble_{0}".format(ensemble_name) + uid
pyrvapi.rvapi_add_data(
data_ensemble, "Ensemble PDB", self.fix_path(epdb), "XYZOUT", sec_ensemble, 2, 0, 1, 1, True
)
# PHASER
self.add_results_section(
result_dict=r,
ensemble_name=ensemble_name,
program_name='PHASER',
logfile_key='PHASER_logfile',
pdb_key='PHASER_pdbout',
mtz_key='PHASER_mtzout',
uid=uid,
container_id=container_id,
)
# REFMAC
self.add_results_section(
result_dict=r,
ensemble_name=ensemble_name,
program_name='Refmac',
logfile_key='REFMAC_logfile',
pdb_key='REFMAC_pdbout',
mtz_key='REFMAC_mtzout',
uid=uid,
container_id=container_id,
)
            # Buccaneer
self.add_results_section(
result_dict=r,
ensemble_name=ensemble_name,
program_name='BUCCANEER',
logfile_key='BUCC_logfile',
pdb_key='BUCC_pdbout',
mtz_key='BUCC_mtzout',
uid=uid,
container_id=container_id,
)
# Arpwarp
self.add_results_section(
result_dict=r,
ensemble_name=ensemble_name,
program_name='ArpWarp',
logfile_key='ARP_logfile',
pdb_key='ARP_pdbout',
mtz_key='ARP_mtzout',
uid=uid,
container_id=container_id,
)
# SHELXE
self.add_results_section(
result_dict=r,
ensemble_name=ensemble_name,
program_name='SHELXE',
logfile_key='SHELXE_logfile',
pdb_key='SHELXE_pdbout',
mtz_key='SHELXE_mtzout',
uid=uid,
container_id=container_id,
)
            # Buccaneer Rebuild
self.add_results_section(
result_dict=r,
ensemble_name=ensemble_name,
program_name='BUCCANEER SHELXE Trace Rebuild',
logfile_key='SXRBUCC_logfile',
pdb_key='SXRBUCC_pdbout',
mtz_key='SXRBUCC_mtzout',
uid=uid,
container_id=container_id,
)
# Arpwarp Rebuild
self.add_results_section(
result_dict=r,
ensemble_name=ensemble_name,
program_name='ARPWARP SHELXE Trace Rebuild',
logfile_key='SXRARP_logfile',
pdb_key='SXRARP_pdbout',
mtz_key='SXRARP_mtzout',
uid=uid,
container_id=container_id,
)
pyrvapi.rvapi_set_tree_node(results_tree, container_id, "{0}".format(ensemble_name), "auto", "")
return
def add_results_section(
self,
result_dict=None,
ensemble_name=None,
program_name=None,
logfile_key=None,
pdb_key=None,
mtz_key=None,
uid=None,
container_id=None,
):
assert (
result_dict
and ensemble_name
and program_name
and logfile_key
and pdb_key
and mtz_key
and uid
and container_id
)
have_logfile = have_files(result_dict, logfile_key)
have_pdb_and_mtz = have_files(result_dict, pdb_key, mtz_key)
if not (have_logfile or have_pdb_and_mtz):
return
program_id = program_name.lower().replace(' ', '_')
this_sec_id = "sec_{0}_{1}".format(program_id, ensemble_name) + uid
pyrvapi.rvapi_add_section(this_sec_id, "{} Outputs".format(program_name), container_id, 0, 0, 1, 1, False)
if have_pdb_and_mtz:
data_id = "o{0}{1}".format(program_id, ensemble_name) + uid
pyrvapi.rvapi_add_data(
data_id,
"{} OUTPUTS".format(program_name),
self.fix_path(result_dict[pdb_key]),
"xyz",
this_sec_id,
2,
0,
1,
1,
True,
)
pyrvapi.rvapi_append_to_data(data_id, self.fix_path(result_dict[mtz_key]), "hkl:map")
if have_logfile:
data_id = "l{0}{1}".format(program_id, ensemble_name) + uid
pyrvapi.rvapi_add_data(
data_id,
"{} Logfile".format(program_name),
self.fix_path(result_dict[logfile_key]),
# "summary",
"text",
this_sec_id,
2,
0,
1,
1,
True,
)
def create_summary_tab(self, ample_dict):
self._create_summary_tab()
if self.do_create_ensembles_section(ample_dict):
self.create_ensembles_section(ample_dict)
if not self._got_mrbump_results(ample_dict):
return self.summary_tab_id
if not self.summary_tab_results_sec_id:
self.rm_pending_section()
# Only create the table once
self.summary_tab_results_sec_id = "mrbump"
pyrvapi.rvapi_add_section(self.summary_tab_results_sec_id, "MRBUMP", self.summary_tab_id, 0, 0, 1, 1, True)
self.summary_tab_results_sec_table_id = "mrbump_table"
pyrvapi.rvapi_add_table1(
self.summary_tab_results_sec_id + "/" + self.summary_tab_results_sec_table_id,
"MRBUMP Results",
1,
0,
1,
1,
True,
)
mrb_results = ample_dict.get('mrbump_results')
if not mrb_results == self.old_mrbump_results:
# We set old_mrbump_results when we create the results_tab
self.fill_table(
self.summary_tab_results_sec_table_id,
mrbump_util.ResultsSummary().results_table(mrb_results),
tooltips=self._mrbump_tooltips,
)
if not self.summary_tab_survey_sec_id and not self.ccp4i2:
# Only create the table once
self.summary_tab_survey_sec_id = "survey"
pyrvapi.rvapi_add_section(self.summary_tab_survey_sec_id, "Feedback", self.summary_tab_id, 0, 0, 1, 1, True)
rstr = "<h2>How did we do?</h2><h3>Please follow this link and leave some feedback:</h3><a href='{0}' style='color: blue'>{0}</a>".format(
reference_manager.survey_url
)
pyrvapi.rvapi_add_text(rstr, self.summary_tab_survey_sec_id, 0, 0, 1, 1)
return self.summary_tab_id
def _create_summary_tab(self):
if not self.summary_tab_id:
self.summary_tab_id = "summary_tab"
title = "Summary"
pyrvapi.rvapi_insert_tab(self.summary_tab_id, title, self.citation_tab_id, False)
# Create pending section until we have data to show
self.summary_tab_pending_sec_id = 'summary_tab_pending'
pyrvapi.rvapi_add_section(
self.summary_tab_pending_sec_id, "Processing...", self.summary_tab_id, 0, 0, 1, 1, True
)
rstr = "<p>No results are currently available. Please check back later.</p>"
pyrvapi.rvapi_add_text(rstr, self.summary_tab_pending_sec_id, 0, 0, 1, 1)
return
def do_create_ensembles_section(self, ample_dict):
return (
not (ample_dict.get('single_model_mode') or ample_dict.get('homologs') or
ample_dict.get('ideal_helices') or ample_dict.get('helical_ensembles'))
and bool(ample_dict.get('ensembles_data'))
and not self.summary_tab_ensemble_sec_id
)
def create_ensembles_section(self, ample_dict):
self.rm_pending_section()
ensembles_data = ample_dict['ensembles_data']
self.summary_tab_ensemble_sec_id = "ensembles"
pyrvapi.rvapi_add_section(self.summary_tab_ensemble_sec_id, "Ensembles", self.summary_tab_id, 0, 0, 1, 1, True)
if ample_dict['import_ensembles']:
rstr = 'Imported {0} ensembles.'.format(len(ensembles_data))
pyrvapi.rvapi_add_text(rstr, self.summary_tab_ensemble_sec_id, 0, 0, 1, 1)
else:
# Get the ensembling data
d = ensembler.collate_cluster_data(ensembles_data)
clusters = d['clusters']
rstr = ""
rstr += "Ensemble Results<br/>"
rstr += "----------------<br/><br/>"
rstr += "Cluster method: {0}<br/>".format(d['cluster_method'])
rstr += "Cluster score type: {0}<br/>".format(d['cluster_score_type'])
rstr += "Truncation method: {0}<br/>".format(d['truncation_method'])
rstr += "Percent truncation: {0}<br/>".format(d['percent_truncation'])
rstr += "Side-chain treatments: {0}<br/>".format(d['side_chain_treatments'])
rstr += "Number of clusters: {0}<br/><br/>".format(len(clusters.keys()))
rstr += "Generated {0} ensembles<br/><br/>".format(len(ensembles_data))
pyrvapi.rvapi_add_text(rstr, self.summary_tab_ensemble_sec_id, 0, 0, 1, 1)
ensemble_table = "ensemble_table"
pyrvapi.rvapi_add_table1(
self.summary_tab_ensemble_sec_id + "/" + ensemble_table, "Ensembling Results", 1, 0, 1, 1, True
)
tdata = []
for i, cluster_num in enumerate(sorted(d['clusters'].keys())):
header = True if i == 0 else False
tdata += ensembler.cluster_table_data(clusters, cluster_num, d['side_chain_treatments'], header=header)
self.fill_table(ensemble_table, tdata, tooltips=self._ensemble_tooltips)
return
def display_results(self, ample_dict):
"""Display the results of an AMPLE run using pyrvapi
Parameters
----------
ample_dict : dict
An AMPLE job dictionary
"""
if not (pyrvapi or self.generate_output):
return
try:
if not self.header:
pyrvapi.rvapi_add_header("AMPLE Results")
self.header = True
self.create_log_tab(ample_dict)
self.create_citation_tab(ample_dict)
self.create_summary_tab(ample_dict)
self.create_results_tab(ample_dict)
pyrvapi.rvapi_flush()
except Exception as e:
logger.critical("Error displaying results: %s\n%s", e, traceback.format_exc())
return True
def ensemble_pdb(self, mrbump_result, ensembles_data):
try:
ensemble_dict = None
for e in ensembles_data:
if e['name'] == mrbump_result.get('ensemble_name'):
ensemble_dict = e
break
if os.path.isfile(ensemble_dict['ensemble_pdb']):
return ensemble_dict['ensemble_pdb']
else:
return False
except:
return False
def fix_path(self, path):
"""Ammend path so it's suitable for the webserver or jscofe/standalone"""
if self.webserver_uri:
return urljoin(self.webserver_uri, path[self._webserver_start :])
elif self.jscofe:
return os.path.join("..", os.path.relpath(path, self.jsrview_dir))
return path
def fill_table(self, table_id, tdata, tooltips={}):
# Make column headers
for i in range(len(tdata[0])): # Skip name as it's the row header
h = tdata[0][i]
tt = tooltips[h] if h in tooltips else ""
            pyrvapi.rvapi_put_horz_theader(table_id, h.encode('utf-8'), tt, i)
        # Add table data
        for i in range(1, len(tdata)):
for j in range(len(tdata[i])):
pyrvapi.rvapi_put_table_string(table_id, str(tdata[i][j]), i - 1, j)
# REM - can use pyrvapi.rvapi_shape_table_cell to format cells is required
return
def _got_mrbump_results(self, ample_dict):
return ample_dict.get('mrbump_results') and len(ample_dict['mrbump_results'])
def rm_pending_section(self):
if self.summary_tab_pending_sec_id:
pyrvapi.rvapi_flush()
pyrvapi.rvapi_remove_widget(self.summary_tab_pending_sec_id)
pyrvapi.rvapi_flush()
self.summary_tab_pending_sec_id = None
return
def rvapi_shutdown(self, amopt):
"""Return any results to jscofe
Parameters
----------
amopt : dict
AMPLE results dictionary with all information
"""
rvdoc = amopt['rvapi_document']
if not rvdoc:
return
# Create dictionary we're going to return
meta = {'results': []}
nresults = 0
if bool(amopt.get('mrbump_results')):
mrb_results = amopt['mrbump_results']
nresults = min(3, len(mrb_results))
if nresults > 0:
for fdata in mrbump_util.ResultsSummary(mrb_results[:nresults]).topFiles(nresults):
# Mangle paths. relpath assumes args are directories so need to add ..
fdata['pdb'] = self.fix_path(fdata['pdb'])
fdata['mtz'] = self.fix_path(fdata['mtz'])
meta['results'].append(fdata)
# Commit to file
logger.debug("Exporting pyrvapi metadata:\n{0}".format(meta))
pyrvapi.rvapi_put_meta(json.dumps(meta))
pyrvapi.rvapi_store_document2(rvdoc)
return
if __name__ == "__main__":
import copy, sys, time
from ample.util import ample_util
logging.basicConfig(level=logging.DEBUG)
pklfile = sys.argv[1]
ample_dict = ample_util.read_amoptd(pklfile)
ample_dict['show_gui'] = True
ample_dict['ample_log'] = os.path.abspath(__file__)
report_dir = os.path.abspath(os.path.join(os.curdir, "pyrvapi_tmp"))
AR = AmpleOutput(ample_dict)
AR.display_results(ample_dict)
view1_dict = copy.copy(ample_dict)
del view1_dict['ensembles_data']
del view1_dict['mrbump_results']
SLEEP = 5
AR.display_results(view1_dict)
time.sleep(SLEEP)
# for i in range(10):
view1_dict['ensembles_data'] = ample_dict['ensembles_data']
AR.display_results(view1_dict)
time.sleep(SLEEP)
mrbump_results = []
for r in ample_dict['mrbump_results'][0:3]:
r['SHELXE_CC'] = None
r['SHELXE_ACL'] = None
mrbump_results.append(r)
view1_dict['mrbump_results'] = mrbump_results
AR.display_results(view1_dict)
time.sleep(SLEEP)
view1_dict['mrbump_results'] = ample_dict['mrbump_results'][0:5]
AR.display_results(view1_dict)
time.sleep(SLEEP)
view1_dict['mrbump_results'] = ample_dict['mrbump_results']
AR.display_results(view1_dict)
| 41.968085
| 197
| 0.60717
|
bfb92a1255d766a7b64ae8407817632e0c47e493
| 483
|
py
|
Python
|
one-get/util/hex_tool.py
|
wenbindu/HungryTools
|
7b44d373784246e215e82d6278bf97c7b1afcb95
|
[
"MIT"
] | null | null | null |
one-get/util/hex_tool.py
|
wenbindu/HungryTools
|
7b44d373784246e215e82d6278bf97c7b1afcb95
|
[
"MIT"
] | null | null | null |
one-get/util/hex_tool.py
|
wenbindu/HungryTools
|
7b44d373784246e215e82d6278bf97c7b1afcb95
|
[
"MIT"
] | null | null | null |
import re
def hex_to_char(hex_str):
""" converts a single hex-encoded character 'FFFF' into the corresponding real character """
return chr(int(hex_str, 16))
def parser_hex(hex_str):
"""converts a hex-encoded character '%u****' into the real character string.
Args:
hex_str (str): string contains the %u
"""
percent_u = re.compile(r"%u([0-9a-fA-F]{4})")
decoded = percent_u.sub(lambda m: hex_to_char(m.group(1)), hex_str)
return decoded
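# Minimal demo (added sketch, not in the original module); the sample string below is invented.
if __name__ == "__main__":
    sample = "name=%u0048%u0069"
    print(parser_hex(sample))  # prints: name=Hi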
| 25.421053
| 96
| 0.660455
|
2d50e8a9f58e49b5325f046a62c3474845ed55f5
| 987
|
py
|
Python
|
agent/src/agent/pipeline/config/stages/base.py
|
anodot/daria
|
d475899309f56cd85347be0f7001a0dd97dd197a
|
[
"Apache-2.0"
] | 16
|
2019-04-03T08:31:54.000Z
|
2021-01-24T17:12:04.000Z
|
agent/src/agent/pipeline/config/stages/base.py
|
anodot/daria
|
d475899309f56cd85347be0f7001a0dd97dd197a
|
[
"Apache-2.0"
] | 10
|
2020-01-20T14:59:06.000Z
|
2022-01-21T10:19:16.000Z
|
agent/src/agent/pipeline/config/stages/base.py
|
anodot/daria
|
d475899309f56cd85347be0f7001a0dd97dd197a
|
[
"Apache-2.0"
] | 5
|
2021-01-08T19:23:03.000Z
|
2021-11-09T14:15:49.000Z
|
import os
import pytz
from abc import ABC, abstractmethod
from datetime import datetime, timedelta
from agent.modules.constants import ROOT_DIR
from agent.pipeline import Pipeline
class Stage(ABC):
JYTHON_SCRIPT = ''
JYTHON_SCRIPTS_PATH = os.path.join('pipeline', 'config', 'jython_scripts')
JS_SCRIPTS_PATH = os.path.join('pipeline', 'config', 'js_scripts')
def __init__(self, pipeline_: Pipeline):
self.pipeline = pipeline_
@abstractmethod
def get_config(self) -> dict:
pass
def get_jython_file_path(self):
return os.path.join(ROOT_DIR, self.JYTHON_SCRIPTS_PATH, self.JYTHON_SCRIPT)
def _get_js_file_path(self, name: str):
return os.path.join(ROOT_DIR, self.JS_SCRIPTS_PATH, name)
def get_initial_timestamp(self) -> datetime:
midnight = datetime.now(pytz.timezone('UTC')).replace(hour=0, minute=0, second=0, microsecond=0)
return midnight - timedelta(days=int(self.pipeline.days_to_backfill))
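# Illustrative sketch only (not part of the agent codebase): a hypothetical concrete
# stage showing the expected shape of a Stage subclass. The class name, script name
# and config keys below are invented for demonstration purposes.
class _ExampleStage(Stage):
    JYTHON_SCRIPT = 'example.py'  # assumed script file name
    def get_config(self) -> dict:
        # Combine the resolved jython script path with a backfill start timestamp
        return {
            'script': self.get_jython_file_path(),
            'initial_timestamp': self.get_initial_timestamp().isoformat(),
        }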
| 31.83871
| 104
| 0.721378
|
342eaf1a609e7f20ed7c93eba055ade8059bb55c
| 4,465
|
py
|
Python
|
RecoEgamma/EgammaElectronProducers/python/lowPtGsfElectronSequence_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
RecoEgamma/EgammaElectronProducers/python/lowPtGsfElectronSequence_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 3
|
2018-08-23T13:40:24.000Z
|
2019-12-05T21:16:03.000Z
|
RecoEgamma/EgammaElectronProducers/python/lowPtGsfElectronSequence_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
# Modifier for FastSim
from Configuration.Eras.Modifier_fastSim_cff import fastSim
# PFRecTracks from generalTracks
from RecoParticleFlow.PFTracking.pfTrack_cfi import *
lowPtGsfElePfTracks = pfTrack.clone()
lowPtGsfElePfTracks.TkColList = ['generalTracks']
lowPtGsfElePfTracks.GsfTracksInEvents = False
lowPtGsfElePfTracks.GsfTrackModuleLabel = ''
fastSim.toModify(lowPtGsfElePfTracks,TkColList = ['generalTracksBeforeMixing'])
# Low pT ElectronSeeds
from RecoEgamma.EgammaElectronProducers.lowPtGsfElectronSeeds_cfi import *
# Electron track candidates
from TrackingTools.GsfTracking.CkfElectronCandidateMaker_cff import *
lowPtGsfEleTrajectoryFilter = TrajectoryFilterForElectrons.clone()
lowPtGsfEleTrajectoryFilter.minPt = 0.
lowPtGsfEleTrajectoryFilter.minimumNumberOfHits = 3
lowPtGsfEleTrajectoryBuilder = TrajectoryBuilderForElectrons.clone()
lowPtGsfEleTrajectoryBuilder.trajectoryFilter.refToPSet_ = 'lowPtGsfEleTrajectoryFilter'
lowPtGsfEleCkfTrackCandidates = electronCkfTrackCandidates.clone()
lowPtGsfEleCkfTrackCandidates.TrajectoryBuilderPSet.refToPSet_ = 'lowPtGsfEleTrajectoryBuilder'
lowPtGsfEleCkfTrackCandidates.src = 'lowPtGsfElectronSeeds'
import FastSimulation.Tracking.electronCkfTrackCandidates_cff
fastLowPtGsfTkfTrackCandidates = FastSimulation.Tracking.electronCkfTrackCandidates_cff.electronCkfTrackCandidates.clone(src = cms.InputTag("lowPtGsfElectronSeeds"))
# GsfTracks
from TrackingTools.GsfTracking.GsfElectronGsfFit_cff import *
lowPtGsfEleFittingSmoother = GsfElectronFittingSmoother.clone()
lowPtGsfEleFittingSmoother.ComponentName = 'lowPtGsfEleFittingSmoother'
lowPtGsfEleFittingSmoother.MinNumberOfHits = 2
from TrackingTools.GsfTracking.GsfElectronGsfFit_cff import *
lowPtGsfEleGsfTracks = electronGsfTracks.clone()
lowPtGsfEleGsfTracks.Fitter = 'lowPtGsfEleFittingSmoother'
lowPtGsfEleGsfTracks.src = 'lowPtGsfEleCkfTrackCandidates'
fastSim.toModify(lowPtGsfEleGsfTracks,src = cms.InputTag("fastLowPtGsfTkfTrackCandidates"))
# GSFTrack to track association
from RecoEgamma.EgammaElectronProducers.lowPtGsfToTrackLinks_cfi import *
# GsfPFRecTracks
from RecoParticleFlow.PFTracking.pfTrackElec_cfi import *
lowPtGsfElePfGsfTracks = pfTrackElec.clone()
lowPtGsfElePfGsfTracks.GsfTrackModuleLabel = 'lowPtGsfEleGsfTracks'
lowPtGsfElePfGsfTracks.PFRecTrackLabel = 'lowPtGsfElePfTracks'
lowPtGsfElePfGsfTracks.applyGsfTrackCleaning = False
lowPtGsfElePfGsfTracks.useFifthStepForTrackerDrivenGsf = True
# SuperCluster generator and matching to GSF tracks
# Below relies on the following default configurations:
# RecoParticleFlow/PFClusterProducer/python/particleFlowClusterECALUncorrected_cfi.py
# RecoParticleFlow/PFClusterProducer/python/particleFlowClusterECAL_cff.py
# (particleFlowClusterECAL_cfi is generated automatically)
from RecoEgamma.EgammaElectronProducers.lowPtGsfElectronSuperClusters_cff import lowPtGsfElectronSuperClusters
# Low pT electron cores
from RecoEgamma.EgammaElectronProducers.lowPtGsfElectronCores_cff import *
# Low pT electrons
from RecoEgamma.EgammaElectronProducers.lowPtGsfElectrons_cfi import *
# Low pT Electron value maps
from RecoEgamma.EgammaElectronProducers.lowPtGsfElectronSeedValueMaps_cff import lowPtGsfElectronSeedValueMaps
# Low pT Electron ID
from RecoEgamma.EgammaElectronProducers.lowPtGsfElectronID_cff import lowPtGsfElectronID
# Full sequence
lowPtGsfElectronTask = cms.Task(lowPtGsfElePfTracks,
lowPtGsfElectronSeeds,
lowPtGsfEleCkfTrackCandidates,
lowPtGsfEleGsfTracks,
lowPtGsfToTrackLinks,
lowPtGsfElePfGsfTracks,
lowPtGsfElectronSuperClusters,
lowPtGsfElectronCores,
lowPtGsfElectrons,
lowPtGsfElectronSeedValueMaps,
lowPtGsfElectronID
)
lowPtGsfElectronSequence = cms.Sequence(lowPtGsfElectronTask)
_fastSim_lowPtGsfElectronTask = lowPtGsfElectronTask.copy()
_fastSim_lowPtGsfElectronTask.replace(lowPtGsfElectronSeeds, cms.Task(lowPtGsfElectronSeedsTmp,lowPtGsfElectronSeeds))
_fastSim_lowPtGsfElectronTask.replace(lowPtGsfEleCkfTrackCandidates, fastLowPtGsfTkfTrackCandidates)
fastSim.toReplaceWith(lowPtGsfElectronTask, _fastSim_lowPtGsfElectronTask)
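# Usage sketch (an assumption, not part of the original config): a cmsRun configuration
# would typically pull this sequence in with something like
#   process.load('RecoEgamma.EgammaElectronProducers.lowPtGsfElectronSequence_cff')
#   process.p = cms.Path(process.lowPtGsfElectronSequence)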
| 48.010753
| 165
| 0.81411
|
716009cbc250e11e639b89555b2a488034af1ef1
| 3,780
|
py
|
Python
|
v6_sqlite/create_db_from_v4.py
|
carlosal1015/proofofconcept
|
579873aff082e6fa497a387e0d0a5f8e5ec3ecd2
|
[
"CC-BY-4.0"
] | 14
|
2015-01-02T19:39:36.000Z
|
2022-03-09T06:08:10.000Z
|
v6_sqlite/create_db_from_v4.py
|
carlosal1015/proofofconcept
|
579873aff082e6fa497a387e0d0a5f8e5ec3ecd2
|
[
"CC-BY-4.0"
] | 242
|
2015-01-02T13:59:58.000Z
|
2022-03-27T17:22:21.000Z
|
v6_sqlite/create_db_from_v4.py
|
carlosal1015/proofofconcept
|
579873aff082e6fa497a387e0d0a5f8e5ec3ecd2
|
[
"CC-BY-4.0"
] | 6
|
2015-02-13T16:00:25.000Z
|
2020-08-05T17:51:26.000Z
|
# to run, use
# python3 v6_sqlite/create_db.py
# https://allofphysicsgraph.github.io/proofofconcept/site/how_to_build_the_physics_derivation.html
# https://docs.python.org/3/library/sqlite3.html
import sqlite3
import csv
import glob
print('sqlite3 version:',sqlite3.version)
db_file = "sqlite.db"
try:
conn = sqlite3.connect(db_file)
except sqlite3.Error:
print(sqlite3.Error)
c = conn.cursor()
try:
c.execute('''drop table inference_rules''')
except:
print('did not drop table inference_rules')
pass
# source of schema is v3_CSV/databases/README
c.execute('''CREATE TABLE inference_rules
("inference rule abbreviation","number of arguments","number of feeds","number of input expressions","number of output expressions","comments","latex expansion",yyyymmdd,author,ast)''')
inf_rules = []
list_of_ast_files = glob.glob('../v4_file_per_expression/inference_rules/*.ast')
with open('../v3_CSV/databases/inference_rules_database.csv') as fil:
csv_reader = csv.reader(fil, delimiter=',')
for line in csv_reader:
line_as_list = [x.strip() for x in line]
#print(line_as_list)
if (len(line_as_list)==7):
line_as_list.append('20190617')
line_as_list.append('bhpayne')
found_ast=False
for this_ast in list_of_ast_files:
#print('this_ast=',this_ast.split('/')[-1])
#print(line_as_list[0])
if this_ast.split('/')[-1].startswith(line_as_list[0]):
# print('found',)
with open(this_ast) as ast_fil:
ast_content = ast_fil.read()
#print(ast_content)
found_ast=True
line_as_list.append(ast_content)
break # only use the first ast
if not found_ast:
                line_as_list.append('input:\noutput:\n')
inf_rules.append(tuple(line_as_list))
elif (len(line_as_list)==0):
pass # empty line
else:
print('ERROR with',line)
c.executemany('INSERT INTO inference_rules VALUES (?,?,?,?,?,?,?,?,?,?)', inf_rules)
try:
c.execute('''drop table expressions''')
except:
print('did not drop table expressions')
pass
c.execute('''CREATE TABLE expressions
("unique identifier",latex)''')
list_of_expr_tuples=[]
list_of_expr_files = glob.glob('../v4_file_per_expression/expressions/*.tex')
for expr_file in list_of_expr_files:
with open(expr_file,'r') as fil:
latex_expr = fil.read().strip()
#print(expr_file.split('/')[-1].split('_')[0],':',latex_expr)
list_of_expr_tuples.append(tuple([expr_file.split('/')[-1].split('_')[0],latex_expr]))
c.executemany('INSERT INTO expressions VALUES (?,?)', list_of_expr_tuples)
try:
c.execute('''drop table feeds''')
except:
print('did not drop table feeds')
pass
c.execute('''CREATE TABLE feeds
("local identifier",latex)''')
list_of_feed_tuples=[]
list_of_feed_files = glob.glob('../v4_file_per_expression/feeds/*.tex')
for feed_file in list_of_feed_files:
with open(feed_file,'r') as fil:
latex_feed = fil.read().strip()
list_of_feed_tuples.append(tuple([feed_file.split('/')[-1].split('_')[0],latex_feed]))
c.executemany('INSERT INTO feeds VALUES (?,?)', list_of_feed_tuples)
list_of_derivation_folders = glob.glob('../v4_file_per_expression/derivations/*')
for deriv_folder in list_of_derivation_folders:
if deriv_folder.split('/')[-1]!='all':
print('deriv folder =',deriv_folder)
#derivation_edge_list.csv
#expression_identifiers.csv
#feeds.csv
#inference_rule_identifiers.csv
conn.commit() # Save (commit) the changes
conn.close()
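# Sanity-check sketch (added; not part of the original script): inspect the freshly
# built database from the shell. The table names match the ones created above.
#   sqlite3 sqlite.db "SELECT count(*) FROM inference_rules;"
#   sqlite3 sqlite.db "SELECT * FROM expressions LIMIT 3;"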
| 31.764706
| 185
| 0.646032
|
71b601a40e8fa9aa7dedefd3860ea674e9f8a3bb
| 394
|
py
|
Python
|
private_messages/urls.py
|
plazix/django-private-messages
|
e0b2a4821f74da25d8b140501aa20da247a03382
|
[
"BSD-3-Clause"
] | 5
|
2015-07-01T13:23:08.000Z
|
2017-11-15T09:48:11.000Z
|
private_messages/urls.py
|
plazix/django-private-messages
|
e0b2a4821f74da25d8b140501aa20da247a03382
|
[
"BSD-3-Clause"
] | null | null | null |
private_messages/urls.py
|
plazix/django-private-messages
|
e0b2a4821f74da25d8b140501aa20da247a03382
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
urlpatterns = patterns('private_messages.views',
url(r'^$', 'topics', name='private_messages'),
url(r'^read/(?P<topic_id>[\d]+)/$', 'topic_read', name='private_messages_topic'),
url(r'^new/$', 'topic_new', name='private_messages_new'),
url(r'^delete/$', 'topic_delete', name='private_messages_topic_delete'),
)
| 32.833333
| 85
| 0.662437
|
cb0b956cfdd049f201e315ab1cf6dd3eef39c377
| 3,420
|
py
|
Python
|
src/formattedcode/output_jsonlines.py
|
nicoddemus/scancode-toolkit
|
58dfec66faa2c8a90f1125861081266594a1e1d7
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/formattedcode/output_jsonlines.py
|
nicoddemus/scancode-toolkit
|
58dfec66faa2c8a90f1125861081266594a1e1d7
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/formattedcode/output_jsonlines.py
|
nicoddemus/scancode-toolkit
|
58dfec66faa2c8a90f1125861081266594a1e1d7
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
#
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import OrderedDict
import simplejson
from formattedcode.utils import get_headings
from plugincode.output import output_impl
from plugincode.output import OutputPlugin
from scancode import CommandLineOption
from scancode import FileOptionType
from scancode import OUTPUT_GROUP
"""
Output plugin to write scan results as JSON lines.
"""
@output_impl
class JsonLinesOutput(OutputPlugin):
options = [
CommandLineOption(('--json-lines', 'output_json_lines',),
type=FileOptionType(mode='wb', lazy=False),
metavar='FILE',
help='Write scan output as JSON Lines to FILE.',
help_group=OUTPUT_GROUP,
sort_order=15),
]
def is_enabled(self, output_json_lines, **kwargs):
return output_json_lines
def process_codebase(self, codebase, output_json_lines, **kwargs):
results = self.get_results(codebase, **kwargs)
files_count, version, notice, scan_start, options = get_headings(codebase)
header = dict(header=OrderedDict([
('scancode_notice', notice),
('scancode_version', version),
('scancode_options', options),
('scan_start', scan_start),
('files_count', files_count)
]))
kwargs = dict(
iterable_as_array=True,
encoding='utf-8',
separators=(b',', b':',)
)
output_json_lines.write(simplejson.dumps(header, **kwargs))
output_json_lines.write(b'\n')
for name, value in codebase.attributes.to_dict().items():
if value:
smry = {name: value}
output_json_lines.write(simplejson.dumps(smry, **kwargs))
output_json_lines.write(b'\n')
for scanned_file in results:
scanned_file_line = {'files': [scanned_file]}
output_json_lines.write(simplejson.dumps(scanned_file_line, **kwargs))
output_json_lines.write(b'\n')
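# Illustrative sketch (added) of the emitted JSON Lines stream; field values are
# placeholders. One JSON object per line: the header first, then one object per file:
#   {"header": {"scancode_notice": "...", "scancode_version": "...", "files_count": 2}}
#   {"files": [{"path": "samples/zlib/zlib.h", "licenses": []}]}
#   {"files": [{"path": "samples/zlib/adler32.c", "licenses": []}]}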
| 38.426966
| 82
| 0.69883
|
86228abc4bb2ff625b2dd78d535a3a481f47cbcd
| 5,676
|
py
|
Python
|
dependencies/ui/tab1/run_button.py
|
statisticalbiotechnology/quandenser-pipeline
|
4175f7d3de29d08dbb53e0e4f1b0f2fba8147df3
|
[
"Apache-2.0"
] | 8
|
2019-05-17T14:45:30.000Z
|
2021-06-24T15:53:34.000Z
|
dependencies/ui/tab1/run_button.py
|
statisticalbiotechnology/quandenser-pipeline
|
4175f7d3de29d08dbb53e0e4f1b0f2fba8147df3
|
[
"Apache-2.0"
] | 33
|
2019-03-07T09:00:11.000Z
|
2021-09-07T07:47:18.000Z
|
dependencies/ui/tab1/run_button.py
|
statisticalbiotechnology/quandenser-pipeline
|
4175f7d3de29d08dbb53e0e4f1b0f2fba8147df3
|
[
"Apache-2.0"
] | 1
|
2019-11-21T12:32:07.000Z
|
2019-11-21T12:32:07.000Z
|
import os
import sys
from PySide2.QtWidgets import QPushButton, QTableWidget, QLineEdit, QMessageBox
from PySide2.QtCore import QCoreApplication
import subprocess
from shutil import copyfile
import time
from colorama import Fore, Back, Style
import secrets
import re
# Custom parser for both sh files and nf configs
from custom_config_parser import custom_config_parser
from utils import ERROR
class run_button(QPushButton):
def __init__(self, nf_settings_path, sh_script_path, pipe_path, config_path):
super(run_button,self).__init__(parent = None)
self.setText('RUN')
self.nf_settings_path = nf_settings_path
self.sh_script_path = sh_script_path
self.pipe_path = pipe_path
self.config_path = config_path
#self.setStyleSheet("background-color:grey") # Change color depending on if you can run or not
self.clicked.connect(self.run)
def run(self):
# Load settings
self.pipe_parser = custom_config_parser()
self.pipe_parser.load(self.pipe_path)
self.nf_settings_parser = custom_config_parser()
self.nf_settings_parser.load(self.nf_settings_path)
self.sh_parser = custom_config_parser()
self.sh_parser.load(self.sh_script_path)
# Read parent
parent = self.parentWidget()
# OUTPUT_DIRECTORY #
children = parent.findChildren(QLineEdit)
for child in children: # This is so I can have whatever order in widgets I want
if child.type == 'directory':
break # Will keep child
output_path = child.text()
if not os.path.isdir(output_path):
ERROR('Not a valid output path')
return 1
# Change output parameters in both nf_settings and sh
self.sh_parser.write("OUTPUT_PATH", output_path) # In sh
self.sh_parser.write("CONFIG_LOCATION", self.config_path) # In sh
self.nf_settings_parser.write("params.output_path", output_path) # In sh
# OUTPUT_LABEL #
label = self.sh_parser.get('OUTPUT_PATH_LABEL')
if label != '': # Check if label has been set
label = re.sub("_*", '', label) # Remove previous indexing
index = 0
while True:
if os.path.isdir(output_path + label + "_" + str(index)):
index += 1
else:
label = label + "_" + str(index)
self.nf_settings_parser.write("params.output_label", label)
self.sh_parser.write('OUTPUT_PATH_LABEL', label)
break
else:
self.nf_settings_parser.write("params.output_label", '')
# BATCH_FILE #
child = parent.findChildren(QTableWidget)[0]
full_table = []
errors = []
for row in range(child.rowCount()):
if child.item(row, 0).text() == ' ':
child.item(row, 0).setText('')
f = child.item(row, 0).text()
if f != '' and f != ' ':
if not os.path.isfile(child.item(row, 0).text()):
errors.append(f"File {f} in row {row+1} does not exist")
elif self.nf_settings_parser.get('params.workflow') in ["MSconvert", "Quandenser"] and child.item(row, 1).text() == '':
label = 'A' # Add junk labeling
elif child.item(row, 1).text() == '' and self.nf_settings_parser.get('params.workflow') == "Full":
errors.append(f"File {f} in row {row+1} is missing a label (Full workflow enabled)")
elif child.item(row, 1).text() != '':
label = child.item(row, 1).text()
input_string = f + '\t' + label + '\n'
full_table.append(input_string)
if full_table == []:
            errors.append('No files chosen')
if errors != []:
errors = '\n'.join(errors)
ERROR(errors)
return 1
with open(f"{output_path}/file_list.txt", 'w') as fp:
for line in full_table:
fp.write(line)
batch_file_path = f"{output_path}/file_list.txt"
self.nf_settings_parser.write("params.batch_file", batch_file_path)
# DATABASE_FILE #
children = parent.findChildren(QLineEdit)
for child in children: # This is so I can have whatever order in widgets I want
if child.type == 'file':
break # Will keep child
database_path = child.text()
self.nf_settings_parser.write("params.db", database_path)
workflow = self.nf_settings_parser.get("params.workflow")
if workflow == "Full" and not os.path.isfile(database_path):
            ERROR("You must choose a database if you are running the full pipeline")
return 1
# EMAIL #
email = self.nf_settings_parser.get("params.email")
if email != '':
# Need to add -N here, since without it, nextflow will display a warning
self.sh_parser.write("EMAIL_NOTIFICATION", f"-N {email}")
else:
self.sh_parser.write("EMAIL_NOTIFICATION", f"")
# CUSTOM MOUNTS #
custom_mounts = self.pipe_parser.get('custom_mounts').replace('\r', '').replace('\n', '')
self.nf_settings_parser.write('params.custom_mounts', custom_mounts)
# Generate random hash for nextflow
random_hash = secrets.token_urlsafe(16)
self.nf_settings_parser.write('params.random_hash', random_hash)
# Set pipe to launch nextflow pipeline
self.pipe_parser.write('exit_code', '0', isString=False)
self.window().close()
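# Illustrative note (not part of the original widget code): the file_list.txt written
# above is a plain tab-separated batch file with one "<path><TAB><label>" pair per
# line, for example (paths and labels below are hypothetical):
#
#   /data/run1.mzML<TAB>A
#   /data/run2.mzML<TAB>B
#
# params.batch_file in the nextflow settings is then pointed at this file, so a
# downstream reader only needs something like:
#
#   with open(batch_file_path) as fp:
#       pairs = [line.rstrip('\n').split('\t') for line in fp if line.strip()]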
| 42.676692
| 135
| 0.604475
|
65d74864ce6b1eed4a68fa285358afabc9245667
| 4,418
|
py
|
Python
|
tests/geometricFlow.py
|
twguest/FELpy
|
0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd
|
[
"Apache-2.0"
] | 1
|
2021-03-15T14:04:19.000Z
|
2021-03-15T14:04:19.000Z
|
tests/geometricFlow.py
|
twguest/FELpy
|
0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd
|
[
"Apache-2.0"
] | 2
|
2021-11-27T11:55:48.000Z
|
2021-11-27T11:56:26.000Z
|
tests/geometricFlow.py
|
twguest/FELpy
|
0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 8 14:39:58 2020
@author: twguest
Let's test the geometric flow theory
"""
import sys
sys.path.append("/opt/spytlab")
sys.path.append("/opt/WPG/")
sys.path.append("/opt/spb_model")
import numpy as np
from model.materials.phaseMask import phaseMask
from model.beamline.structure import propagation_parameters
from model.tools import constructPulse
from utils.banded_utils import diagonal_form, solve_banded
from wpg.optical_elements import Drift
from felpy.model.core.beamline import Beamline
from OpticalFlow import processOneProjection
from wpg.wpg_uti_wf import plot_intensity_map as plotIntensity
from sklearn.preprocessing import minmax_scale as norm
from matplotlib import pyplot as plt
from model.src.coherent import construct_SA1_wavefront
from wpg import srwlib
from wpg.wpg_uti_wf import get_axis
from scipy.constants import h,c,e
if __name__ == "__main__":
slc = 2
N = 10
nx, ny = 128, 128
II = np.zeros((nx,ny,N))
PH = np.zeros((nx,ny,N))
PHz = np.zeros((nx,ny,N))
A = np.zeros((N,N))
B = np.zeros((N,1))
val = nx//4
wfr = construct_SA1_wavefront(nx,ny,4.96,0.25)
wav = (h*c)/(wfr.params.photonEnergy*e)
sp = phaseMask(np.random.rand(50,50), [ get_axis(wfr, axis = 'x').max()-
get_axis(wfr, axis = 'x').min(),
get_axis(wfr, axis = 'y').max()-
get_axis(wfr, axis = 'y').min()], wav) ##speckle
slc = 2
N = 10
nx, ny = 128, 128
II = np.zeros((nx,ny,N))
PH = np.zeros((nx,ny,N))
PHz = np.zeros((nx,ny,N))
A = np.zeros((N,N))
B = np.zeros((N,1))
val = nx//4
for i in range(N):
wfr = construct_SA1_wavefront(nx,ny,4.96,0.25)
pm = np.random.rand(nx,ny)*1e-2
print(pm[val,val])
srwlib.srwl.SetRepresElecField(wfr._srwl_wf, 'f')
ps = phaseMask(pm, [ get_axis(wfr, axis = 'x').max()-
get_axis(wfr, axis = 'x').min(),
get_axis(wfr, axis = 'y').max()-
get_axis(wfr, axis = 'y').min()], wav) ##speckle
bl = Beamline()
bl.append(sp, propagation_parameters(1,1,1,1))
bl.propagate(wfr)
bl = Beamline()
bl.append(ps, propagation_parameters(1,1,1,1))
bl.propagate(wfr)
PH[:,:,i] = wfr.get_phase()[:,:,0] #% np.pi*2
## PH = (PH + np.pi) % (2 * np.pi) - np.pi
bl = Beamline()
bl.append(Drift(0.10), propagation_parameters(1,1,1,1, mode = 'normal'))
bl.propagate(wfr)
II[:,:,i] = wfr.get_intensity()[:,:,0]
plotIntensity(wfr)
II = np.random.rand(*II.shape)*1e-100
print("")
for i in range(N):
a = II[:,:,i]
if i+1 != N:
b = II[:,:,i+1]
else:
b = a
results = processOneProjection(a,b)
phi = results['phi'].real
print("Phase Diff: {}".format((phi[val,val] + np.pi) % (2 * np.pi) - np.pi))
#if i+1 != N:
#print("M Diff: {}".format((PH[val,val,i] - PH[val,val, i+1])/((phi[val,val] + np.pi) % (2 * np.pi) - np.pi)))
# =============================================================================
# plt.imshow(norm(phi))
# plt.show()
# =============================================================================
print("\nactual phase difference")
for i in range(N):
if i+1 != N:
print(PH[val,val,i] + PH[val,val, i+1])
else:
print(0)
# =============================================================================
#
# A[i,i] = 1
# B[i,:] = (phi[val,val])
#
#
#
# if i+1 != N:
# A[i,i+1] = -1
#
#
# ab = diagonal_form(A)
#
# x = solve_banded((1,1), ab, B)
#
#
# =============================================================================
#print(phi[val,val] - np.matmul(A,x))
#print(np.matmul(A,x))
#print(x)
#print(PH[val,val,:])
| 27.104294
| 122
| 0.459937
|
4f523a89306206ef6f34328e5a23740f6448a4cf
| 116
|
py
|
Python
|
src/pybind/doc/tutorial/code/test_float_vector.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
src/pybind/doc/tutorial/code/test_float_vector.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
src/pybind/doc/tutorial/code/test_float_vector.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import kaldi
f = kaldi.FloatVector(3)
f[0] = 10
print(f)
g = f.numpy()
g[1] = 20
print(f)
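# Illustrative note (not part of the original example): numpy() is used here because
# it returns an array backed by the same memory as the FloatVector, so the write to
# g[1] is expected to show up in the second print of f as well. The exact print
# formatting is an assumption; the shared-memory behaviour is what this example is
# meant to demonstrate.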
| 9.666667
| 24
| 0.62069
|
5d619820d9234da7c28e8aed3cbaa148c4e4e16a
| 821
|
py
|
Python
|
cx_Oracle-doc/samples/DatabaseStartup.py
|
zaygeee/MASTER
|
6e11ec3383a13ae6f86ab1a23613bee7a2fc9ed5
|
[
"bzip2-1.0.6"
] | null | null | null |
cx_Oracle-doc/samples/DatabaseStartup.py
|
zaygeee/MASTER
|
6e11ec3383a13ae6f86ab1a23613bee7a2fc9ed5
|
[
"bzip2-1.0.6"
] | null | null | null |
cx_Oracle-doc/samples/DatabaseStartup.py
|
zaygeee/MASTER
|
6e11ec3383a13ae6f86ab1a23613bee7a2fc9ed5
|
[
"bzip2-1.0.6"
] | null | null | null |
#------------------------------------------------------------------------------
# DatabaseStartup.py
# This script demonstrates starting up a database using Python. It is only
# possible in Oracle 10g Release 2 and higher. The connection used assumes that
# the environment variable ORACLE_SID has been set.
#------------------------------------------------------------------------------
import cx_Oracle
# the connection must be in PRELIM_AUTH mode
connection = cx_Oracle.connect("/",
mode = cx_Oracle.SYSDBA | cx_Oracle.PRELIM_AUTH)
connection.startup()
# the following statements must be issued in normal SYSDBA mode
connection = cx_Oracle.connect("/", mode = cx_Oracle.SYSDBA)
cursor = connection.cursor()
cursor.execute("alter database mount")
cursor.execute("alter database open")
| 39.095238
| 80
| 0.604141
|
7d858a9150de6bb794ce960dd5936b2546bd88b3
| 4,104
|
py
|
Python
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/cloud_jobs.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/cloud_jobs.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/cloud_jobs.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_1.models.cloud_job_extended import CloudJobExtended # noqa: F401,E501
class CloudJobs(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'jobs': 'list[CloudJobExtended]',
'resume': 'str'
}
attribute_map = {
'jobs': 'jobs',
'resume': 'resume'
}
def __init__(self, jobs=None, resume=None): # noqa: E501
"""CloudJobs - a model defined in Swagger""" # noqa: E501
self._jobs = None
self._resume = None
self.discriminator = None
if jobs is not None:
self.jobs = jobs
if resume is not None:
self.resume = resume
@property
def jobs(self):
"""Gets the jobs of this CloudJobs. # noqa: E501
:return: The jobs of this CloudJobs. # noqa: E501
:rtype: list[CloudJobExtended]
"""
return self._jobs
@jobs.setter
def jobs(self, jobs):
"""Sets the jobs of this CloudJobs.
:param jobs: The jobs of this CloudJobs. # noqa: E501
:type: list[CloudJobExtended]
"""
self._jobs = jobs
@property
def resume(self):
"""Gets the resume of this CloudJobs. # noqa: E501
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:return: The resume of this CloudJobs. # noqa: E501
:rtype: str
"""
return self._resume
@resume.setter
def resume(self, resume):
"""Sets the resume of this CloudJobs.
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:param resume: The resume of this CloudJobs. # noqa: E501
:type: str
"""
if resume is not None and len(resume) < 0:
raise ValueError("Invalid value for `resume`, length must be greater than or equal to `0`") # noqa: E501
self._resume = resume
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudJobs):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
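# Illustrative usage sketch (not part of the generated model code): both fields are
# optional keyword arguments and to_dict() mirrors the swagger_types mapping. The
# resume token below is a made-up placeholder value.
if __name__ == '__main__':
    demo = CloudJobs(jobs=[], resume='token-from-previous-call')
    assert demo.resume == 'token-from-previous-call'
    print(demo.to_dict())  # {'jobs': [], 'resume': 'token-from-previous-call'}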
| 28.303448
| 170
| 0.572612
|
a062bdd48c2f783bbde07ec2b74fe64b9b8c6ef3
| 7,405
|
py
|
Python
|
api/client/swagger_client/models/api_credential.py
|
Ophendy663/my-travel-plans
|
3da86c777aba636f930a9b8e19b488d08b0031fa
|
[
"Apache-2.0"
] | null | null | null |
api/client/swagger_client/models/api_credential.py
|
Ophendy663/my-travel-plans
|
3da86c777aba636f930a9b8e19b488d08b0031fa
|
[
"Apache-2.0"
] | 1
|
2021-09-21T23:31:13.000Z
|
2021-09-21T23:31:13.000Z
|
api/client/swagger_client/models/api_credential.py
|
Ophendy663/my-travel-plans
|
3da86c777aba636f930a9b8e19b488d08b0031fa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1.29-filter-categories
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiCredential(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'created_at': 'datetime',
'pipeline_id': 'str',
'project_id': 'str',
'api_key': 'str',
'data_assets': 'list[str]'
}
attribute_map = {
'id': 'id',
'created_at': 'created_at',
'pipeline_id': 'pipeline_id',
'project_id': 'project_id',
'api_key': 'api_key',
'data_assets': 'data_assets'
}
def __init__(self, id=None, created_at=None, pipeline_id=None, project_id=None, api_key=None, data_assets=None): # noqa: E501
"""ApiCredential - a model defined in Swagger""" # noqa: E501
self._id = None
self._created_at = None
self._pipeline_id = None
self._project_id = None
self._api_key = None
self._data_assets = None
self.discriminator = None
if id is not None:
self.id = id
if created_at is not None:
self.created_at = created_at
self.pipeline_id = pipeline_id
self.project_id = project_id
if api_key is not None:
self.api_key = api_key
if data_assets is not None:
self.data_assets = data_assets
@property
def id(self):
"""Gets the id of this ApiCredential. # noqa: E501
:return: The id of this ApiCredential. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ApiCredential.
:param id: The id of this ApiCredential. # noqa: E501
:type: str
"""
self._id = id
@property
def created_at(self):
"""Gets the created_at of this ApiCredential. # noqa: E501
:return: The created_at of this ApiCredential. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this ApiCredential.
:param created_at: The created_at of this ApiCredential. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def pipeline_id(self):
"""Gets the pipeline_id of this ApiCredential. # noqa: E501
:return: The pipeline_id of this ApiCredential. # noqa: E501
:rtype: str
"""
return self._pipeline_id
@pipeline_id.setter
def pipeline_id(self, pipeline_id):
"""Sets the pipeline_id of this ApiCredential.
:param pipeline_id: The pipeline_id of this ApiCredential. # noqa: E501
:type: str
"""
if pipeline_id is None:
raise ValueError("Invalid value for `pipeline_id`, must not be `None`") # noqa: E501
self._pipeline_id = pipeline_id
@property
def project_id(self):
"""Gets the project_id of this ApiCredential. # noqa: E501
:return: The project_id of this ApiCredential. # noqa: E501
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this ApiCredential.
:param project_id: The project_id of this ApiCredential. # noqa: E501
:type: str
"""
if project_id is None:
raise ValueError("Invalid value for `project_id`, must not be `None`") # noqa: E501
self._project_id = project_id
@property
def api_key(self):
"""Gets the api_key of this ApiCredential. # noqa: E501
TODO: what is the api_key # noqa: E501
:return: The api_key of this ApiCredential. # noqa: E501
:rtype: str
"""
return self._api_key
@api_key.setter
def api_key(self, api_key):
"""Sets the api_key of this ApiCredential.
TODO: what is the api_key # noqa: E501
:param api_key: The api_key of this ApiCredential. # noqa: E501
:type: str
"""
self._api_key = api_key
@property
def data_assets(self):
"""Gets the data_assets of this ApiCredential. # noqa: E501
List of data asset IDs # noqa: E501
:return: The data_assets of this ApiCredential. # noqa: E501
:rtype: list[str]
"""
return self._data_assets
@data_assets.setter
def data_assets(self, data_assets):
"""Sets the data_assets of this ApiCredential.
List of data asset IDs # noqa: E501
:param data_assets: The data_assets of this ApiCredential. # noqa: E501
:type: list[str]
"""
self._data_assets = data_assets
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiCredential, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiCredential):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
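# Illustrative usage sketch (not part of the generated model code): pipeline_id and
# project_id are required (their setters raise ValueError when given None), while the
# remaining fields stay optional. The identifiers below are made-up placeholders.
#
#   cred = ApiCredential(pipeline_id='pipe-123', project_id='proj-456',
#                        api_key='not-a-real-key', data_assets=['asset-1'])
#   cred.to_dict()  # {'id': None, 'created_at': None, 'pipeline_id': 'pipe-123', ...}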
| 27.943396
| 130
| 0.588521
|
75a3bd58ff8a50c5f429277333f918ea3beb9521
| 1,463
|
py
|
Python
|
test/functional/reindex.py
|
RossClelland/uscbuild
|
db77df86e94ba4362040d5bedf1c71e5b4f01654
|
[
"MIT"
] | null | null | null |
test/functional/reindex.py
|
RossClelland/uscbuild
|
db77df86e94ba4362040d5bedf1c71e5b4f01654
|
[
"MIT"
] | null | null | null |
test/functional/reindex.py
|
RossClelland/uscbuild
|
db77df86e94ba4362040d5bedf1c71e5b4f01654
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Uscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running uscoind with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import UscoinTestFramework
from test_framework.util import assert_equal
import time
class ReindexTest(UscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def reindex(self, justchainstate=False):
self.nodes[0].generate(3)
blockcount = self.nodes[0].getblockcount()
self.stop_nodes()
extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
self.start_nodes(extra_args)
while self.nodes[0].getblockcount() < blockcount:
time.sleep(0.1)
assert_equal(self.nodes[0].getblockcount(), blockcount)
self.log.info("Success")
def run_test(self):
self.reindex(False)
self.reindex(True)
self.reindex(False)
self.reindex(True)
if __name__ == '__main__':
ReindexTest().main()
| 35.682927
| 106
| 0.704716
|
97926975567740e756cb41f6b33eb182718cab05
| 2,665
|
py
|
Python
|
src/pip/_internal/cli/main_parser.py
|
dwt/pip
|
7b2548905db91b584b5f8a11e7d3c87bf807faae
|
[
"MIT"
] | null | null | null |
src/pip/_internal/cli/main_parser.py
|
dwt/pip
|
7b2548905db91b584b5f8a11e7d3c87bf807faae
|
[
"MIT"
] | null | null | null |
src/pip/_internal/cli/main_parser.py
|
dwt/pip
|
7b2548905db91b584b5f8a11e7d3c87bf807faae
|
[
"MIT"
] | null | null | null |
"""A single place for constructing and exposing the main parser
"""
import os
import sys
from typing import List, Tuple
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip._internal.commands import commands_dict, get_similar_commands
from pip._internal.exceptions import CommandError
from pip._internal.utils.misc import get_pip_version, get_prog
__all__ = ["create_main_parser", "parse_command"]
def create_main_parser():
# type: () -> ConfigOptionParser
"""Creates and returns the main parser for pip's CLI
"""
parser = ConfigOptionParser(
usage='\n%prog <command> [options]',
add_help_option=False,
formatter=UpdatingDefaultsHelpFormatter(),
name='global',
prog=get_prog(),
)
parser.disable_interspersed_args()
parser.version = get_pip_version()
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
# so the help formatter knows
parser.main = True # type: ignore
# create command listing for description
description = [''] + [
'{name:27} {command_info.summary}'.format(**locals())
for name, command_info in commands_dict.items()
]
parser.description = '\n'.join(description)
return parser
def parse_command(args):
# type: (List[str]) -> Tuple[str, List[str]]
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout==5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = [f'unknown command "{cmd_name}"']
if guess:
msg.append(f'maybe you meant "{guess}"')
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
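# Illustrative note (not part of pip's source): following the worked comment above,
# a call such as
#
#   parse_command(['--timeout=5', 'install', '--user', 'INITools'])
#
# is expected to return ('install', ['--timeout=5', '--user', 'INITools']), i.e. the
# subcommand name plus all remaining arguments with the subcommand itself removed.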
| 29.285714
| 86
| 0.672795
|
7ec5398dd983e77435d8f47f080d32f536784f45
| 18,448
|
py
|
Python
|
q2cli/_version.py
|
Oddant1/q2cli
|
3ef25ca9979518eec4bed0c3e9cd5ae9c57f2a6d
|
[
"BSD-3-Clause"
] | 15
|
2016-04-18T22:25:05.000Z
|
2022-01-31T06:06:26.000Z
|
q2cli/_version.py
|
Oddant1/q2cli
|
3ef25ca9979518eec4bed0c3e9cd5ae9c57f2a6d
|
[
"BSD-3-Clause"
] | 187
|
2016-04-18T22:22:05.000Z
|
2022-01-20T22:54:31.000Z
|
q2cli/_version.py
|
Oddant1/q2cli
|
3ef25ca9979518eec4bed0c3e9cd5ae9c57f2a6d
|
[
"BSD-3-Clause"
] | 30
|
2016-04-12T19:45:07.000Z
|
2022-03-21T14:07:22.000Z
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "q2cli-"
cfg.versionfile_source = "q2cli/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
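# Illustrative worked example (not part of versioneer): given pieces such as
#   {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234", "dirty": True,
#    "long": "...", "error": None, "date": None}
# render_pep440 produces "1.2.0+3.gabc1234.dirty"; with no closest tag at all it
# falls back to the exception #1 form, "0+untagged.3.gabc1234.dirty".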
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| 35.408829
| 79
| 0.584454
|